diff --git a/.ci.yml b/.ci.yml
new file mode 100644
index 00000000..7d6d2965
--- /dev/null
+++ b/.ci.yml
@@ -0,0 +1,30 @@
+run:
+ timeout: 10m
+ concurrency: 4
+
+concurrency: 4
+
+linters:
+ disable-all: true
+ enable:
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - unused
+ - misspell
+ - dupl
+ - stylecheck
+
+linters-settings:
+ gofmt:
+ simplify: true
+ dupl:
+ threshold: 400
+
+issues:
+ # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
+ max-issues-per-linter: 0
+ # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
+ max-same-issues: 0
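For context on the linters enabled above, errcheck reports error return values that are silently dropped. A minimal, hypothetical Go sketch of the pattern it flags and the checked form that passes:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// errcheck would flag this call because the returned error is dropped:
	// os.Remove("stale.lock")

	// Checked form that passes the linter (file name is hypothetical).
	if err := os.Remove("stale.lock"); err != nil && !os.IsNotExist(err) {
		fmt.Fprintln(os.Stderr, "cleanup failed:", err)
	}
}
```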
diff --git a/.github/labeler.yml b/.github/labeler.yml
new file mode 100644
index 00000000..ffeada5d
--- /dev/null
+++ b/.github/labeler.yml
@@ -0,0 +1,25 @@
+# Add 'Documentation' label to any changes within 'docs' folder or any subfolders
+Documentation:
+ - changed-files:
+ - any-glob-to-any-file:
+ - 'docs/**'
+
+# Add 'Documentation_docs' label to any file changes directly within the 'docs' folder
+Documentation_docs:
+ - changed-files:
+ - any-glob-to-any-file:
+ - 'docs/*'
+
+# Add 'Documentation_md' label to any change to .md files anywhere in the repository
+Documentation_md:
+ - changed-files:
+ - any-glob-to-any-file:
+ - '**/*.md'
+
+# Add 'feature' label to any PR where the head branch name starts with `feature` or has a `feature` section in the name
+feature:
+ - head-branch: ['^feature', 'feature']
+
+# Add 'release' label to any PR that is opened against the `main` branch
+release:
+ - base-branch: 'main'
diff --git a/.github/workflows/agent-container-pr.yml b/.github/workflows/agent-container-pr.yml
index 5c61e536..e34509fb 100644
--- a/.github/workflows/agent-container-pr.yml
+++ b/.github/workflows/agent-container-pr.yml
@@ -2,6 +2,9 @@ name: Container Agent Docker Image CI
on:
pull_request:
+ paths-ignore:
+ - '**.md'
+ - 'charts/**'
branches:
- 'main'
@@ -17,11 +20,11 @@ jobs:
uses: actions/checkout@v3
with:
fetch-depth: 0
-
+
-
name: Set up QEMU
uses: docker/setup-qemu-action@v2
-
+
- uses: docker/setup-buildx-action@v1
name: Set up Docker Buildx
@@ -32,8 +35,8 @@ jobs:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
-
-
+
+
-
name: Build and push on PR
uses: docker/build-push-action@v4
@@ -45,4 +48,4 @@ jobs:
tags: ${{ env.REGISTRY }}/${{ github.repository }}/container-agent:pr-${{ github.event.pull_request.number }}
build-args: |
"GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}"
-
+
diff --git a/.github/workflows/agent-container.yml b/.github/workflows/agent-container.yml
index cf023c6f..d560d873 100644
--- a/.github/workflows/agent-container.yml
+++ b/.github/workflows/agent-container.yml
@@ -25,7 +25,7 @@ jobs:
steps:
- name: Checkout GitHub Action
uses: actions/checkout@v3
-
+
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
@@ -56,7 +56,7 @@ jobs:
file: ./dockerfiles/agent/container/Dockerfile
tags: ${{ env.REGISTRY }}/${{ github.repository }}/container-agent:${{ github.run_id }}
labels: ${{ steps.metadata.outputs.labels }}
-
+
push: true
- name: Install cosign
@@ -67,12 +67,12 @@ jobs:
cosign sign -y ${{ env.REGISTRY }}/${{ github.repository }}/container-agent:${{ github.run_id }}
env:
COSIGN_EXPERIMENTAL: 1
-
+
- name: Verify the pushed tags
run: cosign verify ${{ env.REGISTRY }}/${{ github.repository }}/container-agent:${{ github.run_id }} --certificate-identity ${{ env.GH_URL }}/${{ github.repository }}/.github/workflows/agent-container.yml@refs/heads/main --certificate-oidc-issuer https://token.actions.githubusercontent.com
env:
COSIGN_EXPERIMENTAL: 1
-
+
- name: Run Trivy in GitHub SBOM mode and submit results to Dependency Graph
uses: aquasecurity/trivy-action@master
with:
diff --git a/.github/workflows/agent-git-pr.yml b/.github/workflows/agent-git-pr.yml
index cd2f294c..7c4a5ede 100644
--- a/.github/workflows/agent-git-pr.yml
+++ b/.github/workflows/agent-git-pr.yml
@@ -2,6 +2,9 @@ name: Git Agent Docker Image CI
on:
pull_request:
+ paths-ignore:
+ - '**.md'
+ - 'charts/**'
branches:
- 'main'
@@ -17,11 +20,11 @@ jobs:
uses: actions/checkout@v3
with:
fetch-depth: 0
-
+
-
name: Set up QEMU
uses: docker/setup-qemu-action@v2
-
+
- uses: docker/setup-buildx-action@v1
name: Set up Docker Buildx
@@ -32,7 +35,7 @@ jobs:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
-
+
-
name: Build and push on PR
uses: docker/build-push-action@v4
@@ -44,4 +47,4 @@ jobs:
tags: ${{ env.REGISTRY }}/${{ github.repository }}/git-agent:pr-${{ github.event.pull_request.number }}
build-args: |
"GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}"
-
+
diff --git a/.github/workflows/agent-git.yml b/.github/workflows/agent-git.yml
index 657b6684..83631cd7 100644
--- a/.github/workflows/agent-git.yml
+++ b/.github/workflows/agent-git.yml
@@ -25,7 +25,7 @@ jobs:
steps:
- name: Checkout GitHub Action
uses: actions/checkout@v3
-
+
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
@@ -56,7 +56,7 @@ jobs:
file: ./dockerfiles/agent/git/Dockerfile
tags: ${{ env.REGISTRY }}/${{ github.repository }}/git-agent:${{ github.run_id }}
labels: ${{ steps.metadata.outputs.labels }}
-
+
push: true
- name: Install cosign
@@ -67,12 +67,12 @@ jobs:
cosign sign -y ${{ env.REGISTRY }}/${{ github.repository }}/git-agent:${{ github.run_id }}
env:
COSIGN_EXPERIMENTAL: 1
-
+
- name: Verify the pushed tags
run: cosign verify ${{ env.REGISTRY }}/${{ github.repository }}/git-agent:${{ github.run_id }} --certificate-identity ${{ env.GH_URL }}/${{ github.repository }}/.github/workflows/agent-git.yml@refs/heads/main --certificate-oidc-issuer https://token.actions.githubusercontent.com
env:
COSIGN_EXPERIMENTAL: 1
-
+
- name: Run Trivy in GitHub SBOM mode and submit results to Dependency Graph
uses: aquasecurity/trivy-action@master
with:
diff --git a/.github/workflows/agent-kubviz-image.yml b/.github/workflows/agent-kubviz-image.yml
index 30586d63..44c2bb67 100644
--- a/.github/workflows/agent-kubviz-image.yml
+++ b/.github/workflows/agent-kubviz-image.yml
@@ -25,7 +25,7 @@ jobs:
steps:
- name: Checkout GitHub Action
uses: actions/checkout@v3
-
+
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
@@ -56,7 +56,7 @@ jobs:
file: ./dockerfiles/agent/kubviz/Dockerfile
tags: ${{ env.REGISTRY }}/${{ github.repository }}/kubviz-agent:${{ github.run_id }}
labels: ${{ steps.metadata.outputs.labels }}
-
+
push: true
- name: Install cosign
@@ -67,12 +67,12 @@ jobs:
cosign sign -y ${{ env.REGISTRY }}/${{ github.repository }}/kubviz-agent:${{ github.run_id }}
env:
COSIGN_EXPERIMENTAL: 1
-
+
- name: Verify the pushed tags
run: cosign verify ${{ env.REGISTRY }}/${{ github.repository }}/kubviz-agent:${{ github.run_id }} --certificate-identity ${{ env.GH_URL }}/${{ github.repository }}/.github/workflows/agent-kubviz-image.yml@refs/heads/main --certificate-oidc-issuer https://token.actions.githubusercontent.com
env:
COSIGN_EXPERIMENTAL: 1
-
+
- name: Run Trivy in GitHub SBOM mode and submit results to Dependency Graph
uses: aquasecurity/trivy-action@master
with:
diff --git a/.github/workflows/agent-kubviz-pr.yml b/.github/workflows/agent-kubviz-pr.yml
index cc0cf561..3aca320e 100644
--- a/.github/workflows/agent-kubviz-pr.yml
+++ b/.github/workflows/agent-kubviz-pr.yml
@@ -2,6 +2,9 @@ name: Agent Docker Image CI
on:
pull_request:
+ paths-ignore:
+ - '**.md'
+ - 'charts/**'
branches:
- 'main'
@@ -17,11 +20,11 @@ jobs:
uses: actions/checkout@v3
with:
fetch-depth: 0
-
+
-
name: Set up QEMU
uses: docker/setup-qemu-action@v2
-
+
- uses: docker/setup-buildx-action@v1
name: Set up Docker Buildx
@@ -32,7 +35,7 @@ jobs:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
-
+
-
name: Build and push on PR
uses: docker/build-push-action@v4
@@ -44,4 +47,4 @@ jobs:
tags: ${{ env.REGISTRY }}/${{ github.repository }}/kubviz-agent:pr-${{ github.event.pull_request.number }}
build-args: |
"GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}"
-
+
diff --git a/.github/workflows/apisec-scan.yml b/.github/workflows/apisec-scan.yml
index b834f854..6151d036 100644
--- a/.github/workflows/apisec-scan.yml
+++ b/.github/workflows/apisec-scan.yml
@@ -3,8 +3,8 @@
# separate terms of service, privacy policy, and support
# documentation.
-# APIsec addresses the critical need to secure APIs before they reach production.
-# APIsec provides the industry’s only automated and continuous API testing platform that uncovers security vulnerabilities and logic flaws in APIs.
+# APIsec addresses the critical need to secure APIs before they reach production.
+# APIsec provides the industry’s only automated and continuous API testing platform that uncovers security vulnerabilities and logic flaws in APIs.
# Clients rely on APIsec to evaluate every update and release, ensuring that no APIs go to production with vulnerabilities.
# How to Get Started with APIsec.ai
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
new file mode 100644
index 00000000..9b8ea51e
--- /dev/null
+++ b/.github/workflows/ci.yaml
@@ -0,0 +1,41 @@
+
+name: ci
+on:
+ push:
+ branches:
+ - "*"
+ - main
+ pull_request:
+
+permissions:
+ contents: write
+ security-events: write
+ # Optional: allow read access to pull request. Use with `only-new-issues` option.
+ pull-requests: read
+
+jobs:
+ golangci:
+ name: lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: actions/setup-go@v4
+ with:
+ go-version: '1.21'
+ cache: false
+ - name: Run tests
+ run: go test ./... -coverprofile=coverage.out -coverpkg=./... -covermode=atomic
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v3
+ with:
+ version: v1.54
+ args: -v --config=.ci.yml
+ skip-pkg-cache: true
+ skip-build-cache: true
+
+ - name: Static check
+ uses: dominikh/staticcheck-action@v1.3.0
+ with:
+ version: "2023.1.6"
+ install-go: false
+ cache-key: '1.21'
diff --git a/.github/workflows/client-image.yml b/.github/workflows/client-image.yml
index 77e20a1c..153881de 100644
--- a/.github/workflows/client-image.yml
+++ b/.github/workflows/client-image.yml
@@ -25,7 +25,7 @@ jobs:
steps:
- name: Checkout GitHub Action
uses: actions/checkout@v3
-
+
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v2
@@ -56,7 +56,7 @@ jobs:
file: ./dockerfiles/client/Dockerfile
tags: ${{ env.REGISTRY }}/${{ github.repository }}/client:${{ github.run_id }}
labels: ${{ steps.metadata.outputs.labels }}
-
+
push: true
- name: Install cosign
@@ -67,12 +67,12 @@ jobs:
cosign sign -y ${{ env.REGISTRY }}/${{ github.repository }}/client:${{ github.run_id }}
env:
COSIGN_EXPERIMENTAL: 1
-
+
- name: Verify the pushed tags
run: cosign verify ${{ env.REGISTRY }}/${{ github.repository }}/client:${{ github.run_id }} --certificate-identity ${{ env.GH_URL }}/${{ github.repository }}/.github/workflows/client-image.yml@refs/heads/main --certificate-oidc-issuer https://token.actions.githubusercontent.com
env:
COSIGN_EXPERIMENTAL: 1
-
+
- name: Run Trivy in GitHub SBOM mode and submit results to Dependency Graph
uses: aquasecurity/trivy-action@master
with:
diff --git a/.github/workflows/client-pr.yml b/.github/workflows/client-pr.yml
index 870686a1..87464778 100644
--- a/.github/workflows/client-pr.yml
+++ b/.github/workflows/client-pr.yml
@@ -2,6 +2,9 @@ name: Client Docker Image CI
on:
pull_request:
+ paths-ignore:
+ - '**.md'
+ - 'charts/**'
branches:
- 'main'
@@ -17,11 +20,11 @@ jobs:
uses: actions/checkout@v3
with:
fetch-depth: 0
-
+
-
name: Set up QEMU
uses: docker/setup-qemu-action@v2
-
+
- uses: docker/setup-buildx-action@v1
name: Set up Docker Buildx
@@ -32,8 +35,8 @@ jobs:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
-
-
+
+
-
name: Build and push on PR
uses: docker/build-push-action@v4
@@ -45,4 +48,4 @@ jobs:
tags: ${{ env.REGISTRY }}/${{ github.repository }}/client:pr-${{ github.event.pull_request.number }}
build-args: |
"GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}"
-
+
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 533200d3..df705353 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -48,11 +48,11 @@ jobs:
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
-
+
# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
-
+
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
@@ -61,7 +61,7 @@ jobs:
# ℹ️ Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
- # If the Autobuild fails above, remove it and uncomment the following three lines.
+ # If the Autobuild fails above, remove it and uncomment the following three lines.
# modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
# - run: |
diff --git a/.github/workflows/devskim.yml b/.github/workflows/devskim.yml
index 62d8407f..d1684f76 100644
--- a/.github/workflows/devskim.yml
+++ b/.github/workflows/devskim.yml
@@ -27,7 +27,7 @@ jobs:
- name: Run DevSkim scanner
uses: microsoft/DevSkim-Action@v1
-
+
- name: Upload DevSkim scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v2
with:
diff --git a/.github/workflows/makefile.yml b/.github/workflows/makefile.yml
index e4dadb23..8db022e5 100644
--- a/.github/workflows/makefile.yml
+++ b/.github/workflows/makefile.yml
@@ -13,15 +13,15 @@ jobs:
steps:
- uses: actions/checkout@v3
-
+
- name: configure
run: ./configure
-
+
- name: Install dependencies
run: make
-
+
- name: Run check
run: make check
-
+
- name: Run distcheck
run: make distcheck
diff --git a/.github/workflows/migration-image.yml b/.github/workflows/migration-image.yml
new file mode 100644
index 00000000..624594e6
--- /dev/null
+++ b/.github/workflows/migration-image.yml
@@ -0,0 +1,79 @@
+name: Migration Docker Image CI
+
+on:
+ push:
+ paths-ignore:
+ - '**.md'
+ branches:
+ - 'main'
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ permissions:
+ packages: write
+ id-token: write
+ contents: read
+ actions: read
+ security-events: write
+ env:
+ REGISTRY: ghcr.io
+ GH_URL: https://github.com
+ steps:
+ - name: Checkout GitHub Action
+ uses: actions/checkout@v3
+
+ - name: Set up Docker Buildx
+ id: buildx
+ uses: docker/setup-buildx-action@v2
+
+ - name: Docker metadata
+ id: metadata
+ uses: docker/metadata-action@v4
+ with:
+ images: ${{ env.REGISTRY }}/${{ github.repository }}/migration
+ tags: |
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}
+ flavor: |
+ latest=true
+
+ - name: Login to GitHub Container Registry
+ uses: docker/login-action@v2
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Build image and push to GitHub Container Registry
+ uses: docker/build-push-action@v4
+ with:
+ context: .
+ file: ./dockerfiles/migration/Dockerfile
+ tags: ${{ env.REGISTRY }}/${{ github.repository }}/migration:${{ github.run_id }}
+ labels: ${{ steps.metadata.outputs.labels }}
+ push: true
+
+ - name: Install cosign
+ uses: sigstore/cosign-installer@main
+
+ - name: Sign the images
+ run: |
+ cosign sign -y ${{ env.REGISTRY }}/${{ github.repository }}/migration:${{ github.run_id }}
+ env:
+ COSIGN_EXPERIMENTAL: 1
+
+ - name: Verify the pushed tags
+ run: cosign verify ${{ env.REGISTRY }}/${{ github.repository }}/migration:${{ github.run_id }} --certificate-identity ${{ env.GH_URL }}/${{ github.repository }}/.github/workflows/migration-image.yml@refs/heads/main --certificate-oidc-issuer https://token.actions.githubusercontent.com
+ env:
+ COSIGN_EXPERIMENTAL: 1
+
+ - name: Run Trivy in GitHub SBOM mode and submit results to Dependency Graph
+ uses: aquasecurity/trivy-action@master
+ with:
+ scan-type: 'fs'
+ format: 'github'
+ output: 'dependency-results.sbom.json'
+ image-ref: '.'
+ github-pat: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/migration-pr.yml b/.github/workflows/migration-pr.yml
new file mode 100644
index 00000000..2bcd5997
--- /dev/null
+++ b/.github/workflows/migration-pr.yml
@@ -0,0 +1,50 @@
+name: Migration Docker Image CI
+
+on:
+ pull_request:
+ paths-ignore:
+ - '**.md'
+ - 'charts/**'
+ branches:
+ - 'main'
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ env:
+ REGISTRY: ghcr.io
+ GH_URL: https://github.com
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+
+ -
+ name: Set up QEMU
+ uses: docker/setup-qemu-action@v2
+
+ -
+ name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v1
+
+ -
+ name: Login to ghcr registry
+ uses: docker/login-action@v2
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+
+ -
+ name: Build and push on PR
+ uses: docker/build-push-action@v4
+ if: github.event_name == 'pull_request'
+ with:
+ context: .
+ file: ./dockerfiles/migration/Dockerfile
+ push: true
+ tags: ${{ env.REGISTRY }}/${{ github.repository }}/migration:pr-${{ github.event.pull_request.number }}
+ build-args: |
+ "GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}"
diff --git a/.github/workflows/migration-release.yml b/.github/workflows/migration-release.yml
new file mode 100644
index 00000000..f50446b3
--- /dev/null
+++ b/.github/workflows/migration-release.yml
@@ -0,0 +1,60 @@
+name: migration-release
+
+on:
+ push:
+ tags:
+ - "v*.*.*"
+
+jobs:
+ push_to_registry:
+ name: Build and push Docker image to GitHub container registry.
+ runs-on: ubuntu-20.04
+ permissions:
+ packages: write
+ id-token: write
+ contents: read
+ actions: read
+ security-events: write
+ env:
+ REGISTRY: ghcr.io
+ GH_URL: https://github.com
+ steps:
+ - name: Set environment variable
+ run: |
+ echo "RELEASE_VERSION=${GITHUB_REF:10}" >> $GITHUB_ENV
+ - name: Test environment variable
+ run: echo ${{ env.RELEASE_VERSION }}
+ - name: Check out GitHub repo
+ uses: actions/checkout@v3
+ - name: Login to GitHub Container Registry
+ uses: docker/login-action@v2
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Build image and push to GitHub Container Registry
+ uses: docker/build-push-action@v4
+ with:
+ push: true
+ context: ./
+ file: ./dockerfiles/migration/Dockerfile
+ tags: ${{ env.REGISTRY }}/${{ github.repository }}/migration:${{ env.RELEASE_VERSION }}
+ - name: Install cosign
+ uses: sigstore/cosign-installer@main
+ - name: Sign the images
+ run: |
+ cosign sign -y ${{ env.REGISTRY }}/${{ github.repository }}/migration:${{ env.RELEASE_VERSION }}
+ env:
+ COSIGN_EXPERIMENTAL: 1
+ - name: Verify the pushed tags
+ run: cosign verify ${{ env.REGISTRY }}/${{ github.repository }}/migration:${{ env.RELEASE_VERSION }} --certificate-identity ${{ env.GH_URL }}/${{ github.repository }}/.github/workflows/migration-release.yml@refs/tags/${{ env.RELEASE_VERSION }} --certificate-oidc-issuer https://token.actions.githubusercontent.com
+ env:
+ COSIGN_EXPERIMENTAL: 1
+ - name: Run Trivy in GitHub SBOM mode and submit results to Dependency Graph
+ uses: aquasecurity/trivy-action@master
+ with:
+ scan-type: 'fs'
+ format: 'github'
+ output: 'dependency-results.sbom.json'
+ image-ref: '.'
+ github-pat: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/neuralegion.yml b/.github/workflows/neuralegion.yml
index 9d130bcb..341929c3 100644
--- a/.github/workflows/neuralegion.yml
+++ b/.github/workflows/neuralegion.yml
@@ -50,7 +50,7 @@
#
# `restart_scan`
#
-# **Required** when restarting an existing scan by its ID. You can get the scan ID in the Scans section on [nexploit.app](https://nexploit.app/login). Please make sure to only use the necessary parameters. Otherwise, you will get a response with the parameter usage requirements. 
+# **Required** when restarting an existing scan by its ID. You can get the scan ID in the Scans section on [nexploit.app](https://nexploit.app/login). Please make sure to only use the necessary parameters. Otherwise, you will get a response with the parameter usage requirements.
#
# _Example:_ `restart_scan: ai3LG8DmVn9Rn1YeqCNRGQ)`
#
@@ -95,7 +95,7 @@
#
# `hosts_filter`
#
-# **Required** when the the discovery type is set to `archive`. Allows selecting specific hosts for a scan.
+# **Required** when the discovery type is set to `archive`. Allows selecting specific hosts for a scan.
#
# Outputs
#
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 51a3db4f..b4d7cff2 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -22,7 +22,7 @@ jobs:
# Needs for private repositories.
contents: read
actions: read
-
+
steps:
- name: "Checkout code"
uses: actions/checkout@a12a3943b4bdde767164f792f33f40b04645d846 # v3.0.0
@@ -41,8 +41,8 @@ jobs:
# repo_token: ${{ secrets.SCORECARD_READ_TOKEN }}
# Publish the results for public repositories to enable scorecard badges. For more details, see
- # https://github.com/ossf/scorecard-action#publishing-results.
- # For private repositories, `publish_results` will automatically be set to `false`, regardless
+ # https://github.com/ossf/scorecard-action#publishing-results.
+ # For private repositories, `publish_results` will automatically be set to `false`, regardless
# of the value entered here.
publish_results: true
@@ -54,7 +54,7 @@ jobs:
name: SARIF file
path: results.sarif
retention-days: 5
-
+
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@5f532563584d71fdef14ee64d17bafb34f751ce5 # v1.0.26
diff --git a/.github/workflows/sonarcloud.yml b/.github/workflows/sonarcloud.yml
index 81c46613..e9464daa 100644
--- a/.github/workflows/sonarcloud.yml
+++ b/.github/workflows/sonarcloud.yml
@@ -3,7 +3,7 @@
# separate terms of service, privacy policy, and support
# documentation.
-# This workflow helps you trigger a SonarCloud analysis of your code and populates
+# This workflow helps you trigger a SonarCloud analysis of your code and populates
# GitHub Code Scanning alerts with the vulnerabilities found.
# Free for open source project.
@@ -11,16 +11,16 @@
# 2. Import your project on SonarCloud
# * Add your GitHub organization first, then add your repository as a new project.
-# * Please note that many languages are eligible for automatic analysis,
+# * Please note that many languages are eligible for automatic analysis,
# which means that the analysis will start automatically without the need to set up GitHub Actions.
# * This behavior can be changed in Administration > Analysis Method.
-#
+#
# 3. Follow the SonarCloud in-product tutorial
# * a. Copy/paste the Project Key and the Organization Key into the args parameter below
# (You'll find this information in SonarCloud. Click on "Information" at the bottom left)
#
# * b. Generate a new token and add it to your Github repository's secrets using the name SONAR_TOKEN
-# (On SonarCloud, click on your avatar on top-right > My account > Security
+# (On SonarCloud, click on your avatar on top-right > My account > Security
# or go directly to https://sonarcloud.io/account/security/)
# Feel free to take a look at our documentation (https://docs.sonarcloud.io/getting-started/github/)
@@ -41,9 +41,9 @@ permissions:
jobs:
Analysis:
runs-on: ubuntu-latest
-
+
steps:
- - name: Analyze with SonarCloud
+ - name: Analyze with SonarCloud
# You can pin the exact commit or the version.
# uses: SonarSource/sonarcloud-github-action@de2e56b42aa84d0b1c5b622644ac17e505c9a049
@@ -53,7 +53,7 @@ jobs:
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} # Generate a token on Sonarcloud.io, add it to the secrets of this repo with the name SONAR_TOKEN (Settings > Secrets > Actions > add new repository secret)
with:
# Additional arguments for the sonarcloud scanner
- args:
+ args:
# Unique keys of your project and organization. You can find them in SonarCloud > Information (bottom-left menu)
# mandatory
-Dsonar.projectKey=
@@ -65,4 +65,4 @@ jobs:
# Comma-separated paths to directories containing test source files.
#-Dsonar.tests= # optional. For more info about Code Coverage, please refer to https://docs.sonarcloud.io/enriching/test-coverage/overview/
# Adds more detail to both client and server-side analysis logs, activating DEBUG mode for the scanner, and adding client-side environment variables and system properties to the server-side log of analysis report processing.
- #-Dsonar.verbose= # optional, default is false
+ #-Dsonar.verbose= # optional, default is false
diff --git a/.github/workflows/soos-dast-scan.yml b/.github/workflows/soos-dast-scan.yml
index 4fad66ea..5e4418ba 100644
--- a/.github/workflows/soos-dast-scan.yml
+++ b/.github/workflows/soos-dast-scan.yml
@@ -24,7 +24,7 @@ jobs:
soos:
permissions:
security-events: write # for uploading code scanning alert info
- actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
+ actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
name: SOOS DAST Scan
runs-on: ubuntu-latest
steps:
diff --git a/.github/workflows/synopsys-io.yml b/.github/workflows/synopsys-io.yml
index e578be24..59891d57 100644
--- a/.github/workflows/synopsys-io.yml
+++ b/.github/workflows/synopsys-io.yml
@@ -22,11 +22,11 @@ jobs:
actions: read
contents: read
security-events: write
-
+
steps:
- name: Checkout repository
uses: actions/checkout@v3
-
+
- name: Synopsys Intelligent Security Scan
id: prescription
uses: synopsys-sig/intelligent-security-scan@48eedfcd42bc342a294dc495ac452797b2d9ff08
@@ -36,7 +36,7 @@ jobs:
workflowServerUrl: ${{secrets.WORKFLOW_SERVER_URL}}
additionalWorkflowArgs: --polaris.url=${{secrets.POLARIS_SERVER_URL}} --polaris.token=${{secrets.POLARIS_ACCESS_TOKEN}}
stage: "IO"
-
+
# Please note that the ID in previous step was set to prescription
# in order for this logic to work also make sure that POLARIS_ACCESS_TOKEN
# is defined in settings
@@ -48,7 +48,7 @@ jobs:
wget -q ${{ secrets.POLARIS_SERVER_URL}}/api/tools/polaris_cli-linux64.zip
unzip -j polaris_cli-linux64.zip -d /tmp
/tmp/polaris analyze -w
-
+
# Please note that the ID in previous step was set to prescription
# in order for this logic to work
- name: Software Composition Analysis with Black Duck
@@ -56,7 +56,7 @@ jobs:
uses: blackducksoftware/github-action@9ea442b34409737f64743781e9adc71fd8e17d38
with:
args: '--blackduck.url="${{ secrets.BLACKDUCK_URL}}" --blackduck.api.token="${{ secrets.BLACKDUCK_TOKEN}}" --detect.tools="SIGNATURE_SCAN,DETECTOR"'
-
+
- name: Synopsys Intelligent Security Scan
if: ${{ steps.prescription.outputs.sastScan == 'true' || steps.prescription.outputs.scaScan == 'true' }}
uses: synopsys-sig/intelligent-security-scan@48eedfcd42bc342a294dc495ac452797b2d9ff08
@@ -64,11 +64,11 @@ jobs:
ioServerUrl: ${{secrets.IO_SERVER_URL}}
ioServerToken: ${{secrets.IO_SERVER_TOKEN}}
workflowServerUrl: ${{secrets.WORKFLOW_SERVER_URL}}
- additionalWorkflowArgs: --IS_SAST_ENABLED=${{steps.prescription.outputs.sastScan}} --IS_SCA_ENABLED=${{steps.prescription.outputs.scaScan}}
- --polaris.project.name={{PROJECT_NAME}} --polaris.url=${{secrets.POLARIS_SERVER_URL}} --polaris.token=${{secrets.POLARIS_ACCESS_TOKEN}}
+ additionalWorkflowArgs: --IS_SAST_ENABLED=${{steps.prescription.outputs.sastScan}} --IS_SCA_ENABLED=${{steps.prescription.outputs.scaScan}}
+ --polaris.project.name={{PROJECT_NAME}} --polaris.url=${{secrets.POLARIS_SERVER_URL}} --polaris.token=${{secrets.POLARIS_ACCESS_TOKEN}}
--blackduck.project.name={{PROJECT_NAME}}:{{PROJECT_VERSION}} --blackduck.url=${{secrets.BLACKDUCK_URL}} --blackduck.api.token=${{secrets.BLACKDUCK_TOKEN}}
stage: "WORKFLOW"
-
+
- name: Upload SARIF file
if: ${{steps.prescription.outputs.sastScan == 'true' }}
uses: github/codeql-action/upload-sarif@v2
diff --git a/.github/workflows/sysdig-scan.yml b/.github/workflows/sysdig-scan.yml
index 0a628f9b..568b8a9d 100644
--- a/.github/workflows/sysdig-scan.yml
+++ b/.github/workflows/sysdig-scan.yml
@@ -38,7 +38,7 @@ jobs:
id: scan
uses: sysdiglabs/scan-action@768d7626a14897e0948ea89c8437dd46a814b163
with:
- # Tag of the image to analyse.
+ # Tag of the image to analyse.
# Change ${{ github.repository }} variable by another image name if you want but don't forget changing also image-tag above
image-tag: ${{ github.repository }}:latest
# API token for Sysdig Scanning auth
@@ -46,7 +46,7 @@ jobs:
# Sysdig secure endpoint. Please read: https://docs.sysdig.com/en/docs/administration/saas-regions-and-ip-ranges/
# US-East https://secure.sysdig.com
# US-West https://us2.app.sysdig.com
- # EU https://eu1.app.sysdig.com
+ # EU https://eu1.app.sysdig.com
sysdig-secure-url: https://us2.app.sysdig.com
dockerfile-path: ./Dockerfile
input-type: docker-daemon
diff --git a/.github/workflows/tfsec.yml b/.github/workflows/tfsec.yml
index d81c0e70..3f55eba6 100644
--- a/.github/workflows/tfsec.yml
+++ b/.github/workflows/tfsec.yml
@@ -9,7 +9,7 @@ on:
push:
branches: [ "main" ]
pull_request:
- branches: [ "main" ]
+ branches: [ "main" ]
schedule:
- cron: '28 1 * * 6'
@@ -29,10 +29,10 @@ jobs:
- name: Run tfsec
uses: tfsec/tfsec-sarif-action@9a83b5c3524f825c020e356335855741fd02745f
with:
- sarif_file: tfsec.sarif
+ sarif_file: tfsec.sarif
- name: Upload SARIF file
uses: github/codeql-action/upload-sarif@v2
with:
# Path to SARIF file relative to the root of the repository
- sarif_file: tfsec.sarif
+ sarif_file: tfsec.sarif
diff --git a/.gitignore b/.gitignore
index bc19e09b..87460586 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,6 @@
civo
+alloc.svg
+allocs.pprof
+cpu.pprof
+steps-to-test.txt
+Dockerfile-grphqlserver-build
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..5f5fcbfd
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,123 @@
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.5.0
+ hooks:
+ # Checks for files that contain merge conflict strings.
+ - id: check-merge-conflict
+ # Detects aws credentials from the aws cli credentials file.
+ - id: detect-aws-credentials
+ args: [--allow-missing-credentials]
+ # detects the presence of private keys.
+ - id: detect-private-key
+ # Trims trailing whitespace in codebase.
+ - id: trailing-whitespace
+ # Protect commit to main branch
+ - id: no-commit-to-branch
+ args: [--branch,main]
+
+
+# Check if the commit is signed off using `--signoff/-s`
+- repo: https://github.com/KAUTH/pre-commit-git-checks
+ rev: v0.0.1 # Use the SHA or tag you want to point to
+ hooks:
+ - id: git-signoff
+ stages: [commit-msg]
+
+# Checks your git commit messages for style.
+- repo: https://github.com/jorisroovers/gitlint
+ rev: v0.19.1
+ hooks:
+ - id: gitlint
+ name: Scan Commit messages
+
+# Detects hardcoded secrets, security vulnerabilities and policy breaks using Gitleaks
+- repo: https://github.com/zricethezav/gitleaks
+ rev: v8.18.1
+ hooks:
+ - id: gitleaks
+ name: Detect hardcoded secrets
+ description: Detect hardcoded secrets using Gitleaks
+ entry: gitleaks protect --verbose --redact --staged
+ language: golang
+ pass_filenames: false
+
+- repo: https://github.com/Bahjat/pre-commit-golang
+ rev: v1.0.3
+ hooks:
+ # Formats Go code
+ # - id: gofumpt # requires gofumpt to be installed from github.com/mvdan/gofumpt
+ # name: Go formatter
+ # description: Runs a strict Go formatter
+ - id: go-fmt-import
+ name: Go formatter
+ description: Go formatter with fmt and imports
+ # Runs Unit tests
+ - id: go-unit-tests
+ name: Run Unit tests
+ description: Runs all the unit tests in the repo
+ # Runs static analysis of the Go code
+ - id: go-static-check
+ name: Go Static Check
+ description: Finds bugs and performance issues
+
+# Local hooks
+
+- repo: https://github.com/intelops/gitrepos-templates-policies
+ rev: v0.0.1
+ hooks:
+ - id: check-devcontainer
+ name: Check devcontainer
+ description: Checks for the existence of .devcontainer.json in the project
+ - id: check-gitsign
+ name: Check gitsign
+ description: Check if the last commit is signed with Sigstore gitsign
+ - id: check-multistage-dockerfile
+ name: Check multi-stage Dockerfile
+ description: Checks for the existence of a Dockerfile in the project and verifies that it is a multi-stage Dockerfile
+
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.5.0
+ hooks:
+ - id: check-yaml
+ name: Verify YAML syntax
+ args:
+ - --allow-multiple-documents
+- repo: https://github.com/hadolint/hadolint
+ rev: v2.12.0
+ hooks:
+ - id: hadolint
+ # Rules you want to ignore may be found here: https://github.com/hadolint/hadolint?tab=readme-ov-file#rules
+ name: Dockerfile linter
+ description: Dockerfile linter following best-practices
+ args: [--ignore, DL3051]
+
+- repo: local
+ hooks:
+ - name: Check Dockerfile
+ id: check-dockerfile-sh
+ entry: bash
+ args:
+ - -c
+ - |
+ check_dockerfile() {
+ if [[ $1 == *"Dockerfile"* ]]; then
+ base_image=$(grep '^FROM' "$1" | awk '{print $2}')
+ if [[ $base_image != golang:* ]]; then
+ echo "Error: Base image in $1 is not from cgr.dev/chianguard"
+ return 1
+ fi
+ fi
+ return 0
+ }
+
+ export -f check_dockerfile
+
+ if find . -type f -exec bash -c 'check_dockerfile "$0"' {} \; | grep -q 'Error'; then
+ echo "Commit failed due to non-compliant Dockerfile(s)."
+ exit 1
+ fi
+
+ echo "All Dockerfiles are compliant."
+ exit 0
+ language: system
+ pass_filenames: false
\ No newline at end of file
diff --git a/.readme_assets/GitBridgeNew.jpeg b/.readme_assets/GitBridgeNew.jpeg
new file mode 100644
index 00000000..c851768e
Binary files /dev/null and b/.readme_assets/GitBridgeNew.jpeg differ
diff --git a/.readme_assets/depricatedAPINew.jpeg b/.readme_assets/depricatedAPINew.jpeg
new file mode 100644
index 00000000..b981170d
Binary files /dev/null and b/.readme_assets/depricatedAPINew.jpeg differ
diff --git a/.readme_assets/gitcontainerNew.jpeg b/.readme_assets/gitcontainerNew.jpeg
new file mode 100644
index 00000000..ad3bb14c
Binary files /dev/null and b/.readme_assets/gitcontainerNew.jpeg differ
diff --git a/.readme_assets/kubeDataNew.jpeg b/.readme_assets/kubeDataNew.jpeg
new file mode 100644
index 00000000..a5c7361f
Binary files /dev/null and b/.readme_assets/kubeDataNew.jpeg differ
diff --git a/.readme_assets/sbom.jpeg b/.readme_assets/sbom.jpeg
new file mode 100644
index 00000000..4f57d4b5
Binary files /dev/null and b/.readme_assets/sbom.jpeg differ
diff --git a/.readme_assets/trivyk8sNew.jpeg b/.readme_assets/trivyk8sNew.jpeg
new file mode 100644
index 00000000..81c2abb1
Binary files /dev/null and b/.readme_assets/trivyk8sNew.jpeg differ
diff --git a/.readme_assets/vul-misconfig.jpeg b/.readme_assets/vul-misconfig.jpeg
new file mode 100644
index 00000000..7e2c7a88
Binary files /dev/null and b/.readme_assets/vul-misconfig.jpeg differ
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..740b2c36
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,11 @@
+FROM golang:1.19 AS builder
+WORKDIR /
+COPY ./ ./
+
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o ./build/graphqlserver graphqlserver/server.go
+
+FROM scratch
+COPY --from=builder ./build/graphqlserver server
+
+USER 65532:65532
+ENTRYPOINT ["/server"]
diff --git a/README.md b/README.md
index b329e4bc..4c2605cc 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@
## KubViz
-Visualize Kubernetes & DevSecOps Workflows. Tracks changes/events real-time across your entire K8s clusters, git repos, container registries, etc. , analyzing their effects and providing you with the context you need to troubleshoot efficiently. Get the Observability you need, easily.
+Visualize Kubernetes & DevSecOps workflows. Tracks changes/events in real time across your entire K8s clusters, git repos, and container registries, and covers container image vulnerability scanning, misconfiguration detection, and SBOM generation, analyzing their effects and providing you with the context you need to troubleshoot efficiently. Get the observability you need, easily.
## Table of Contents
- [How KubViz works](#how-kubviz-works)
@@ -40,7 +40,7 @@ Visualize Kubernetes & DevSecOps Workflows. Tracks changes/events real-time acro
## How KubViz works
-KubViz client can be installed on any Kubernetes cluster. KubViz agent runs in a kubernetes cluster where the changes/events need to be tracked. The agent detects the changes in real time and send those events via NATS JetStream and the same is received in the KubViz client.
+The KubViz client can be installed on any Kubernetes cluster. The KubViz agent runs in the Kubernetes cluster where the changes/events need to be tracked. The agent detects changes in real time and sends those events via NATS JetStream to the KubViz client.
KubViz client receives the events and passes it to Clickhouse database. The events present in the Clickhouse database can be visualized through Grafana.
@@ -50,7 +50,8 @@ KubViz offers a seamless integration with Git repositories, empowering you to ef
KubViz also monitors changes in your container registry, providing visibility into image updates. By tracking these changes, KubViz helps you proactively manage container security and compliance.
-It comprehensively scans the kubernetes containers for the security flaws such as vulnerabilities and misconfigurations.
+It comprehensively scans Kubernetes containers for security flaws, such as vulnerabilities and misconfigurations, and creates an SBOM (Software Bill of Materials).
+
## Architecture diagram

@@ -58,7 +59,7 @@ It comprehensively scans the kubernetes containers for the security flaws such a
## How to install and run Kubviz
#### Prerequisites
-* A Kubernetes cluster
+* A Kubernetes cluster
* Helm binary
#### Prepare Namespace
@@ -85,10 +86,20 @@ token=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)
```bash
helm upgrade -i kubviz-client kubviz/client -n kubviz --set "nats.auth.token=$token"
```
-**NOTE:**
+
+**NOTE:**
+- If you want to get a token from a secret, use a secret reference with the secret's name and key.
+
+**NOTE:**
- If you want to enable Grafana with the client deployment, add `--set grafana.enabled=true` to the helm upgrade command.
-- If grafana already exist use the same upgrade command without --set grafana.enabled=true flag.
+- Kubviz provides a setup for Grafana with Postgres data persistence, ensuring that even if the grafana pod/service goes down, the data will persist, safeguarding crucial information for visualization and analysis.
+
+```bash
+helm upgrade -i kubviz-client kubviz/client -n kubviz --set "nats.auth.token=$token" --set grafana.enabled=true --set grafana.postgresql=true
+```
+
+- If Grafana already exists, use the same upgrade command without the --set grafana.enabled=true flag.
```bash
helm upgrade -i kubviz-client kubviz/client -n kubviz --set "nats.auth.token=$token" --set grafana.enabled=true
@@ -97,6 +108,7 @@ helm upgrade -i kubviz-client kubviz/client -n kubviz --set "nats.auth.token=$to
Parameter | Description | Default
--------- | ----------- | -------
`grafana.enabled` | If true, create grafana | `false`
+`grafana.postgresql` | If true, create postgresql | `false`
- The KubViz client will also install NATS and Clickhouse. The NATS service is exposed as a LoadBalancer, and you need to note the external IP of the service **kubviz-client-nats-external** and pass it during the KubViz agent installation.
@@ -118,10 +130,14 @@ kubectl get services kubviz-client-nats-external -n kubviz --output jsonpath='{.
helm upgrade -i kubviz-agent kubviz/agent -n kubviz \
--set "nats.auth.token=$token" \
--set git_bridge.enabled=true \
- --set "git_bridge.ingress.hosts[0].host=",git_bridge.ingress.hosts[0].paths[0].path=/,git_bridge.ingress.hosts[0].paths[0].pathType=Prefix,git_bridge.ingress.tls[0].secretName=,git_bridge.ingress.tls[0].hosts[0]= \
+ --set "git_bridge.ingress.hosts[0].host=",git_bridge.ingress.hosts[0].paths[0].path=/,git_bridge.ingress.hosts[0].paths[0].pathType=Prefix,git_bridge.ingress.tls[0].secretName=,git_bridge.ingress.tls[0].hosts[0]= \
--set container_bridge.enabled=true \
--set "container_bridge.ingress.hosts[0].host=",container_bridge.ingress.hosts[0].paths[0].path=/,container_bridge.ingress.hosts[0].paths[0].pathType=Prefix,container_bridge.ingress.tls[0].secretName=,container_bridge.ingress.tls[0].hosts[0]=
```
+
+**NOTE:**
+If you want to get a token from a secret, use a secret reference with the secret's name and key.
+
3. Replace "INGRESS HOSTNAME" with the desired hostname for the Git Bridge and Container Bridge Ingress configurations.
4. Replace "SECRET-NAME" with the desired secretname for the Git Bridge and Container Bridge Ingress configurations.
@@ -139,7 +155,7 @@ Parameter | Description | Default
`git_bridge.ingress.tls` | git_bridge ingress tls configuration | []
`container_bridge.ingress.tls` | container_bridge ingress tls configuration | []
-**NOTE:**
+**NOTE:**
- Default Annotations for Ingress
@@ -164,10 +180,14 @@ helm upgrade -i kubviz-agent kubviz/agent -f values.yaml -n kubviz
1. Run the following command to deploy the KubViz agent:
```bash
-helm upgrade -i kubviz-agent kubviz/agent -n kubviz --set nats.host= --set "nats.auth.token=$token"
+helm upgrade -i kubviz-agent kubviz/agent -n kubviz --set nats.host= --set "nats.auth.token=$token"
```
2. Replace "" with the IP address of your NATS service **kubviz-client-nats-external**.
+**NOTE:**
+
+The time-based job scheduler is added for each plugin, allowing you to schedule and automate plugin execution at specific times or intervals. To activate the scheduler, set 'enabled' to 'true'. Once enabled, each plugin's execution can be configured to run at a precise time or at regular intervals, based on the provided settings. Setting a plugin's 'schedulingInterval' to '0' disables that plugin.
+
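For illustration of the note above, a minimal Go sketch of how such an interval string can drive a plugin run, assuming the robfig/cron scheduler and a hypothetical plugin hook, with '0' or an empty value treated as disabled:

```go
package main

import (
	"log"

	"github.com/robfig/cron/v3"
)

func schedulePlugin(c *cron.Cron, name, interval string, run func()) {
	// An empty or "0" interval disables the plugin, mirroring the note above.
	if interval == "" || interval == "0" {
		log.Printf("%s scheduling disabled", name)
		return
	}
	if _, err := c.AddFunc(interval, run); err != nil {
		log.Printf("invalid interval %q for %s: %v", interval, name, err)
	}
}

func main() {
	c := cron.New()
	schedulePlugin(c, "trivy-cluster-scan", "@every 24h", func() {
		log.Println("running trivy cluster scan (hypothetical plugin hook)")
	})
	c.Start()
	select {} // keep the process alive so scheduled jobs can fire
}
```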
#### How to Verify if Everything is Up and Running
After completing the installation of both the client and agent, you can use the following command to verify if they are up and running.
@@ -197,23 +217,64 @@ kubectl get secret --namespace kubviz kubviz-client-grafana -o jsonpath="{.data.
```bash
export POD_NAME=$(kubectl get pods --namespace kubviz -l "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=kubviz-client" -o jsonpath="{.items[0].metadata.name}")
```
-```bash
+```bash
kubectl --namespace kubviz port-forward $POD_NAME 3000
```
-3. Access "localhost:3000" in your web browser, where you'll be prompted to enter your credentials. Utilize the username "admin" and the password obtained from step 1 to proceed.
+3. Access "localhost:3000" in your web browser, where you'll be prompted to enter your credentials. Utilize the username "admin" and the password obtained from step 1 to proceed.
+
+#### TTL - Time-To-Live Feature
+
+We've implemented a Time-To-Live (TTL) feature to streamline the management of data within your ClickHouse tables. With TTL, historical data can be automatically relocated to alternative storage or purged to optimize storage space. This feature is particularly valuable for scenarios like time-series data or logs where older data gradually loses its relevance over time.
+
+#### Configuring TTL
+
+The TTL value is customizable, empowering you to define the specific duration after which data is marked as 'expired'.
+
+To guide you through the process of setting up a TTL, [please follow these steps](docs/CONFIGURATION_TTL.md)
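As a rough illustration of what a ClickHouse TTL amounts to, the sketch below applies a 30-day TTL. The table and column names are hypothetical, and the clickhouse-go v2 driver and DSN are assumptions; the linked guide remains the authoritative set of steps.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/ClickHouse/clickhouse-go/v2" // assumed driver; registers the "clickhouse" name
)

func main() {
	// Hypothetical table/column names; adjust to your schema per the linked guide.
	const ddl = `ALTER TABLE events MODIFY TTL EventTime + INTERVAL 30 DAY`

	db, err := sql.Open("clickhouse", "clickhouse://localhost:9000/default")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Rows older than 30 days become eligible for automatic removal.
	if _, err := db.Exec(ddl); err != nil {
		log.Fatal(err)
	}
}
```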
+
+#### Customizing Security Scanning
+
+KubViz enables you to perform cluster scans, image scans, and SBOM creation in the CycloneDX format. These scans identify vulnerabilities in your cluster and container images.
+
+You can customize the security scans by changing the chart values.
+
+- To [disable](https://github.com/intelops/kubviz/blob/main/charts/agent/values.yaml#L186) the cluster scan, pass 0 or an empty string:
+
+```yaml
+schedule:
+ enabled: true
+ trivyclusterscanInterval: 0
+...
+```
+- To change the interval, pass the desired interval time:
+
+```yaml
+schedule:
+ enabled: true
+ trivyclusterscanInterval: "@every 24h"
+...
+```
+
+You can make the same change for [image-scan](https://github.com/intelops/kubviz/blob/main/charts/agent/values.yaml#L184) and [sbom](https://github.com/intelops/kubviz/blob/main/charts/agent/values.yaml#L185).
+
+## Health Check
+
+You can run different types of checks against your Kubernetes cluster to detect issues or potential problems before they cause downtime or service disruptions. Checks run in the background and send data to KubViz. After analyzing the data from the dashboard, you can quickly take corrective action if any issues are detected.
+
+Please check the [configuration](docs/CONFIGURATION_HEALTHCHECK.md) for health checks
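As a hedged sketch of what such a background health-check poll can look like — the in-cluster URL and the JSON field names are assumptions, not a documented contract, and the 60-minute tick mirrors the agent's POLL_INTERVAL default:

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"time"
)

// khState mirrors the fields we care about from the Kuberhealthy status page
// (field names are an assumption; verify against your Kuberhealthy version).
type khState struct {
	OK     bool     `json:"OK"`
	Errors []string `json:"Errors"`
}

func main() {
	url := "http://kuberhealthy.kuberhealthy.svc.cluster.local" // hypothetical in-cluster URL
	for range time.Tick(60 * time.Minute) {                     // mirrors the 60m POLL_INTERVAL default
		resp, err := http.Get(url)
		if err != nil {
			log.Println("kuberhealthy poll failed:", err)
			continue
		}
		var state khState
		if err := json.NewDecoder(resp.Body).Decode(&state); err != nil {
			log.Println("decode failed:", err)
		}
		resp.Body.Close()
		log.Printf("healthy=%v, errors=%d", state.OK, len(state.Errors))
	}
}
```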
## Use Cases
### Cluster Event Tracking
-
+
Use KubViz to monitor your cluster events, including:
-- State changes
+- State changes
- Errors
- Other messages that occur in the cluster
@@ -221,7 +282,7 @@ Use KubViz to monitor your cluster events, including:
-
+
@@ -235,7 +296,7 @@ Use KubViz to monitor your cluster events, including:
### Git Repository Events Tracking
-
+
@@ -249,7 +310,7 @@ Use KubViz to monitor your cluster events, including:
### Container Registry Events Tracking
-
+
@@ -261,7 +322,7 @@ Use KubViz to monitor your cluster events, including:
### Kubernetes Container Security Tracking
-
+
@@ -270,6 +331,20 @@ Use KubViz to monitor your cluster events, including:
- Detects configuration issues in Kubernetes cluster
+
+
+
+
+### SBOM
+
+
+
+
+
+- Generate Software Bill of Materials (SBOM) reports in the CycloneDX format for images within your Kubernetes cluster using KubViz. These reports are available in JSON format.
+
+
+
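For illustration, a minimal Go sketch that reads such a CycloneDX JSON report; only a few well-known top-level fields are decoded, and the file name is hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// Minimal subset of the CycloneDX JSON document.
type bom struct {
	BOMFormat   string `json:"bomFormat"`
	SpecVersion string `json:"specVersion"`
	Components  []struct {
		Name    string `json:"name"`
		Version string `json:"version"`
	} `json:"components"`
}

func main() {
	data, err := os.ReadFile("sbom.json") // hypothetical report file
	if err != nil {
		log.Fatal(err)
	}
	var b bom
	if err := json.Unmarshal(data, &b); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s %s: %d components\n", b.BOMFormat, b.SpecVersion, len(b.Components))
}
```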
## Contributing
diff --git a/agent/config/config.go b/agent/config/config.go
index 4fbe3658..775fffdc 100644
--- a/agent/config/config.go
+++ b/agent/config/config.go
@@ -1,13 +1,25 @@
package config
import (
+ "time"
+
"github.com/kelseyhightower/envconfig"
"github.com/pkg/errors"
)
type AgentConfigurations struct {
- SANamespace string `envconfig:"SA_NAMESPACE" default:"default"`
- SAName string `envconfig:"SA_NAME" default:"default"`
+ SANamespace string `envconfig:"SA_NAMESPACE" default:"default"`
+ SAName string `envconfig:"SA_NAME" default:"default"`
+ OutdatedInterval string `envconfig:"OUTDATED_INTERVAL" default:"0"`
+ GetAllInterval string `envconfig:"GETALL_INTERVAL" default:"*/30 * * * *"`
+ KubeScoreInterval string `envconfig:"KUBESCORE_INTERVAL" default:"*/40 * * * *"`
+ RakkessInterval string `envconfig:"RAKKESS_INTERVAL" default:"*/50 * * * *"`
+ KubePreUpgradeInterval string `envconfig:"KUBEPREUPGRADE_INTERVAL" default:"*/60 * * * *"`
+ TrivyImageInterval string `envconfig:"TRIVY_IMAGE_INTERVAL" default:"*/10 * * * *"`
+ TrivySbomInterval string `envconfig:"TRIVY_SBOM_INTERVAL" default:"*/20 * * * *"`
+ TrivyClusterScanInterval string `envconfig:"TRIVY_CLUSTERSCAN_INTERVAL" default:"*/35 * * * *"`
+ SchedulerEnable bool `envconfig:"SCHEDULER_ENABLE" default:"true"`
+ KuberHealthyEnable bool `envconfig:"KUBERHEALTHY_ENABLE" default:"true"`
}
func GetAgentConfigurations() (serviceConf *AgentConfigurations, err error) {
@@ -17,3 +29,16 @@ func GetAgentConfigurations() (serviceConf *AgentConfigurations, err error) {
}
return
}
+
+type KHConfig struct {
+ KuberhealthyURL string `envconfig:"KUBERHEALTHY_URL" required:"true"`
+ PollInterval time.Duration `envconfig:"POLL_INTERVAL" default:"60m"`
+}
+
+func GetKuberHealthyConfig() (khconfig *KHConfig, err error) {
+ khconfig = &KHConfig{}
+ if err = envconfig.Process("", khconfig); err != nil {
+ return nil, errors.WithStack(err)
+ }
+ return
+}
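A brief, hypothetical usage sketch of the configuration helpers above, showing how an environment override interacts with the struct-tag defaults:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/intelops/kubviz/agent/config"
)

func main() {
	// Override one scheduler interval; the rest fall back to their struct-tag defaults.
	_ = os.Setenv("TRIVY_CLUSTERSCAN_INTERVAL", "@every 24h")

	cfg, err := config.GetAgentConfigurations()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.TrivyClusterScanInterval) // "@every 24h"
	fmt.Println(cfg.GetAllInterval)           // default "*/30 * * * *"
}
```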
diff --git a/agent/container/api/agent.gen.go b/agent/container/api/agent.gen.go
index 3b9beab7..477b57a9 100644
--- a/agent/container/api/agent.gen.go
+++ b/agent/container/api/agent.gen.go
@@ -27,6 +27,12 @@ type ServerInterface interface {
// Post Dockerhub artifactory events
// (POST /event/docker/hub)
PostEventDockerHub(c *gin.Context)
+ // Post Jfrog Container Registry webhook events
+ // (POST /event/jfrog/container)
+ PostEventJfrogContainer(c *gin.Context)
+ // Post quay Container Registry webhook events
+ // (POST /event/quay/container)
+ PostEventQuayContainer(c *gin.Context)
// Kubernetes readiness and liveness probe endpoint
// (GET /status)
GetStatus(c *gin.Context)
@@ -71,6 +77,26 @@ func (siw *ServerInterfaceWrapper) PostEventDockerHub(c *gin.Context) {
siw.Handler.PostEventDockerHub(c)
}
+// PostEventJfrogContainer operation middleware
+func (siw *ServerInterfaceWrapper) PostEventJfrogContainer(c *gin.Context) {
+
+ for _, middleware := range siw.HandlerMiddlewares {
+ middleware(c)
+ }
+
+ siw.Handler.PostEventJfrogContainer(c)
+}
+
+// PostEventQuayContainer operation middleware
+func (siw *ServerInterfaceWrapper) PostEventQuayContainer(c *gin.Context) {
+
+ for _, middleware := range siw.HandlerMiddlewares {
+ middleware(c)
+ }
+
+ siw.Handler.PostEventQuayContainer(c)
+}
+
// GetStatus operation middleware
func (siw *ServerInterfaceWrapper) GetStatus(c *gin.Context) {
@@ -116,6 +142,10 @@ func RegisterHandlersWithOptions(router *gin.Engine, si ServerInterface, options
router.POST(options.BaseURL+"/event/docker/hub", wrapper.PostEventDockerHub)
+ router.POST(options.BaseURL+"/event/jfrog/container", wrapper.PostEventJfrogContainer)
+
+ router.POST(options.BaseURL+"/event/quay/container", wrapper.PostEventQuayContainer)
+
router.GET(options.BaseURL+"/status", wrapper.GetStatus)
return router
@@ -124,13 +154,14 @@ func RegisterHandlersWithOptions(router *gin.Engine, si ServerInterface, options
// Base64 encoded, gzipped, json marshaled Swagger object
var swaggerSpec = []string{
- "H4sIAAAAAAAC/6SSQWscMQyF/4rQebqzaW9zC01oQwoJ2d5CDh5buyMyaxtJnrJd5r8Xz9K0pCUJ7ckI",
- "3nv6JOuIPu1zihRNsTvODXLcJuyOGEi9cDZOETv8mKI5jiTQC4cdwU2mCHeXm69wfnsFmsnzlr1b5A0a",
- "20iv2zbPbBOJnvqdrdarNc4NpkzRZcYOP6zWqzNsMDsbKiu2LvO7kPxS7MjqkzLJknYVsMNPZOeZL6qk",
- "QSHNKSot8vfr9Z9D3lzjPDeoZb93csAOv7AapG1lVciSJg4UoD+ADQRKMrGnOq3bKXb3mEs/sseHGtLS",
- "RNFa970Itf7nGmrPnPQvqLdJ7bJazqvjaW//xl3DYAmCXz9wRztWkwN8o35I6REWQn2ZPyT/SNIOpX8D",
- "+sUi/lz6/6A+ZQylByfGW+ctyeEVVDVn5cUr2JwUb8HS4j2pbssITzHPQK9LTxLJSEHIBY6kCi4GGHmi",
- "pciSegKKISeO9ju38OSMKniNJKknj939EYuM2GGL88P8IwAA//90yrlhlQMAAA==",
+ "H4sIAAAAAAAC/6yUz2/TThDF/5XVnP2N0y833yJaQSlSS8Ot6mG9O06GOrvLzKyRify/o3VEQQWV9MfJ",
+ "Gum9N595lr0HF3cpBgwq0OynCih0EZo9eBTHlJRigAbexqCWArJpmfwGzWXCYK7P1p/N6urcSEJHHTk7",
+ "yytQ0h7/bVs/sA3Icth3slguljBVEBMGmwgaeLNYLk6ggmR1W1ihton+89HNwwa1PGJCntPOPTTwDnWV",
+ "6LRIKmCUFIPgLP9/ufzzyMsLmKYKJO92lkdo4COJmtgVVjGJ40AevWlHo1s0gjyQw3Kt3Qg0N5By25OD",
+ "2xJS44BBa/s9M9buZw1lZ4ryF9SrKHpWLKviuO/tedwlzMxB5tcbuMYNifJovmG7jfHOzITyOL+P7g65",
+ "3ub2CPTTWfw+ty+gPmRsc2ssK3XWaeTxKNQvHcfNk6r+UByvUfUc9NKqv2Y7Pgn/U7bja9CXxc+HF7Wa",
+ "H/0E1wfFMYCSnUORLvfmPuYB8kVukQMqimG0ngKKGBu86WnAeUgcWzQYfIoU9HdupsEqFvASiVz+N9Dc",
+ "7CFzDw3UMN1OPwIAAP//BN6tkhIFAAA=",
}
// GetSwagger returns the content of the embedded swagger specification file
diff --git a/agent/container/cfg.yaml b/agent/container/cfg.yaml
index 845d5fb1..59622b8b 100644
--- a/agent/container/cfg.yaml
+++ b/agent/container/cfg.yaml
@@ -3,4 +3,5 @@ generate:
gin-server: true
models: true
embedded-spec: true
-output: agent/container/api/agent.gen.go
+output: api/agent.gen.go
+
diff --git a/agent/container/main.go b/agent/container/main.go
index 3e684f3e..72f76654 100755
--- a/agent/container/main.go
+++ b/agent/container/main.go
@@ -1,16 +1,29 @@
package main
import (
+ "context"
"log"
"os"
"os/signal"
"syscall"
"github.com/intelops/kubviz/agent/container/pkg/application"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
)
func main() {
log.SetFlags(log.LstdFlags | log.Lshortfile)
+
+ tp, err := opentelemetry.InitTracer()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer func() {
+ if err := tp.Shutdown(context.Background()); err != nil {
+ log.Printf("Error shutting down tracer provider: %v", err)
+ }
+ }()
+
app := application.New()
go app.GithubContainerWatch()
go app.Start()
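The opentelemetry.InitTracer helper itself is not part of this diff; as a sketch only, a tracer provider of this shape is commonly assembled along these lines, assuming an OTLP/HTTP exporter configured via the standard OTEL_EXPORTER_OTLP_* environment variables:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// initTracer sketches what a helper like opentelemetry.InitTracer commonly does.
func initTracer(ctx context.Context) (*sdktrace.TracerProvider, error) {
	exp, err := otlptracehttp.New(ctx) // endpoint read from OTEL_EXPORTER_OTLP_* env vars
	if err != nil {
		return nil, err
	}
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	otel.SetTracerProvider(tp) // spans created via otel.Tracer(...) now use this provider
	return tp, nil
}

func main() {
	tp, err := initTracer(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = tp.Shutdown(context.Background()) }()
}
```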
diff --git a/agent/container/openapi.yaml b/agent/container/openapi.yaml
index bcd76a59..c3c3829d 100755
--- a/agent/container/openapi.yaml
+++ b/agent/container/openapi.yaml
@@ -44,5 +44,21 @@ paths:
responses:
'200':
description: OK
+ /event/quay/container:
+ post:
+ tags:
+ - public
+ summary: Post quay Container Registry webhook events
+ responses:
+ '200':
+ description: OK
+ /event/jfrog/container:
+ post:
+ tags:
+ - public
+ summary: Post Jfrog Container Registry webhook events
+ responses:
+ '200':
+ description: OK
# oapi-codegen -config ./cfg.yaml ./openapi.yaml
diff --git a/agent/container/pkg/application/application.go b/agent/container/pkg/application/application.go
index 136fa332..6d81a970 100755
--- a/agent/container/pkg/application/application.go
+++ b/agent/container/pkg/application/application.go
@@ -11,7 +11,9 @@ import (
"github.com/intelops/kubviz/agent/container/pkg/clients"
"github.com/intelops/kubviz/agent/container/pkg/config"
"github.com/intelops/kubviz/agent/container/pkg/handler"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
"github.com/kelseyhightower/envconfig"
+ "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
)
type Application struct {
@@ -43,6 +45,14 @@ func New() *Application {
}
r := gin.Default()
+
+ config, err := opentelemetry.GetConfigurations()
+ if err != nil {
+ log.Println("Unable to read open telemetry configurations")
+ }
+
+ r.Use(otelgin.Middleware(config.ServiceName))
+
apiServer.BindRequest(r)
httpServer := &http.Server{
diff --git a/agent/container/pkg/application/handlers.go b/agent/container/pkg/application/handlers.go
index 28dfe829..6d94f33c 100755
--- a/agent/container/pkg/application/handlers.go
+++ b/agent/container/pkg/application/handlers.go
@@ -1,14 +1,26 @@
package application
import (
+ "context"
"io"
"log"
"net/http"
+
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
)
//githubHandler handles the github webhooks post requests.
func (app *Application) localRegistryHandler(w http.ResponseWriter, r *http.Request) {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("container-gitlab")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "localRegistryHandler")
+ span.SetAttributes(attribute.String("http.method", "POST"))
+ defer span.End()
+
event, err := io.ReadAll(r.Body)
if err != nil {
log.Printf("Event body read failed: %v", err)
diff --git a/agent/container/pkg/clients/nats_client.go b/agent/container/pkg/clients/nats_client.go
index f42185cc..d087bb6c 100755
--- a/agent/container/pkg/clients/nats_client.go
+++ b/agent/container/pkg/clients/nats_client.go
@@ -1,11 +1,16 @@
package clients
import (
+ "context"
"fmt"
"log"
"time"
"github.com/intelops/kubviz/agent/container/pkg/config"
+ "github.com/intelops/kubviz/pkg/mtlsnats"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
"github.com/nats-io/nats.go"
)
@@ -41,9 +46,34 @@ func NewNATSContext(conf *config.Config) (*NATSContext, error) {
fmt.Println("Waiting before connecting to NATS at:", conf.NatsAddress)
time.Sleep(1 * time.Second)
- conn, err := nats.Connect(conf.NatsAddress, nats.Name("Github metrics"), nats.Token(conf.NatsToken))
- if err != nil {
- return nil, err
+ //conn, err := nats.Connect(conf.NatsAddress, nats.Name("Github metrics"), nats.Token(conf.NatsToken))
+ var conn *nats.Conn
+ var err error
+ var mtlsConfig mtlsnats.MtlsConfig
+
+ if mtlsConfig.IsEnabled {
+ tlsConfig, err := mtlsnats.GetTlsConfig()
+ if err != nil {
+ log.Println("error while getting tls config ", err)
+ time.Sleep(time.Minute * 30)
+ } else {
+ conn, err = nats.Connect(conf.NatsAddress,
+ nats.Name("Github metrics"),
+ nats.Token(conf.NatsToken),
+ nats.Secure(tlsConfig),
+ )
+ if err != nil {
+ log.Println("error while connecting with mtls ", err)
+ }
+ }
+
+ }
+
+ if conn == nil {
+ conn, err = nats.Connect(conf.NatsAddress, nats.Name("Github metrics"), nats.Token(conf.NatsToken))
+ if err != nil {
+ return nil, fmt.Errorf("error while connecting with token: %w", err)
+ }
}
ctx := &NATSContext{
@@ -112,6 +142,13 @@ func (n *NATSContext) Close() {
// The repository information in the header can be used by subscribers to filter or route the event based on its origin or destination.
// An error is returned if the publishing process fails, such as if the connection is lost or if there are issues with the JetStream.
func (n *NATSContext) Publish(event []byte, repo string) error {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("container-nats-client")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "ContainerPublish")
+ span.SetAttributes(attribute.String("repo-name", repo))
+ defer span.End()
+
msg := nats.NewMsg(eventSubject)
msg.Data = event
msg.Header.Set("REPO_NAME", repo)
diff --git a/agent/container/pkg/handler/api_handler.go b/agent/container/pkg/handler/api_handler.go
index 5e5e2834..6efee78d 100755
--- a/agent/container/pkg/handler/api_handler.go
+++ b/agent/container/pkg/handler/api_handler.go
@@ -1,11 +1,14 @@
package handler
import (
+ "log"
"net/http"
"github.com/gin-gonic/gin"
"github.com/intelops/kubviz/agent/container/api"
"github.com/intelops/kubviz/agent/container/pkg/clients"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
)
type APIHandler struct {
@@ -29,12 +32,22 @@ func NewAPIHandler(conn *clients.NATSContext) (*APIHandler, error) {
}
func (ah *APIHandler) BindRequest(r *gin.Engine) {
+
+ config, err := opentelemetry.GetConfigurations()
+ if err != nil {
+ log.Println("Unable to read open telemetry configurations")
+ }
+
+ r.Use(otelgin.Middleware(config.ServiceName))
+
apiGroup := r.Group("/")
{
apiGroup.GET("/api-docs", ah.GetApiDocs)
apiGroup.GET("/status", ah.GetStatus)
apiGroup.POST("/event/docker/hub", ah.PostEventDockerHub)
apiGroup.POST("/event/azure/container", ah.PostEventAzureContainer)
+ apiGroup.POST("/event/quay/container", ah.PostEventQuayContainer)
+ apiGroup.POST("/event/jfrog/container", ah.PostEventJfrogContainer)
}
}
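Once the agent is running, the two new routes can be exercised with a plain HTTP POST. A hedged sketch in which the host, port, and body are placeholders; a real request must carry a payload matching model.QuayImagePushPayload or model.JfrogContainerPushEventPayload:

package main

import (
	"bytes"
	"log"
	"net/http"
)

func main() {
	// Placeholder body; substitute a real Quay webhook payload.
	body := bytes.NewBufferString(`{}`)
	resp, err := http.Post("http://localhost:8080/event/quay/container", "application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// Status depends on payload validity and on NATS being reachable.
	log.Println("status:", resp.Status)
}
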
diff --git a/agent/container/pkg/handler/azure_container.go b/agent/container/pkg/handler/azure_container.go
index 106d92e0..35a72f3f 100644
--- a/agent/container/pkg/handler/azure_container.go
+++ b/agent/container/pkg/handler/azure_container.go
@@ -9,6 +9,8 @@ import (
"github.com/gin-gonic/gin"
"github.com/intelops/kubviz/model"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
)
var ErrInvalidPayload = errors.New("invalid or malformed Azure Container Registry webhook payload")
@@ -19,6 +21,12 @@ var ErrInvalidPayload = errors.New("invalid or malformed Azure Container Registr
// application to subscribe to these events and respond to changes in the container registry.
// If the payload is invalid or the publishing process fails, an error response is returned.
func (ah *APIHandler) PostEventAzureContainer(c *gin.Context) {
+
+ tracer := otel.Tracer("azure-container")
+ _, span := tracer.Start(c.Request.Context(), "PostEventAzureContainer")
+ span.SetAttributes(attribute.String("http.method", "POST"))
+ defer span.End()
+
defer func() {
_, _ = io.Copy(io.Discard, c.Request.Body)
_ = c.Request.Body.Close()
diff --git a/agent/container/pkg/handler/docker_event_dockerhub.go b/agent/container/pkg/handler/docker_event_dockerhub.go
index d022d9a7..f74bd8ae 100644
--- a/agent/container/pkg/handler/docker_event_dockerhub.go
+++ b/agent/container/pkg/handler/docker_event_dockerhub.go
@@ -7,6 +7,8 @@ import (
"net/http"
"github.com/gin-gonic/gin"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
)
// parse errors
@@ -16,6 +18,12 @@ var (
)
func (ah *APIHandler) PostEventDockerHub(c *gin.Context) {
+
+ tracer := otel.Tracer("dockerhub-container")
+ _, span := tracer.Start(c.Request.Context(), "PostEventDockerHub")
+ span.SetAttributes(attribute.String("http.method", "POST"))
+ defer span.End()
+
defer func() {
_, _ = io.Copy(io.Discard, c.Request.Body)
_ = c.Request.Body.Close()
diff --git a/agent/container/pkg/handler/jfrog_container.go b/agent/container/pkg/handler/jfrog_container.go
new file mode 100644
index 00000000..8d57f272
--- /dev/null
+++ b/agent/container/pkg/handler/jfrog_container.go
@@ -0,0 +1,53 @@
+package handler
+
+import (
+ "encoding/json"
+ "errors"
+ "io"
+ "log"
+ "net/http"
+
+ "github.com/gin-gonic/gin"
+ "github.com/intelops/kubviz/model"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+)
+
+var ErrInvalidPayloads = errors.New("invalid or malformed JFrog Container Registry webhook payload")
+
+func (ah *APIHandler) PostEventJfrogContainer(c *gin.Context) {
+
+ tracer := otel.Tracer("jfrog-container")
+ _, span := tracer.Start(c.Request.Context(), "PostEventJfrogContainer")
+ span.SetAttributes(attribute.String("http.method", "POST"))
+ defer span.End()
+
+ defer func() {
+ _, _ = io.Copy(io.Discard, c.Request.Body)
+ _ = c.Request.Body.Close()
+ }()
+ payload, err := io.ReadAll(c.Request.Body)
+ if err != nil || len(payload) == 0 {
+ log.Printf("%v: %v", ErrReadingBody, err)
+ c.Status(http.StatusBadRequest)
+ return
+ }
+
+ var pushEvent model.JfrogContainerPushEventPayload
+ err = json.Unmarshal(payload, &pushEvent)
+ if err != nil {
+ log.Printf("%v: %v", ErrInvalidPayloads, err)
+ c.JSON(http.StatusBadRequest, gin.H{"error": "Bad Request"})
+ return
+ }
+
+ log.Printf("Received event from jfrog Container Registry: %v", pushEvent)
+
+ err = ah.conn.Publish(payload, "Jfrog_Container_Registry")
+ if err != nil {
+ log.Printf("%v: %v", ErrPublishToNats, err)
+ c.Status(http.StatusInternalServerError)
+ return
+ }
+ c.Status(http.StatusOK)
+}
diff --git a/agent/container/pkg/handler/quay_handler.go b/agent/container/pkg/handler/quay_handler.go
new file mode 100644
index 00000000..b1a2be84
--- /dev/null
+++ b/agent/container/pkg/handler/quay_handler.go
@@ -0,0 +1,48 @@
+package handler
+
+import (
+ "encoding/json"
+ "io"
+ "log"
+ "net/http"
+
+ "github.com/gin-gonic/gin"
+ "github.com/intelops/kubviz/model"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+)
+
+func (ah *APIHandler) PostEventQuayContainer(c *gin.Context) {
+
+ tracer := otel.Tracer("quay-container")
+ _, span := tracer.Start(c.Request.Context(), "PostEventQuayContainer")
+ span.SetAttributes(attribute.String("http.method", "POST"))
+ defer span.End()
+
+ defer func() {
+ _, _ = io.Copy(io.Discard, c.Request.Body)
+ _ = c.Request.Body.Close()
+ }()
+ payload, err := io.ReadAll(c.Request.Body)
+ if err != nil || len(payload) == 0 {
+ log.Printf("%v: %v", ErrReadingBody, err)
+ c.Status(http.StatusBadRequest)
+ return
+ }
+ var pushEvent model.QuayImagePushPayload
+ err = json.Unmarshal(payload, &pushEvent)
+ if err != nil {
+ log.Printf("%v: %v", ErrInvalidPayload, err)
+ c.JSON(http.StatusBadRequest, gin.H{"error": "Bad Request"})
+ return
+ }
+ log.Printf("Received event from Quay Container Registry: %v", pushEvent)
+
+ err = ah.conn.Publish(payload, "Quay_Container_Registry")
+ if err != nil {
+ log.Printf("%v: %v", ErrPublishToNats, err)
+ c.Status(http.StatusInternalServerError)
+ return
+ }
+ c.Status(http.StatusOK)
+}
\ No newline at end of file
diff --git a/agent/git/main.go b/agent/git/main.go
index 9fd6a682..c1d22c27 100644
--- a/agent/git/main.go
+++ b/agent/git/main.go
@@ -1,6 +1,7 @@
package main
import (
+ "context"
"log"
"os"
"os/signal"
@@ -9,6 +10,7 @@ import (
"github.com/intelops/kubviz/agent/git/pkg/application"
"github.com/intelops/kubviz/agent/git/pkg/clients"
"github.com/intelops/kubviz/agent/git/pkg/config"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
"github.com/kelseyhightower/envconfig"
)
@@ -20,6 +22,16 @@ func main() {
log.Fatalf("Could not parse env Config: %v", err)
}
+ tp, err := opentelemetry.InitTracer()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer func() {
+ if err := tp.Shutdown(context.Background()); err != nil {
+ log.Printf("Error shutting down tracer provider: %v", err)
+ }
+ }()
+
// Connect to NATS
natsContext, err := clients.NewNATSContext(cfg)
if err != nil {
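pkg/opentelemetry itself is not part of this diff; as an assumption, its InitTracer is expected to look roughly like the sketch below — an OTLP/HTTP exporter wrapped in a batching tracer provider that is registered globally and returned so main can defer tp.Shutdown. The endpoint and service name are placeholders, not the package's real configuration:

package opentelemetry

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// InitTracer (sketch): build an OTLP/HTTP exporter, wrap it in a batching
// provider, and register it as the global tracer provider.
func InitTracer() (*sdktrace.TracerProvider, error) {
	exporter, err := otlptracehttp.New(context.Background(),
		otlptracehttp.WithEndpoint("otel-collector:4318"), // placeholder endpoint
		otlptracehttp.WithInsecure(),
	)
	if err != nil {
		return nil, err
	}
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithBatcher(exporter),
		sdktrace.WithResource(resource.NewSchemaless(
			attribute.String("service.name", "git-agent"), // placeholder service name
		)),
	)
	otel.SetTracerProvider(tp)
	return tp, nil
}
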
diff --git a/agent/git/pkg/application/application.go b/agent/git/pkg/application/application.go
index 8af7ba95..f8bd908d 100644
--- a/agent/git/pkg/application/application.go
+++ b/agent/git/pkg/application/application.go
@@ -11,6 +11,8 @@ import (
"github.com/intelops/kubviz/agent/git/api"
"github.com/intelops/kubviz/agent/git/pkg/clients"
"github.com/intelops/kubviz/agent/git/pkg/config"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
"github.com/gin-gonic/gin"
)
@@ -41,6 +43,14 @@ func New(conf *config.Config, conn *clients.NATSContext) *Application {
func (app *Application) Routes() *gin.Engine {
router := gin.New()
+
+ config, err := opentelemetry.GetConfigurations()
+ if err != nil {
+ log.Println("Unable to read open telemetry configurations")
+ }
+
+ router.Use(otelgin.Middleware(config.ServiceName))
+
api.RegisterHandlers(router, app)
return router
}
diff --git a/agent/git/pkg/application/handlers.go b/agent/git/pkg/application/handlers.go
index b865c380..fef778f5 100644
--- a/agent/git/pkg/application/handlers.go
+++ b/agent/git/pkg/application/handlers.go
@@ -9,10 +9,18 @@ import (
"github.com/intelops/kubviz/agent/git/api"
"github.com/intelops/kubviz/gitmodels/azuremodel"
"github.com/intelops/kubviz/model"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
)
func (app *Application) PostGitea(c *gin.Context) {
log.Println("gitea handler called...")
+
+ tracer := otel.Tracer("gitea-git")
+ _, span := tracer.Start(c.Request.Context(), "PostGitea")
+ span.SetAttributes(attribute.String("http.method", "POST"))
+ defer span.End()
+
defer log.Println("gitea handler exited...")
event := c.Request.Header.Get(string(model.GiteaHeader))
@@ -30,6 +38,12 @@ func (app *Application) PostGitea(c *gin.Context) {
func (app *Application) PostAzure(c *gin.Context) {
log.Println("azure handler called...")
+
+ tracer := otel.Tracer("azure-git")
+ _, span := tracer.Start(c.Request.Context(), "PostAzure")
+ span.SetAttributes(attribute.String("http.method", "POST"))
+ defer span.End()
+
defer log.Println("azure handler exited...")
jsonData, err := c.GetRawData()
@@ -58,8 +72,14 @@ func (app *Application) PostAzure(c *gin.Context) {
// githubHandler handles the github webhooks post requests.
func (app *Application) PostGithub(c *gin.Context) {
log.Println("github handler called...")
+
+ tracer := otel.Tracer("github-git")
+ _, span := tracer.Start(c.Request.Context(), "PostGithub")
+ span.SetAttributes(attribute.String("http.method", "POST"))
+ defer span.End()
+
defer log.Println("github handler exited...")
-
+
event := c.Request.Header.Get(string(model.GithubHeader))
if len(event) == 0 {
log.Println("error getting the github event from header")
@@ -79,6 +99,12 @@ func (app *Application) PostGithub(c *gin.Context) {
// gitlabHandler handles the github webhooks post requests.
func (app *Application) PostGitlab(c *gin.Context) {
log.Println("gitlab handler called...")
+
+ tracer := otel.Tracer("gitlab-git")
+ _, span := tracer.Start(c.Request.Context(), "PostGitlab")
+ span.SetAttributes(attribute.String("http.method", "POST"))
+ defer span.End()
+
defer log.Println("gitlab handler exited...")
event := c.Request.Header.Get(string(model.GitlabHeader))
@@ -100,6 +126,12 @@ func (app *Application) PostGitlab(c *gin.Context) {
// bitBucketHandler handles the github webhooks post requests.
func (app *Application) PostBitbucket(c *gin.Context) {
log.Println("bitbucket handler called...")
+
+ tracer := otel.Tracer("bitbucket-git")
+ _, span := tracer.Start(c.Request.Context(), "PostBitbucket")
+ span.SetAttributes(attribute.String("http.method", "POST"))
+ defer span.End()
+
defer log.Println("bitbucket handler exited...")
event := c.Request.Header.Get(string(model.BitBucketHeader))
diff --git a/agent/git/pkg/clients/nats_client.go b/agent/git/pkg/clients/nats_client.go
index e9ef06c2..7d6830b4 100644
--- a/agent/git/pkg/clients/nats_client.go
+++ b/agent/git/pkg/clients/nats_client.go
@@ -1,10 +1,15 @@
package clients
import (
+ "context"
"fmt"
"github.com/intelops/kubviz/agent/git/pkg/config"
"github.com/intelops/kubviz/model"
+ "github.com/intelops/kubviz/pkg/mtlsnats"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
"log"
"time"
@@ -30,9 +35,35 @@ func NewNATSContext(conf *config.Config) (*NATSContext, error) {
fmt.Println("Waiting before connecting to NATS at:", conf.NatsAddress)
time.Sleep(1 * time.Second)
- conn, err := nats.Connect(conf.NatsAddress, nats.Name("Github metrics"), nats.Token(conf.NatsToken))
- if err != nil {
- return nil, err
+ //conn, err := nats.Connect(conf.NatsAddress, nats.Name("Github metrics"), nats.Token(conf.NatsToken))
+
+ var conn *nats.Conn
+ var err error
+ var mtlsConfig mtlsnats.MtlsConfig
+
+ if mtlsConfig.IsEnabled {
+ tlsConfig, err := mtlsnats.GetTlsConfig()
+ if err != nil {
+ log.Println("error while getting tls config ", err)
+ time.Sleep(time.Minute * 30)
+ } else {
+ conn, err = nats.Connect(conf.NatsAddress,
+ nats.Name("Github metrics"),
+ nats.Token(conf.NatsToken),
+ nats.Secure(tlsConfig),
+ )
+ if err != nil {
+ log.Println("error while connecting with mtls ", err)
+ }
+ }
+
+ }
+
+ if conn == nil {
+ conn, err = nats.Connect(conf.NatsAddress, nats.Name("Github metrics"), nats.Token(conf.NatsToken))
+ if err != nil {
+ return nil, fmt.Errorf("error while connecting with token: %w", err)
+ }
}
ctx := &NATSContext{
@@ -91,6 +122,13 @@ func (n *NATSContext) Close() {
}
func (n *NATSContext) Publish(metric []byte, repo string, eventkey model.EventKey, eventvalue model.EventValue) error {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("git-nats-client")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "GitPublish")
+ span.SetAttributes(attribute.String("repo-name", repo))
+ defer span.End()
+
msg := nats.NewMsg(eventSubject)
msg.Data = metric
msg.Header.Set("GitProvider", repo)
diff --git a/agent/git/pkg/config/configuration.go b/agent/git/pkg/config/configuration.go
index f48954f8..1a5d66a4 100644
--- a/agent/git/pkg/config/configuration.go
+++ b/agent/git/pkg/config/configuration.go
@@ -3,7 +3,7 @@ package config
// var token string = "UfmrJOYwYCCsgQvxvcfJ3BdI6c8WBbnD"
// var natsurl string = "nats://localhost:4222"
-//Config will have the configuration details
+// Config will have the configuration details
type Config struct {
NatsAddress string `envconfig:"NATS_ADDRESS"`
NatsToken string `envconfig:"NATS_TOKEN"`
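These struct tags are consumed by kelseyhightower/envconfig in main; a minimal sketch of how such a config is populated (the struct mirrors the two fields above, everything else is illustrative):

package main

import (
	"log"

	"github.com/kelseyhightower/envconfig"
)

type Config struct {
	NatsAddress string `envconfig:"NATS_ADDRESS"`
	NatsToken   string `envconfig:"NATS_TOKEN"`
}

func main() {
	var cfg Config
	// envconfig reads NATS_ADDRESS and NATS_TOKEN from the environment
	// and fills the struct according to the tags.
	if err := envconfig.Process("", &cfg); err != nil {
		log.Fatalf("could not parse env config: %v", err)
	}
	log.Printf("connecting to NATS at %s", cfg.NatsAddress)
}
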
diff --git a/agent/kubviz/application/application.go b/agent/kubviz/application/application.go
new file mode 100644
index 00000000..301bc161
--- /dev/null
+++ b/agent/kubviz/application/application.go
@@ -0,0 +1,3 @@
+package application
+
+func Start() {}
diff --git a/agent/kubviz/k8smetrics_agent.go b/agent/kubviz/k8smetrics_agent.go
index 7f28d0a3..d0c0c7f6 100644
--- a/agent/kubviz/k8smetrics_agent.go
+++ b/agent/kubviz/k8smetrics_agent.go
@@ -1,37 +1,45 @@
package main
import (
- "encoding/json"
"log"
"os"
- "strconv"
- "strings"
- "sync"
+
+ "os/signal"
+ "syscall"
"time"
+ //"github.com/go-co-op/gocron"
"github.com/go-co-op/gocron"
"github.com/nats-io/nats.go"
"context"
- "github.com/intelops/kubviz/constants"
- "github.com/intelops/kubviz/model"
+ "github.com/intelops/kubviz/pkg/mtlsnats"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
- "fmt"
+ "github.com/intelops/kubviz/agent/config"
+ "github.com/intelops/kubviz/agent/kubviz/plugins/events"
+
+ "github.com/intelops/kubviz/agent/kubviz/plugins/ketall"
+ "github.com/intelops/kubviz/agent/kubviz/plugins/kubepreupgrade"
+
+ "github.com/intelops/kubviz/agent/kubviz/plugins/kuberhealthy"
+ "github.com/intelops/kubviz/agent/kubviz/plugins/kubescore"
+ "github.com/intelops/kubviz/agent/kubviz/plugins/outdated"
+ "github.com/intelops/kubviz/agent/kubviz/plugins/rakkess"
+
+ "github.com/intelops/kubviz/agent/kubviz/plugins/trivy"
+ "github.com/intelops/kubviz/agent/kubviz/scheduler"
- "github.com/ghodss/yaml"
- v1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/fields"
_ "k8s.io/client-go/plugin/pkg/client/auth/azure"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
- // _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
- "k8s.io/client-go/tools/cache"
+ "github.com/intelops/kubviz/agent/server"
+ //_ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
"k8s.io/client-go/tools/clientcmd"
)
@@ -50,289 +58,125 @@ var (
ClusterName string = os.Getenv("CLUSTER_NAME")
token string = os.Getenv("NATS_TOKEN")
natsurl string = os.Getenv("NATS_ADDRESS")
+
//for local testing provide the location of kubeconfig
- // inside the civo file paste your kubeconfig
- // uncomment this line from Dockerfile.Kubviz (COPY --from=builder /workspace/civo /etc/myapp/civo)
cluster_conf_loc string = os.Getenv("CONFIG_LOCATION")
schedulingIntervalStr string = os.Getenv("SCHEDULING_INTERVAL")
)
-func runTrivyScans(config *rest.Config, js nats.JetStreamContext, wg *sync.WaitGroup, trivyImagescanChan, trivySbomcanChan, trivyK8sMetricsChan chan error) {
- RunTrivyK8sClusterScan(js, trivyK8sMetricsChan)
- RunTrivyImageScans(config, js, trivyImagescanChan)
- RunTrivySbomScan(config, js, trivySbomcanChan)
- wg.Done()
-}
-
func main() {
log.SetFlags(log.LstdFlags | log.Lshortfile)
env := Production
clusterMetricsChan := make(chan error, 1)
+ cfg, err := config.GetAgentConfigurations()
+ if err != nil {
+ log.Fatal("Failed to retrieve agent configurations", err)
+ }
var (
- wg sync.WaitGroup
config *rest.Config
clientset *kubernetes.Clientset
)
- // connecting with nats ...
- nc, err := nats.Connect(natsurl, nats.Name("K8s Metrics"), nats.Token(token))
- checkErr(err)
- // creating a jetstream connection using the nats connection
+
+ var mtlsConfig mtlsnats.MtlsConfig
+ var nc *nats.Conn
+
+ if mtlsConfig.IsEnabled {
+ tlsConfig, err := mtlsnats.GetTlsConfig()
+ if err != nil {
+ log.Println("error while getting tls config ", err)
+ time.Sleep(time.Minute * 30)
+ } else {
+ nc, err = nats.Connect(
+ natsurl,
+ nats.Name("K8s Metrics"),
+ nats.Token(token),
+ nats.Secure(tlsConfig),
+ )
+ if err != nil {
+ log.Println("error while connecting with mtls ", err)
+ }
+ }
+
+ }
+
+ if nc == nil {
+ nc, err = nats.Connect(natsurl, nats.Name("K8s Metrics"), nats.Token(token))
+ events.CheckErr(err)
+ }
js, err := nc.JetStream()
- checkErr(err)
- // creating a stream with stream name METRICS
- err = createStream(js)
- checkErr(err)
- //setupAgent()
+ events.CheckErr(err)
+ err = events.CreateStream(js)
+ events.CheckErr(err)
if env != Production {
config, err = clientcmd.BuildConfigFromFlags("", cluster_conf_loc)
if err != nil {
log.Fatal(err)
}
- clientset = getK8sClient(config)
+ clientset = events.GetK8sClient(config)
} else {
config, err = rest.InClusterConfig()
if err != nil {
log.Fatal(err)
}
- clientset = getK8sClient(config)
- }
-
- // starting the endless go routine to monitor the cluster
- go publishMetrics(clientset, js, clusterMetricsChan)
-
- // starting all the go routines
- collectAndPublishMetrics := func() {
- // error channels declared for the go routines
- outdatedErrChan := make(chan error, 1)
- kubePreUpgradeChan := make(chan error, 1)
- getAllResourceChan := make(chan error, 1)
- trivyK8sMetricsChan := make(chan error, 1)
- kubescoreMetricsChan := make(chan error, 1)
- trivyImagescanChan := make(chan error, 1)
- trivySbomcanChan := make(chan error, 1)
- RakeesErrChan := make(chan error, 1)
- // Start a goroutine to handle errors
- doneChan := make(chan bool)
- go func() {
- // for loop will wait for the error channels
- // logs if any error occurs
- for {
- select {
- case err := <-outdatedErrChan:
- if err != nil {
- log.Println(err)
- }
- case err := <-kubePreUpgradeChan:
- if err != nil {
- log.Println(err)
- }
- case err := <-getAllResourceChan:
- if err != nil {
- log.Println(err)
- }
- case err := <-clusterMetricsChan:
- if err != nil {
- log.Println(err)
- }
- case err := <-kubescoreMetricsChan:
- if err != nil {
- log.Println(err)
- }
- case err := <-trivyImagescanChan:
- if err != nil {
- log.Println(err)
- }
- case err := <-trivySbomcanChan:
- if err != nil {
- log.Println(err)
- }
- case err := <-trivyK8sMetricsChan:
- if err != nil {
- log.Println(err)
- }
- case err := <-RakeesErrChan:
- if err != nil {
- log.Println(err)
- }
- case <-doneChan:
- return // All other goroutines have finished, so exit the goroutine
- }
- }
- }()
- wg.Add(7) // Initialize the WaitGroup for the seven goroutines
- // ... start other goroutines ...
- go outDatedImages(config, js, &wg, outdatedErrChan)
- go KubePreUpgradeDetector(config, js, &wg, kubePreUpgradeChan)
- go GetAllResources(config, js, &wg, getAllResourceChan)
- go RakeesOutput(config, js, &wg, RakeesErrChan)
- go getK8sEvents(clientset)
- // Run these functions sequentially within a single goroutine using the wrapper function
- go runTrivyScans(config, js, &wg, trivyImagescanChan, trivySbomcanChan, trivyK8sMetricsChan)
- go RunKubeScore(clientset, js, &wg, kubescoreMetricsChan)
- wg.Wait()
- // once the go routines completes we will close the error channels
- close(outdatedErrChan)
- close(kubePreUpgradeChan)
- close(getAllResourceChan)
- // close(clusterMetricsChan)
- close(kubescoreMetricsChan)
- close(trivyImagescanChan)
- close(trivySbomcanChan)
- close(trivyK8sMetricsChan)
- close(RakeesErrChan)
- // Signal that all other goroutines have finished
- doneChan <- true
- close(doneChan)
- }
- collectAndPublishMetrics()
- if schedulingIntervalStr == "" {
- schedulingIntervalStr = "20m" // Default value, e.g., 20 minutes
- }
- schedulingInterval, err := time.ParseDuration(schedulingIntervalStr)
- if err != nil {
- log.Fatalf("Failed to parse SCHEDULING_INTERVAL: %v", err)
- }
- s := gocron.NewScheduler(time.UTC)
- s.Every(schedulingInterval).Do(collectAndPublishMetrics) // Run immediately and then at the scheduled interval
- s.StartBlocking() // Blocks the main function
-}
-
-// publishMetrics publishes stream of events
-// with subject "METRICS.created"
-func publishMetrics(clientset *kubernetes.Clientset, js nats.JetStreamContext, errCh chan error) {
- watchK8sEvents(clientset, js)
-
- errCh <- nil
-}
-
-func publishK8sMetrics(id string, mtype string, mdata *v1.Event, js nats.JetStreamContext) (bool, error) {
- metrics := model.Metrics{
- ID: id,
- Type: mtype,
- Event: mdata,
- ClusterName: ClusterName,
- }
- metricsJson, _ := json.Marshal(metrics)
- _, err := js.Publish(constants.EventSubject, metricsJson)
- if err != nil {
- return true, err
+ clientset = events.GetK8sClient(config)
}
- log.Printf("Metrics with ID:%s has been published\n", id)
- return false, nil
-}
-// createStream creates a stream by using JetStreamContext
-func createStream(js nats.JetStreamContext) error {
- // Check if the METRICS stream already exists; if not, create it.
- stream, err := js.StreamInfo(constants.StreamName)
- log.Printf("Retrieved stream %s", fmt.Sprintf("%v", stream))
+ tp, err := opentelemetry.InitTracer()
if err != nil {
- log.Printf("Error getting stream %s", err)
- }
- if stream == nil {
- log.Printf("creating stream %q and subjects %q", constants.StreamName, constants.StreamSubjects)
- _, err = js.AddStream(&nats.StreamConfig{
- Name: constants.StreamName,
- Subjects: []string{constants.StreamSubjects},
- })
- checkErr(err)
+ log.Fatal(err)
}
- return nil
-
-}
-
-func getK8sClient(config *rest.Config) *kubernetes.Clientset {
- // create the clientset
- clientset, err := kubernetes.NewForConfig(config)
- checkErr(err)
- return clientset
-}
+ defer func() {
+ if err := tp.Shutdown(context.Background()); err != nil {
+ log.Printf("Error shutting down tracer provider: %v", err)
+ }
+ }()
-func getK8sPods(clientset *kubernetes.Clientset) string {
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
- defer cancel()
- pods, err := clientset.CoreV1().Pods("").List(ctx, metav1.ListOptions{})
- checkErr(err)
- var sb strings.Builder
- for i, pod := range pods.Items {
- sb.WriteString("Name-" + strconv.Itoa(i) + ": ")
- sb.WriteString(pod.Name)
- sb.WriteString(" ")
- sb.WriteString("Namespace-" + strconv.Itoa(i) + ": ")
- sb.WriteString(pod.Namespace)
- sb.WriteString(" ")
+ go events.PublishMetrics(clientset, js, clusterMetricsChan)
+ if cfg.KuberHealthyEnable {
+ go kuberhealthy.StartKuberHealthy(js)
}
- return sb.String()
-}
-
-func getK8sNodes(clientset *kubernetes.Clientset) string {
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
- defer cancel()
- nodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
- checkErr(err)
- var sb strings.Builder
- for i, node := range nodes.Items {
- sb.WriteString("Name-" + strconv.Itoa(i) + ": ")
- sb.WriteString(node.Name)
+ go server.StartServer()
+ collectAndPublishMetrics := func() {
+ err := outdated.OutDatedImages(config, js)
+ events.LogErr(err)
+ err = kubepreupgrade.KubePreUpgradeDetector(config, js)
+ events.LogErr(err)
+ err = ketall.GetAllResources(config, js)
+ events.LogErr(err)
+ err = rakkess.RakeesOutput(config, js)
+ events.LogErr(err)
+ err = trivy.RunTrivySbomScan(config, js)
+ events.LogErr(err)
+ err = trivy.RunTrivyImageScans(config, js)
+ events.LogErr(err)
+ err = trivy.RunTrivyK8sClusterScan(js)
+ events.LogErr(err)
+ err = kubescore.RunKubeScore(clientset, js)
+ events.LogErr(err)
}
- return sb.String()
-}
-
-func getK8sEvents(clientset *kubernetes.Clientset) string {
- ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
- defer cancel()
- events, err := clientset.CoreV1().Events("").List(ctx, metav1.ListOptions{})
- checkErr(err)
- j, err := json.MarshalIndent(events, "", " ")
- checkErr(err)
- log.Printf(string(j))
- return string(j)
-}
+ collectAndPublishMetrics()
+ if cfg.SchedulerEnable { // cfg.SchedulerEnable toggles the built-in scheduler; when false, fall back to the gocron interval loop below.
+ scheduler := scheduler.InitScheduler(config, js, *cfg, clientset)
-func checkErr(err error) {
- if err != nil {
- log.Fatal(err)
- }
-}
+ // Start the scheduler
+ scheduler.Start()
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
+ <-signals
-func watchK8sEvents(clientset *kubernetes.Clientset, js nats.JetStreamContext) {
- watchlist := cache.NewListWatchFromClient(
- clientset.CoreV1().RESTClient(),
- "events",
- v1.NamespaceAll,
- fields.Everything(),
- )
- _, controller := cache.NewInformer( // also take a look at NewSharedIndexInformer
- watchlist,
- &v1.Event{},
- 0, //Duration is int64
- cache.ResourceEventHandlerFuncs{
- AddFunc: func(obj interface{}) {
- event := obj.(*v1.Event)
- fmt.Printf("Event namespace: %s \n", event.GetNamespace())
- y, err := yaml.Marshal(event)
- if err != nil {
- fmt.Printf("err: %v\n", err)
- }
- fmt.Printf("Add event: %s \n", y)
- publishK8sMetrics(string(event.ObjectMeta.UID), "ADD", event, js)
- },
- DeleteFunc: func(obj interface{}) {
- event := obj.(*v1.Event)
- fmt.Printf("Delete event: %s \n", obj)
- publishK8sMetrics(string(event.ObjectMeta.UID), "DELETE", event, js)
- },
- UpdateFunc: func(oldObj, newObj interface{}) {
- event := newObj.(*v1.Event)
- fmt.Printf("Change event \n")
- publishK8sMetrics(string(event.ObjectMeta.UID), "UPDATE", event, js)
- },
- },
- )
- stop := make(chan struct{})
- defer close(stop)
- go controller.Run(stop)
- for {
- time.Sleep(time.Second)
+ scheduler.Stop()
+ } else {
+ if schedulingIntervalStr == "" {
+ schedulingIntervalStr = "20m"
+ }
+ schedulingInterval, err := time.ParseDuration(schedulingIntervalStr)
+ if err != nil {
+ log.Fatalf("Failed to parse SCHEDULING_INTERVAL: %v", err)
+ }
+ s := gocron.NewScheduler(time.UTC)
+ s.Every(schedulingInterval).Do(func() {
+ collectAndPublishMetrics()
+ })
+ s.StartBlocking()
}
}
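Stripped of the plugin calls, the fallback scheduling path above reduces to: parse SCHEDULING_INTERVAL, run the collector once, then keep running it on that interval with go-co-op/gocron. A minimal sketch with a stand-in collector:

package main

import (
	"log"
	"os"
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	intervalStr := os.Getenv("SCHEDULING_INTERVAL")
	if intervalStr == "" {
		intervalStr = "20m" // same default as the agent
	}
	interval, err := time.ParseDuration(intervalStr)
	if err != nil {
		log.Fatalf("Failed to parse SCHEDULING_INTERVAL: %v", err)
	}

	collect := func() {
		// Stand-in for the plugin runs (outdated, kubepug, ketall, trivy, ...).
		log.Println("collecting and publishing metrics")
	}

	collect() // run once immediately, then on the schedule
	s := gocron.NewScheduler(time.UTC)
	if _, err := s.Every(interval).Do(collect); err != nil {
		log.Fatal(err)
	}
	s.StartBlocking()
}
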
diff --git a/agent/kubviz/kube_score.go b/agent/kubviz/kube_score.go
deleted file mode 100644
index f7a2b838..00000000
--- a/agent/kubviz/kube_score.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package main
-
-import (
- "context"
- "encoding/json"
- "github.com/google/uuid"
- "github.com/intelops/kubviz/constants"
- "github.com/intelops/kubviz/model"
- "github.com/nats-io/nats.go"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/kubernetes"
- "log"
- exec "os/exec"
- "sync"
-)
-
-func RunKubeScore(clientset *kubernetes.Clientset, js nats.JetStreamContext, wg *sync.WaitGroup, errCh chan error) {
- defer wg.Done()
-
- nsList, err := clientset.CoreV1().
- Namespaces().
- List(context.Background(), metav1.ListOptions{})
- if err != nil {
- log.Println("Error occurred while getting client set for kube-score: ", err)
- return
- }
-
- log.Printf("Namespace size: %d", len(nsList.Items))
- for _, n := range nsList.Items {
- log.Printf("Publishing kube-score recommendations for namespace: %s\n", n.Name)
- publish(n.Name, js, errCh)
- }
-}
-
-func publish(ns string, js nats.JetStreamContext, errCh chan error) {
- cmd := "kubectl api-resources --verbs=list --namespaced -o name | xargs -n1 -I{} sh -c \"kubectl get {} -n " + ns + " -oyaml && echo ---\" | kube-score score - "
- log.Printf("Command: %#v,", cmd)
- out, err := executeCommand(cmd)
- if err != nil {
- log.Println("Error occurred while running kube-score: ", err)
- errCh <- err
- }
- err = publishKubescoreMetrics(uuid.New().String(), ns, out, js)
- if err != nil {
- errCh <- err
- }
- errCh <- nil
-}
-
-func publishKubescoreMetrics(id string, ns string, recommendations string, js nats.JetStreamContext) error {
- metrics := model.KubeScoreRecommendations{
- ID: id,
- Namespace: ns,
- Recommendations: recommendations,
- ClusterName: ClusterName,
- }
- metricsJson, _ := json.Marshal(metrics)
- _, err := js.Publish(constants.KUBESCORE_SUBJECT, metricsJson)
- if err != nil {
- return err
- }
- log.Printf("Recommendations with ID:%s has been published\n", id)
- log.Printf("Recommendations :%#v", recommendations)
- return nil
-}
-
-func executeCommand(command string) (string, error) {
- cmd := exec.Command("/bin/sh", "-c", command)
- stdout, err := cmd.Output()
-
- if err != nil {
- log.Println("Execute Command Error", err.Error())
- }
-
- // Print the output
- log.Println(string(stdout))
- return string(stdout), nil
-}
diff --git a/agent/kubviz/plugins/events/event_metrics_utils.go b/agent/kubviz/plugins/events/event_metrics_utils.go
new file mode 100644
index 00000000..17ef114f
--- /dev/null
+++ b/agent/kubviz/plugins/events/event_metrics_utils.go
@@ -0,0 +1,228 @@
+package events
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/intelops/kubviz/constants"
+ "github.com/intelops/kubviz/model"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "github.com/nats-io/nats.go"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/cache"
+)
+
+var ClusterName string = os.Getenv("CLUSTER_NAME")
+
+// publishMetrics publishes stream of events
+// with subject "METRICS.created"
+func PublishMetrics(clientset *kubernetes.Clientset, js nats.JetStreamContext, errCh chan error) {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("kubviz-publish-metrics")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "publishMetrics")
+ span.SetAttributes(attribute.String("kubviz-agent", "publish-metrics"))
+ defer span.End()
+
+ watchK8sEvents(clientset, js)
+ errCh <- nil
+}
+
+func publishK8sMetrics(id string, mtype string, mdata *v1.Event, js nats.JetStreamContext, imageName string) (bool, error) {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("kubviz-publish-k8smetrics")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "publishK8sMetrics")
+ span.SetAttributes(attribute.String("kubviz-agent", "publish-k8smetrics"))
+ defer span.End()
+
+ metrics := model.Metrics{
+ ID: id,
+ Type: mtype,
+ Event: mdata,
+ ClusterName: ClusterName,
+ ImageName: imageName,
+ }
+ metricsJson, _ := json.Marshal(metrics)
+ _, err := js.Publish(constants.EventSubject, metricsJson)
+ if err != nil {
+ return true, err
+ }
+ log.Printf("Metrics with ID:%s has been published\n", id)
+ return false, nil
+}
+
+func getK8sPodImages(clientset *kubernetes.Clientset, namespace, podName string) ([]string, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
+ if err != nil {
+ return nil, err
+ }
+
+ var images []string
+ for _, container := range pod.Spec.Containers {
+ images = append(images, container.Image)
+ }
+
+ if len(images) == 0 {
+ return nil, errors.New("no containers found in the pod")
+ }
+
+ return images, nil
+}
+
+// createStream creates a stream by using JetStreamContext
+func CreateStream(js nats.JetStreamContext) error {
+ // Check if the METRICS stream already exists; if not, create it.
+ stream, err := js.StreamInfo(constants.StreamName)
+ log.Printf("Retrieved stream %s", fmt.Sprintf("%v", stream))
+ if err != nil {
+ log.Printf("Error getting stream %s", err)
+ }
+ if stream == nil {
+ log.Printf("creating stream %q and subjects %q", constants.StreamName, constants.StreamSubjects)
+ _, err = js.AddStream(&nats.StreamConfig{
+ Name: constants.StreamName,
+ Subjects: []string{constants.StreamSubjects},
+ })
+ CheckErr(err)
+ }
+ return nil
+
+}
+
+func GetK8sClient(config *rest.Config) *kubernetes.Clientset {
+ // create the clientset
+ clientset, err := kubernetes.NewForConfig(config)
+ CheckErr(err)
+ return clientset
+}
+
+func GetK8sPods(clientset *kubernetes.Clientset) string {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ pods, err := clientset.CoreV1().Pods("").List(ctx, metav1.ListOptions{})
+ CheckErr(err)
+ var sb strings.Builder
+ for i, pod := range pods.Items {
+ sb.WriteString("Name-" + strconv.Itoa(i) + ": ")
+ sb.WriteString(pod.Name)
+ sb.WriteString(" ")
+ sb.WriteString("Namespace-" + strconv.Itoa(i) + ": ")
+ sb.WriteString(pod.Namespace)
+ sb.WriteString(" ")
+ }
+ return sb.String()
+}
+
+func GetK8sNodes(clientset *kubernetes.Clientset) string {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ nodes, err := clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
+ CheckErr(err)
+ var sb strings.Builder
+ for i, node := range nodes.Items {
+ sb.WriteString("Name-" + strconv.Itoa(i) + ": ")
+ sb.WriteString(node.Name)
+ }
+ return sb.String()
+}
+
+func GetK8sEvents(clientset *kubernetes.Clientset) string {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ events, err := clientset.CoreV1().Events("").List(ctx, metav1.ListOptions{})
+ CheckErr(err)
+ j, err := json.MarshalIndent(events, "", " ")
+ CheckErr(err)
+ log.Printf("%#v", string(j))
+ return string(j)
+}
+
+func CheckErr(err error) {
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+func LogErr(err error) {
+ if err != nil {
+ log.Println(err)
+ }
+}
+func watchK8sEvents(clientset *kubernetes.Clientset, js nats.JetStreamContext) {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("kubviz-watch-k8sevents")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "watchK8sEvents")
+ span.SetAttributes(attribute.String("kubviz-agent", "watch-k8sevents"))
+ defer span.End()
+
+ watchlist := cache.NewListWatchFromClient(
+ clientset.CoreV1().RESTClient(),
+ "events",
+ v1.NamespaceAll,
+ fields.Everything(),
+ )
+ _, controller := cache.NewInformer(
+ watchlist,
+ &v1.Event{},
+ 0, // Duration is int64
+ cache.ResourceEventHandlerFuncs{
+ AddFunc: func(obj interface{}) {
+ event := obj.(*v1.Event)
+ images, err := getK8sPodImages(clientset, event.InvolvedObject.Namespace, event.InvolvedObject.Name)
+ if err != nil {
+ log.Println("Error retrieving image names:", err)
+ return
+ }
+ for _, image := range images {
+ publishK8sMetrics(string(event.ObjectMeta.UID), "ADD", event, js, image)
+ }
+ },
+ DeleteFunc: func(obj interface{}) {
+ event := obj.(*v1.Event)
+ images, err := getK8sPodImages(clientset, event.InvolvedObject.Namespace, event.InvolvedObject.Name)
+ if err != nil {
+ log.Println("Error retrieving image names:", err)
+ return
+ }
+ for _, image := range images {
+ publishK8sMetrics(string(event.ObjectMeta.UID), "DELETE", event, js, image)
+ }
+ },
+ UpdateFunc: func(oldObj, newObj interface{}) {
+ event := newObj.(*v1.Event)
+ images, err := getK8sPodImages(clientset, event.InvolvedObject.Namespace, event.InvolvedObject.Name)
+ if err != nil {
+ log.Println("Error retrieving image names:", err)
+ return
+ }
+ for _, image := range images {
+ publishK8sMetrics(string(event.ObjectMeta.UID), "UPDATE", event, js, image)
+ }
+ },
+ },
+ )
+ stop := make(chan struct{})
+ defer close(stop)
+ go controller.Run(stop)
+
+ for {
+ time.Sleep(time.Second)
+ }
+}
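watchK8sEvents builds its own ListWatch and informer; the same watch can be expressed through client-go's shared informer factory. A hedged sketch of that alternative, shown only for comparison — it is not what the agent ships:

package main

import (
	"log"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	config, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	factory := informers.NewSharedInformerFactory(clientset, 0)
	informer := factory.Core().V1().Events().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			event := obj.(*v1.Event)
			log.Printf("ADD event %s in %s", event.Name, event.Namespace)
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			event := newObj.(*v1.Event)
			log.Printf("UPDATE event %s", event.Name)
		},
		DeleteFunc: func(obj interface{}) {
			log.Printf("DELETE event")
		},
	})

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	for {
		time.Sleep(time.Second)
	}
}
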
diff --git a/agent/kubviz/ketall.go b/agent/kubviz/plugins/ketall/ketall.go
similarity index 81%
rename from agent/kubviz/ketall.go
rename to agent/kubviz/plugins/ketall/ketall.go
index f1423564..8d91b6ab 100644
--- a/agent/kubviz/ketall.go
+++ b/agent/kubviz/plugins/ketall/ketall.go
@@ -1,12 +1,16 @@
-package main
+package ketall
import (
"context"
"encoding/json"
- "github.com/intelops/kubviz/constants"
- "sync"
+ "os"
"time"
+ "github.com/intelops/kubviz/constants"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+
"github.com/intelops/kubviz/model"
"github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
@@ -16,6 +20,8 @@ import (
"k8s.io/client-go/rest"
)
+var ClusterName string = os.Getenv("CLUSTER_NAME")
+
func PublishAllResources(result model.Resource, js nats.JetStreamContext) error {
metrics := result
metrics.ClusterName = ClusterName
@@ -28,8 +34,14 @@ func PublishAllResources(result model.Resource, js nats.JetStreamContext) error
return nil
}
-func GetAllResources(config *rest.Config, js nats.JetStreamContext, wg *sync.WaitGroup, errCh chan error) {
- defer wg.Done()
+func GetAllResources(config *rest.Config, js nats.JetStreamContext) error {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("ketall")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "GetAllResources")
+ span.SetAttributes(attribute.String("ketall-plugin-agent", "ketall-output"))
+ defer span.End()
+
// TODO: upto this uncomment for production
// Create a new discovery client to discover all resources in the cluster
dc := discovery.NewDiscoveryClientForConfigOrDie(config)
@@ -37,19 +49,16 @@ func GetAllResources(config *rest.Config, js nats.JetStreamContext, wg *sync.Wai
// Create a new dynamic client to list resources in the cluster
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
- log.Error(err)
- errCh <- err
+ return err
}
// Get a list of all available API groups and versions in the cluster
resourceLists, err := dc.ServerPreferredResources()
if err != nil {
- log.Error(err)
- errCh <- err
+ return err
}
gvrs, err := discovery.GroupVersionResources(resourceLists)
if err != nil {
- panic(err)
- errCh <- err
+ return err
}
// Iterate over all available API groups and versions and list all resources in each group
for gvr := range gvrs {
@@ -81,9 +90,9 @@ func GetAllResources(config *rest.Config, js nats.JetStreamContext, wg *sync.Wai
}
err := PublishAllResources(resource, js)
if err != nil {
- errCh <- err
+ return err
}
}
}
- errCh <- nil
+ return nil
}
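The core of GetAllResources is the dynamic client's List over each GroupVersionResource returned by discovery; a compact sketch of that call in isolation, using pods as an example GVR:

package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

func main() {
	config, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	dynamicClient, err := dynamic.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	// Example GVR; GetAllResources iterates every GVR returned by discovery.
	gvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
	list, err := dynamicClient.Resource(gvr).Namespace("").List(context.Background(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, item := range list.Items {
		fmt.Println(item.GetNamespace(), item.GetName())
	}
}
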
diff --git a/agent/kubviz/kubePreUpgrade.go b/agent/kubviz/plugins/kubepreupgrade/kubePreUpgrade.go
similarity index 92%
rename from agent/kubviz/kubePreUpgrade.go
rename to agent/kubviz/plugins/kubepreupgrade/kubePreUpgrade.go
index 9c993a59..4fb00b35 100644
--- a/agent/kubviz/kubePreUpgrade.go
+++ b/agent/kubviz/plugins/kubepreupgrade/kubePreUpgrade.go
@@ -1,15 +1,18 @@
-package main
+package kubepreupgrade
import (
"context"
"encoding/json"
"fmt"
- "github.com/intelops/kubviz/constants"
"io"
"net/http"
"os"
"strings"
- "sync"
+
+ "github.com/intelops/kubviz/constants"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -25,6 +28,8 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
+var ClusterName string = os.Getenv("CLUSTER_NAME")
+
const (
baseURL = "https://raw.githubusercontent.com/kubernetes/kubernetes"
fileURL = "api/openapi-spec/swagger.json"
@@ -57,7 +62,6 @@ var result *model.Result
func publishK8sDepricated_Deleted_Api(result *model.Result, js nats.JetStreamContext) error {
for _, deprecatedAPI := range result.DeprecatedAPIs {
deprecatedAPI.ClusterName = ClusterName
- fmt.Println("deprecatedAPI", deprecatedAPI)
deprecatedAPIJson, _ := json.Marshal(deprecatedAPI)
_, err := js.Publish(constants.EventSubject_depricated, deprecatedAPIJson)
if err != nil {
@@ -79,29 +83,34 @@ func publishK8sDepricated_Deleted_Api(result *model.Result, js nats.JetStreamCon
return nil
}
-func KubePreUpgradeDetector(config *rest.Config, js nats.JetStreamContext, wg *sync.WaitGroup, errCh chan error) {
- defer wg.Done()
- swaggerdir, err := os.MkdirTemp("", "kubepug")
+func KubePreUpgradeDetector(config *rest.Config, js nats.JetStreamContext) error {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("kubepreupgrade")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "KubePreUpgradeDetector")
+ span.SetAttributes(attribute.String("kubepug-plugin-agent", "kubepug-output"))
+ defer span.End()
+
+ pvcMountPath := "/mnt/agent/kbz"
+ uniqueDir := fmt.Sprintf("%s/kubepug", pvcMountPath)
+ err := os.MkdirAll(uniqueDir, 0755)
if err != nil {
- errCh <- err
+ return err
}
- filename := fmt.Sprintf("%s/swagger-%s.json", swaggerdir, k8sVersion)
+ filename := fmt.Sprintf("%s/swagger-%s.json", uniqueDir, k8sVersion)
url := fmt.Sprintf("%s/%s/%s", baseURL, k8sVersion, fileURL)
err = downloadFile(filename, url)
if err != nil {
- errCh <- err
+ return err
}
defer os.RemoveAll(filename)
- swaggerfile := filename
- kubernetesAPIs, err := PopulateKubeAPIMap(swaggerfile)
+ kubernetesAPIs, err := PopulateKubeAPIMap(filename)
if err != nil {
- errCh <- err
+ return err
}
result = getResults(config, kubernetesAPIs)
err = publishK8sDepricated_Deleted_Api(result, js)
- errCh <- err
- // b, _ := json.MarshalIndent(result, "", " ")
- // fmt.Printf("%s", string(b))
+ return err
}
func PopulateKubeAPIMap(swagfile string) (model.KubernetesAPIs, error) {
@@ -181,23 +190,32 @@ func getKubeAPIValues(value map[string]interface{}) (model.KubeAPI, bool) {
return model.KubeAPI{}, false
}
func downloadFile(filename, url string) error {
- log.Debugf("Downloading file from %s", url)
resp, err := http.Get(url)
if err != nil {
log.Error(err)
+ return err
}
if resp.StatusCode > 305 {
log.Errorf("could not download the swagger file %s", url)
+ return fmt.Errorf("failed to download file, status code: %d", resp.StatusCode)
}
+ contentLength := resp.ContentLength
+ log.Infof("The size of the file to be downloaded for kubepreupgrade plugin is %d bytes", contentLength)
+
defer resp.Body.Close()
out, err := os.Create(filename)
if err != nil {
log.Error(err)
}
defer out.Close()
- _, err = io.Copy(out, resp.Body)
+ bytesCopied, err := io.Copy(out, resp.Body)
+ if err != nil {
+ log.WithError(err).Error("Failed to copy the file contents")
+ return err
+ }
+ log.Infof("Downloaded %d bytes for file %s", bytesCopied, filename)
- return err
+ return nil
}
func getGroupVersionKind(value map[string]interface{}) (group, version, kind string) {
diff --git a/agent/kubviz/plugins/kuberhealthy/kuberhealthy.go b/agent/kubviz/plugins/kuberhealthy/kuberhealthy.go
new file mode 100644
index 00000000..2ae66ccd
--- /dev/null
+++ b/agent/kubviz/plugins/kuberhealthy/kuberhealthy.go
@@ -0,0 +1,74 @@
+package kuberhealthy
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "time"
+
+ "github.com/intelops/kubviz/agent/config"
+ "github.com/intelops/kubviz/constants"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "github.com/kuberhealthy/kuberhealthy/v2/pkg/health"
+ "github.com/nats-io/nats.go"
+ "go.opentelemetry.io/otel"
+)
+
+func StartKuberHealthy(js nats.JetStreamContext) {
+ khConfig, err := config.GetKuberHealthyConfig()
+ if err != nil {
+ log.Fatalf("Error getting Kuberhealthy config: %v", err)
+ }
+
+ ticker := time.NewTicker(khConfig.PollInterval)
+ defer ticker.Stop()
+
+ for range ticker.C {
+ if err := pollAndPublishKuberhealthy(khConfig.KuberhealthyURL, js); err != nil {
+ log.Printf("Error polling and publishing Kuberhealthy metrics: %v", err)
+ }
+ }
+}
+func pollAndPublishKuberhealthy(url string, js nats.JetStreamContext) error {
+ resp, err := http.Get(url)
+ if err != nil {
+ return fmt.Errorf("error making GET request to Kuberhealthy: %w", err)
+ }
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("error reading response body: %w", err)
+ }
+
+ var state health.State
+ if err := json.Unmarshal(body, &state); err != nil {
+ return fmt.Errorf("error unmarshaling response: %w", err)
+ }
+
+ return PublishKuberhealthyMetrics(js, state)
+}
+
+func PublishKuberhealthyMetrics(js nats.JetStreamContext, state health.State) error {
+ ctx := context.Background()
+ tracer := otel.Tracer("kuberhealthy")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "PublishKuberhealthyMetrics")
+ defer span.End()
+
+ metricsJSON, err := json.Marshal(state)
+ if err != nil {
+ log.Printf("Error marshaling metrics of kuberhealthy %v", err)
+ return err
+ }
+
+ if _, err := js.Publish(constants.KUBERHEALTHY_SUBJECT, metricsJSON); err != nil {
+ log.Printf("Error publishing metrics for kuberhealthy %v", err)
+ return err
+ }
+
+ log.Printf("Kuberhealthy metrics have been published")
+ return nil
+}
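StartKuberHealthy polls until the process exits; if a stoppable variant is ever wanted, the ticker loop generalizes to a context-driven form such as the sketch below — a design variation, not code from this change, and the URL is a placeholder:

package main

import (
	"context"
	"log"
	"net/http"
	"time"
)

// pollLoop calls poll on every tick until ctx is cancelled.
func pollLoop(ctx context.Context, url string, interval time.Duration, poll func(url string) error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := poll(url); err != nil {
				log.Printf("poll error: %v", err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	// URL is a placeholder; the real one comes from the Kuberhealthy config.
	pollLoop(ctx, "http://kuberhealthy.kuberhealthy.svc.cluster.local", 30*time.Second, func(url string) error {
		resp, err := http.Get(url)
		if err != nil {
			return err
		}
		return resp.Body.Close()
	})
}
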
diff --git a/agent/kubviz/plugins/kubescore/kube_score.go b/agent/kubviz/plugins/kubescore/kube_score.go
new file mode 100644
index 00000000..660aa175
--- /dev/null
+++ b/agent/kubviz/plugins/kubescore/kube_score.go
@@ -0,0 +1,106 @@
+package kubescore
+
+import (
+ "context"
+ "encoding/json"
+ "log"
+ "os"
+ exec "os/exec"
+
+ "github.com/google/uuid"
+ "github.com/intelops/kubviz/constants"
+ "github.com/intelops/kubviz/model"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "github.com/nats-io/nats.go"
+ "github.com/zegl/kube-score/renderer/json_v2"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+)
+
+var ClusterName string = os.Getenv("CLUSTER_NAME")
+
+func RunKubeScore(clientset *kubernetes.Clientset, js nats.JetStreamContext) error {
+ nsList, err := clientset.CoreV1().
+ Namespaces().
+ List(context.Background(), metav1.ListOptions{})
+ if err != nil {
+ log.Println("Error occurred while getting client set for kube-score: ", err)
+ return err
+ }
+
+ log.Printf("Namespace size: %d", len(nsList.Items))
+ for _, n := range nsList.Items {
+ log.Printf("Publishing kube-score recommendations for namespace: %s\n", n.Name)
+ publish(n.Name, js)
+ }
+ return nil
+}
+
+func publish(ns string, js nats.JetStreamContext) error {
+ var report []json_v2.ScoredObject
+ cmd := "kubectl api-resources --verbs=list --namespaced -o name | xargs -n1 -I{} sh -c \"kubectl get {} -n " + ns + " -oyaml && echo ---\" | kube-score score - -o json"
+ log.Printf("Command: %#v,", cmd)
+ out, err := ExecuteCommand(cmd)
+ if err != nil {
+ log.Println("Error occurred while running kube-score: ", err)
+ return err
+ }
+ // Parse the kube-score JSON output into structured report objects.
+ err = json.Unmarshal([]byte(out), &report)
+ if err != nil {
+ log.Printf("Error occurred while Unmarshalling json: %v", err)
+ return err
+ }
+
+ err = publishKubescoreMetrics(report, js)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func publishKubescoreMetrics(report []json_v2.ScoredObject, js nats.JetStreamContext) error {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("kubescore")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "publishKubescoreMetrics")
+ span.SetAttributes(attribute.String("kubescore-plugin-agent", "kubescore-output"))
+ defer span.End()
+
+ metrics := model.KubeScoreRecommendations{
+ ID: uuid.New().String(),
+ ClusterName: ClusterName,
+ Report: report,
+ }
+ metricsJson, _ := json.Marshal(metrics)
+ _, err := js.Publish(constants.KUBESCORE_SUBJECT, metricsJson)
+ if err != nil {
+ return err
+ }
+ //log.Printf("Recommendations with ID:%s has been published\n", id)
+ log.Printf("Recommendations :%#v", report)
+ return nil
+}
+
+func ExecuteCommand(command string) (string, error) {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("kubescore")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "executeCommand")
+ span.SetAttributes(attribute.String("kubescore-agent", "kubescore-command-running"))
+ defer span.End()
+
+ cmd := exec.Command("/bin/sh", "-c", command)
+ stdout, err := cmd.Output()
+
+ if err != nil {
+ log.Println("Execute Command Error", err.Error())
+ }
+
+ // Print the output
+ log.Println(string(stdout))
+ return string(stdout), nil
+}
diff --git a/agent/kubviz/outdated.go b/agent/kubviz/plugins/outdated/outdated.go
similarity index 93%
rename from agent/kubviz/outdated.go
rename to agent/kubviz/plugins/outdated/outdated.go
index 68ec985e..975c510f 100644
--- a/agent/kubviz/outdated.go
+++ b/agent/kubviz/plugins/outdated/outdated.go
@@ -1,11 +1,10 @@
-package main
+package outdated
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
- "github.com/intelops/kubviz/constants"
"log"
"os"
"regexp"
@@ -15,10 +14,15 @@ import (
"sync"
"time"
+ "github.com/intelops/kubviz/constants"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+
"github.com/intelops/kubviz/model"
"github.com/nats-io/nats.go"
- "github.com/docker/docker/api/types"
+ types "github.com/docker/docker/api/types/registry"
"github.com/genuinetools/reg/registry"
semver "github.com/hashicorp/go-version"
"github.com/pkg/errors"
@@ -27,6 +31,8 @@ import (
"k8s.io/client-go/rest"
)
+var ClusterName string = os.Getenv("CLUSTER_NAME")
+
const (
maxImageLength = 50
maxTagLength = 50
@@ -55,6 +61,13 @@ func truncateTagName(tagName string) string {
return truncatedTagName
}
func PublishOutdatedImages(out model.CheckResultfinal, js nats.JetStreamContext) error {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("outdated-images")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "PublishOutdatedImages")
+ span.SetAttributes(attribute.String("outdated-plugin-agent", "outdated-output"))
+ defer span.End()
+
metrics := out
metrics.ClusterName = ClusterName
metricsJson, _ := json.Marshal(metrics)
@@ -66,12 +79,11 @@ func PublishOutdatedImages(out model.CheckResultfinal, js nats.JetStreamContext)
return nil
}
-func outDatedImages(config *rest.Config, js nats.JetStreamContext, wg *sync.WaitGroup, errCh chan error) {
- defer wg.Done()
+func OutDatedImages(config *rest.Config, js nats.JetStreamContext) error {
images, err := ListImages(config)
if err != nil {
log.Println("unable to list images")
- errCh <- err
+ return err
}
for _, image := range images {
namespace := image.Namespace
@@ -92,7 +104,7 @@ func outDatedImages(config *rest.Config, js nats.JetStreamContext, wg *sync.Wait
final.Pod = pod
err := PublishOutdatedImages(final, js)
if err != nil {
- errCh <- err
+ return err
}
} else {
if checkResult != nil {
@@ -108,7 +120,7 @@ func outDatedImages(config *rest.Config, js nats.JetStreamContext, wg *sync.Wait
final.Pod = pod
err := PublishOutdatedImages(final, js)
if err != nil {
- errCh <- err
+ return err
}
} else {
tagtrunk := truncateTagName(tag)
@@ -125,19 +137,20 @@ func outDatedImages(config *rest.Config, js nats.JetStreamContext, wg *sync.Wait
final.Pod = pod
err := PublishOutdatedImages(final, js)
if err != nil {
- errCh <- err
+ return err
}
}
}
}
}
+ return nil
}
func ParseImageName(imageName string) (string, string, string, error) {
matches := dockerImageNameRegex.FindStringSubmatch(imageName)
if len(matches) != 5 {
- return "", "", "", fmt.Errorf("Expected 5 matches in regex, but found %d", len(matches))
+ return "", "", "", fmt.Errorf("expected 5 matches in regex, but found %d", len(matches))
}
hostname := matches[1]
@@ -184,9 +197,7 @@ func ListImages(config *rest.Config) ([]model.RunningImage, error) {
for _, pod := range pods.Items {
for _, initContainerStatus := range pod.Status.InitContainerStatuses {
pullable := initContainerStatus.ImageID
- if strings.HasPrefix(pullable, "docker-pullable://") {
- pullable = strings.TrimPrefix(pullable, "docker-pullable://")
- }
+ pullable = strings.TrimPrefix(pullable, "docker-pullable://")
runningImage := model.RunningImage{
Pod: pod.Name,
Namespace: pod.Namespace,
@@ -199,9 +210,8 @@ func ListImages(config *rest.Config) ([]model.RunningImage, error) {
for _, containerStatus := range pod.Status.ContainerStatuses {
pullable := containerStatus.ImageID
- if strings.HasPrefix(pullable, "docker-pullable://") {
- pullable = strings.TrimPrefix(pullable, "docker-pullable://")
- }
+ pullable = strings.TrimPrefix(pullable, "docker-pullable://")
+
runningImage := model.RunningImage{
Pod: pod.Name,
Namespace: pod.Namespace,
@@ -393,8 +403,8 @@ func fetchTags(reg *registry.Registry, imageName string) ([]string, error) {
}
func parseTags(tags []string) ([]*semver.Version, []string, error) {
- semverTags := make([]*semver.Version, 0, 0)
- nonSemverTags := make([]string, 0, 0)
+ semverTags := make([]*semver.Version, 0)
+ nonSemverTags := make([]string, 0)
for _, tag := range tags {
v, err := semver.NewVersion(tag)
@@ -449,12 +459,12 @@ func splitOutlierSemvers(allSemverTags []*semver.Version) ([]*semver.Version, []
return outliers, remaining, nil
}
-func homeDir() string {
- if h := os.Getenv("HOME"); h != "" {
- return h
- }
- return os.Getenv("USERPROFILE")
-}
+// func homeDir() string {
+// if h := os.Getenv("HOME"); h != "" {
+// return h
+// }
+// return os.Getenv("USERPROFILE")
+// }
type VersionTag struct {
Sort int `json:"sort"`
@@ -547,7 +557,7 @@ func (c SemverTagCollection) Unique() ([]*semver.Version, error) {
}
}
- result := make([]*semver.Version, 0, 0)
+ result := make([]*semver.Version, 0)
for _, u := range unique {
result = append(result, u)
}
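parseTags above buckets registry tags into semver and non-semver lists with hashicorp/go-version; a tiny worked example of that comparison logic with illustrative tag values:

package main

import (
	"fmt"
	"log"

	semver "github.com/hashicorp/go-version"
)

func main() {
	tags := []string{"1.2.3", "1.10.0", "latest", "v2.0.0-rc.1"}

	semverTags := make([]*semver.Version, 0)
	nonSemverTags := make([]string, 0)
	for _, tag := range tags {
		v, err := semver.NewVersion(tag)
		if err != nil {
			nonSemverTags = append(nonSemverTags, tag) // e.g. "latest"
			continue
		}
		semverTags = append(semverTags, v)
	}

	current, err := semver.NewVersion("1.2.3")
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range semverTags {
		if current.LessThan(v) {
			fmt.Printf("%s is newer than %s\n", v, current)
		}
	}
}
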
diff --git a/agent/kubviz/rakees_agent.go b/agent/kubviz/plugins/rakkess/rakees_agent.go
similarity index 62%
rename from agent/kubviz/rakees_agent.go
rename to agent/kubviz/plugins/rakkess/rakees_agent.go
index 54e610fb..93414db3 100644
--- a/agent/kubviz/rakees_agent.go
+++ b/agent/kubviz/plugins/rakkess/rakees_agent.go
@@ -1,51 +1,62 @@
-package main
+package rakkess
import (
"context"
"encoding/json"
"fmt"
- "github.com/intelops/kubviz/constants"
"log"
"os"
"os/signal"
- "sync"
"syscall"
- "github.com/intelops/kubviz/agent/kubviz/rakkess"
+ "github.com/intelops/kubviz/constants"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+
"github.com/intelops/kubviz/model"
"github.com/nats-io/nats.go"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
-func accessToOutcome(access rakkess.Access) (rakkess.Outcome, error) {
+var ClusterName string = os.Getenv("CLUSTER_NAME")
+
+func accessToOutcome(access Access) (Outcome, error) {
switch access {
case 0:
- return rakkess.None, nil
+ return None, nil
case 1:
- return rakkess.Up, nil
+ return Up, nil
case 2:
- return rakkess.Down, nil
+ return Down, nil
case 3:
- return rakkess.Err, nil
+ return Err, nil
default:
- return rakkess.None, fmt.Errorf("unknown access code: %d", access)
+ return None, fmt.Errorf("unknown access code: %d", access)
}
}
-func RakeesOutput(config *rest.Config, js nats.JetStreamContext, wg *sync.WaitGroup, errCh chan error) {
+func RakeesOutput(config *rest.Config, js nats.JetStreamContext) error {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("rakees")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "RakeesOutput")
+ span.SetAttributes(attribute.String("rakees-plugin-agent", "rakees-output"))
+ defer span.End()
+
// Create a new Kubernetes client
client, err := kubernetes.NewForConfig(config)
if err != nil {
- errCh <- err
+ return err
}
// Retrieve all available resource types
resourceList, err := client.Discovery().ServerPreferredResources()
if err != nil {
- errCh <- err
+ return err
}
- var opts = rakkess.NewRakkessOptions()
+ var opts = NewRakkessOptions()
opts.Verbs = []string{"list", "create", "update", "delete"}
opts.OutputFormat = "icon-table"
opts.ResourceList = resourceList
@@ -53,46 +64,45 @@ func RakeesOutput(config *rest.Config, js nats.JetStreamContext, wg *sync.WaitGr
ctx, cancel := context.WithCancel(context.Background())
catchCtrlC(cancel)
- res, err := rakkess.Resource(ctx, opts)
+ res, err := Resource(ctx, opts)
if err != nil {
fmt.Println("Error")
- errCh <- err
+ return err
}
fmt.Println("Result..")
for resourceType, access := range res {
createOutcome, err := accessToOutcome(access["create"])
if err != nil {
- errCh <- err
+ return err
}
deleteOutcome, err := accessToOutcome(access["delete"])
if err != nil {
- errCh <- err
+ return err
}
listOutcome, err := accessToOutcome(access["list"])
if err != nil {
- errCh <- err
+ return err
}
updateOutcome, err := accessToOutcome(access["update"])
if err != nil {
- errCh <- err
+ return err
}
metrics := model.RakeesMetrics{
ClusterName: ClusterName,
Name: resourceType,
- Create: rakkess.HumanreadableAccessCode(createOutcome),
- Delete: rakkess.HumanreadableAccessCode(deleteOutcome),
- List: rakkess.HumanreadableAccessCode(listOutcome),
- Update: rakkess.HumanreadableAccessCode(updateOutcome),
+ Create: HumanreadableAccessCode(createOutcome),
+ Delete: HumanreadableAccessCode(deleteOutcome),
+ List: HumanreadableAccessCode(listOutcome),
+ Update: HumanreadableAccessCode(updateOutcome),
}
metricsJson, _ := json.Marshal(metrics)
_, err = js.Publish(constants.EventSubject_rakees, metricsJson)
if err != nil {
- errCh <- err
+ return err
}
log.Printf("Metrics with resource %s has been published", resourceType)
}
- // t := res.Table(opts.Verbs)
- // t.Render(opts.Streams.Out, opts.OutputFormat)
+ return nil
}
diff --git a/agent/kubviz/rakkess/rakkess.go b/agent/kubviz/plugins/rakkess/rakkess.go
similarity index 100%
rename from agent/kubviz/rakkess/rakkess.go
rename to agent/kubviz/plugins/rakkess/rakkess.go
diff --git a/agent/kubviz/plugins/trivy/trivy.go b/agent/kubviz/plugins/trivy/trivy.go
new file mode 100644
index 00000000..625b405a
--- /dev/null
+++ b/agent/kubviz/plugins/trivy/trivy.go
@@ -0,0 +1,110 @@
+package trivy
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ exec "os/exec"
+ "strings"
+
+ "github.com/aquasecurity/trivy/pkg/k8s/report"
+ "github.com/google/uuid"
+ "github.com/intelops/kubviz/constants"
+ "github.com/intelops/kubviz/model"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "github.com/nats-io/nats.go"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+)
+
+var ClusterName string = os.Getenv("CLUSTER_NAME")
+
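+// executeCommandTrivy runs the given command through /bin/sh inside an OpenTelemetry span and returns whatever was written to stdout.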
+func executeCommandTrivy(command string) ([]byte, error) {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("trivy-cluster")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "executeCommandTrivy")
+ span.SetAttributes(attribute.String("trivy-k8s-agent", "command-running"))
+ defer span.End()
+
+ cmd := exec.Command("/bin/sh", "-c", command)
+ var outc, errc bytes.Buffer
+ cmd.Stdout = &outc
+ cmd.Stderr = &errc
+
+ err := cmd.Run()
+
+ if err != nil {
+ log.Println("Execute Trivy Command Error", err.Error())
+ }
+
+ return outc.Bytes(), err
+}
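+// RunTrivyK8sClusterScan runs a trivy k8s summary scan with a cache directory on the agent PVC, strips the log output preceding the JSON payload, and publishes the consolidated report to JetStream.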
+func RunTrivyK8sClusterScan(js nats.JetStreamContext) error {
+ pvcMountPath := "/mnt/agent/kbz"
+ trivyCacheDir := fmt.Sprintf("%s/trivy-cache", pvcMountPath)
+ err := os.MkdirAll(trivyCacheDir, 0755)
+ if err != nil {
+ log.Printf("Error creating Trivy cache directory: %v\n", err)
+ return err
+ }
+ var report report.ConsolidatedReport
+
+ ctx := context.Background()
+ tracer := otel.Tracer("trivy-cluster")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "RunTrivyK8sClusterScan")
+ span.SetAttributes(attribute.String("cluster-name", ClusterName))
+ defer span.End()
+
+ cmdString := fmt.Sprintf("trivy k8s --report summary cluster --exclude-nodes kubernetes.io/arch:amd64 --timeout 60m -f json --cache-dir %s --debug", trivyCacheDir)
+ // clearCacheCmd := "trivy k8s --clear-cache"
+ out, err := executeCommandTrivy(cmdString)
+ if err != nil {
+ log.Printf("Error executing command: %v\n", err)
+ return err
+ }
+ log.Printf("Command output: %s\n", out)
+ outStr := string(out)
+ parts := strings.SplitN(outStr, "{", 2)
+ if len(parts) <= 1 {
+ log.Println("No output from k8s cluster scan command")
+ return fmt.Errorf("no output from trivy k8s cluster scan command")
+ }
+ // log.Println("Command logs for k8s cluster scan", parts[0])
+ jsonPart := "{" + parts[1]
+ // log.Println("First 200 k8s cluster scan lines output", jsonPart[:200])
+ // log.Println("Last 200 k8s cluster scan lines output", jsonPart[len(jsonPart)-200:])
+ err = json.Unmarshal([]byte(jsonPart), &report)
+ if err != nil {
+ log.Printf("Error occurred while Unmarshalling json for k8s cluster scan: %v", err)
+ return err
+ }
+ // _, err = executeCommandTrivy(clearCacheCmd)
+ // if err != nil {
+ // log.Printf("Error executing command: %v\n", err)
+ // return err
+ // }
+ err = PublishTrivyK8sReport(report, js)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
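+// PublishTrivyK8sReport wraps the consolidated report in a model.Trivy payload and publishes it on the TRIVY_K8S_SUBJECT stream.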
+func PublishTrivyK8sReport(report report.ConsolidatedReport, js nats.JetStreamContext) error {
+ metrics := model.Trivy{
+ ID: uuid.New().String(),
+ ClusterName: ClusterName,
+ Report: report,
+ }
+ metricsJson, _ := json.Marshal(metrics)
+ _, err := js.Publish(constants.TRIVY_K8S_SUBJECT, metricsJson)
+ if err != nil {
+ return err
+ }
+ log.Printf("Trivy k8s cluster report with ID:%s has been published\n", metrics.ID)
+ return nil
+}
diff --git a/agent/kubviz/plugins/trivy/trivy_image.go b/agent/kubviz/plugins/trivy/trivy_image.go
new file mode 100644
index 00000000..c4919a18
--- /dev/null
+++ b/agent/kubviz/plugins/trivy/trivy_image.go
@@ -0,0 +1,186 @@
+package trivy
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ exec "os/exec"
+ "strings"
+
+ "github.com/aquasecurity/trivy/pkg/types"
+ "github.com/google/uuid"
+ "github.com/intelops/kubviz/constants"
+ "github.com/intelops/kubviz/model"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "github.com/nats-io/nats.go"
+ "github.com/pkg/errors"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+)
+
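+// RunTrivyImageScans scans every running image in the cluster with trivy, skips images whose scan fails or produces no output, and publishes each parsed report.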
+func RunTrivyImageScans(config *rest.Config, js nats.JetStreamContext) error {
+ pvcMountPath := "/mnt/agent/kbz"
+ trivyImageCacheDir := fmt.Sprintf("%s/trivy-imagecache", pvcMountPath)
+ err := os.MkdirAll(trivyImageCacheDir, 0755)
+ if err != nil {
+ log.Printf("Error creating Trivy Image cache directory: %v\n", err)
+ return err
+ }
+ // clearCacheCmd := "trivy image --clear-cache"
+
+ ctx := context.Background()
+ tracer := otel.Tracer("trivy-image")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "RunTrivyImageScans")
+ span.SetAttributes(attribute.String("trivy-image-scan-agent", "image-scan"))
+ defer span.End()
+
+ images, err := ListImages(config)
+ if err != nil {
+ log.Println("error occured while trying to list images, error :", err.Error())
+ return err
+ }
+
+ for _, image := range images {
+ var report types.Report
+ scanCmd := fmt.Sprintf("trivy image %s --timeout 60m -f json -q --cache-dir %s", image.PullableImage, trivyImageCacheDir)
+ out, err := executeTrivyImage(scanCmd)
+ if err != nil {
+ log.Printf("Error scanning image %s: %v", image.PullableImage, err)
+ continue // Move on to the next image in case of an error
+ }
+
+ parts := strings.SplitN(string(out), "{", 2)
+ if len(parts) <= 1 {
+ log.Println("No output from image scan command", err)
+ continue // Move on to the next image if there's no output
+ }
+
+ // log.Println("Command logs for image", parts[0])
+ jsonPart := "{" + parts[1]
+ // log.Println("First 200 image scan lines output", jsonPart[:200])
+ // log.Println("Last 200 image scan lines output", jsonPart[len(jsonPart)-200:])
+
+ err = json.Unmarshal([]byte(jsonPart), &report)
+ if err != nil {
+ log.Printf("Error occurred while Unmarshalling json for image: %v", err)
+ continue // Move on to the next image in case of an error
+ }
+ // _, err = executeCommandTrivy(clearCacheCmd)
+ // if err != nil {
+ // log.Printf("Error executing command: %v\n", err)
+ // return err
+ // }
+ err = PublishImageScanReports(report, js)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
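+// PublishImageScanReports wraps an image scan report in a model.TrivyImage payload and publishes it on the TRIVY_IMAGE_SUBJECT stream.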
+func PublishImageScanReports(report types.Report, js nats.JetStreamContext) error {
+ metrics := model.TrivyImage{
+ ID: uuid.New().String(),
+ ClusterName: ClusterName,
+ Report: report,
+ }
+ metricsJson, _ := json.Marshal(metrics)
+ _, err := js.Publish(constants.TRIVY_IMAGE_SUBJECT, metricsJson)
+ if err != nil {
+ return err
+ }
+ log.Printf("Trivy image report with ID:%s has been published\n", metrics.ID)
+ return nil
+}
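+// executeTrivyImage runs the given command through /bin/sh, logging anything written to stderr, and returns the captured stdout.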
+func executeTrivyImage(command string) ([]byte, error) {
+
+ // ctx := context.Background()
+ // tracer := otel.Tracer("trivy-image")
+ // _, span := tracer.Start(opentelemetry.BuildContext(ctx), "executeCommandTrivyImage")
+ // span.SetAttributes(attribute.String("trivy-image-agent", "trivyimage-command-running"))
+ // defer span.End()
+
+ cmd := exec.Command("/bin/sh", "-c", command)
+ var outc, errc bytes.Buffer
+ cmd.Stdout = &outc
+ cmd.Stderr = &errc
+ err := cmd.Run()
+ // if outc.Len() > 0 {
+ // log.Printf("Command Output: %s\n", outc.String())
+ // }
+ if errc.Len() > 0 {
+ log.Printf("Command Error: %s\n", errc.String())
+ }
+ if err != nil {
+ return nil, fmt.Errorf("error while executing trivy image command: %v", err)
+ }
+ return outc.Bytes(), err
+}
+
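+// ListImages walks every namespace and pod, collecting container and init-container images (with the docker-pullable:// prefix trimmed) and deduplicating them by pullable image reference.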
+func ListImages(config *rest.Config) ([]model.RunningImage, error) {
+ var err error
+ clientset, err := kubernetes.NewForConfig(config)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to create clientset")
+ }
+ ctx := context.Background()
+ namespaces, err := clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to list namespaces")
+ }
+
+ runningImages := []model.RunningImage{}
+ for _, namespace := range namespaces.Items {
+ pods, err := clientset.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to list pods")
+ }
+
+ for _, pod := range pods.Items {
+ for _, initContainerStatus := range pod.Status.InitContainerStatuses {
+ pullable := initContainerStatus.ImageID
+ pullable = strings.TrimPrefix(pullable, "docker-pullable://")
+ runningImage := model.RunningImage{
+ Pod: pod.Name,
+ Namespace: pod.Namespace,
+ InitContainer: &initContainerStatus.Name,
+ Image: initContainerStatus.Image,
+ PullableImage: pullable,
+ }
+ runningImages = append(runningImages, runningImage)
+ }
+
+ for _, containerStatus := range pod.Status.ContainerStatuses {
+ pullable := containerStatus.ImageID
+ pullable = strings.TrimPrefix(pullable, "docker-pullable://")
+
+ runningImage := model.RunningImage{
+ Pod: pod.Name,
+ Namespace: pod.Namespace,
+ Container: &containerStatus.Name,
+ Image: containerStatus.Image,
+ PullableImage: pullable,
+ }
+ runningImages = append(runningImages, runningImage)
+ }
+ }
+ }
+
+ // Remove exact duplicates
+ cleanedImages := []model.RunningImage{}
+ seenImages := make(map[string]bool)
+ for _, runningImage := range runningImages {
+ if !seenImages[runningImage.PullableImage] {
+ cleanedImages = append(cleanedImages, runningImage)
+ seenImages[runningImage.PullableImage] = true
+ }
+ }
+
+ return cleanedImages, nil
+}
diff --git a/agent/kubviz/plugins/trivy/trivy_sbom.go b/agent/kubviz/plugins/trivy/trivy_sbom.go
new file mode 100644
index 00000000..62027761
--- /dev/null
+++ b/agent/kubviz/plugins/trivy/trivy_sbom.go
@@ -0,0 +1,172 @@
+package trivy
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "strings"
+
+ "github.com/google/uuid"
+ "github.com/intelops/kubviz/constants"
+ "github.com/intelops/kubviz/model"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "github.com/nats-io/nats.go"
+ "github.com/pkg/errors"
+ "go.opentelemetry.io/otel"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+)
+
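+// PublishTrivySbomReport wraps the SBOM document in a model.Sbom payload and publishes it on the TRIVY_SBOM_SUBJECT stream.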
+func PublishTrivySbomReport(report map[string]interface{}, js nats.JetStreamContext) error {
+
+ metrics := model.Sbom{
+ ID: uuid.New().String(),
+ ClusterName: ClusterName,
+ Report: report,
+ }
+ metricsJson, err := json.Marshal(metrics)
+ if err != nil {
+ log.Println("error occurred while marshalling sbom metrics in agent", err.Error())
+ return err
+ }
+ _, err = js.Publish(constants.TRIVY_SBOM_SUBJECT, metricsJson)
+ if err != nil {
+ return err
+ }
+ log.Printf("Trivy sbom report with Id %v has been published\n", metrics.ID)
+ return nil
+}
+
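+// executeCommandSbom runs the given command through /bin/sh and returns the captured stdout.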
+func executeCommandSbom(command string) ([]byte, error) {
+
+ cmd := exec.Command("/bin/sh", "-c", command)
+ var outc, errc bytes.Buffer
+ cmd.Stdout = &outc
+ cmd.Stderr = &errc
+ err := cmd.Run()
+ if err != nil {
+ log.Println("Execute SBOM Command Error", err.Error())
+ }
+ return outc.Bytes(), err
+}
+
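+// RunTrivySbomScan generates a CycloneDX SBOM for every running image with trivy and publishes each report, skipping images that produce no or invalid output.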
+func RunTrivySbomScan(config *rest.Config, js nats.JetStreamContext) error {
+ log.Println("trivy sbom scan started...")
+ pvcMountPath := "/mnt/agent/kbz"
+ trivySbomCacheDir := fmt.Sprintf("%s/trivy-sbomcache", pvcMountPath)
+ err := os.MkdirAll(trivySbomCacheDir, 0755)
+ if err != nil {
+ log.Printf("Error creating Trivy cache directory: %v\n", err)
+ return err
+ }
+
+ ctx := context.Background()
+ tracer := otel.Tracer("trivy-sbom")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "RunTrivySbomScan")
+ defer span.End()
+
+ images, err := ListImagesforSbom(config)
+
+ if err != nil {
+ log.Printf("failed to list images: %v", err)
+ }
+ for _, image := range images {
+
+ sbomcmd := fmt.Sprintf("trivy image --format cyclonedx %s --cache-dir %s", image.PullableImage, trivySbomCacheDir)
+ out, err := executeCommandSbom(sbomcmd)
+
+ if err != nil {
+ log.Printf("Error executing Trivy for image sbom %s: %v", image.PullableImage, err)
+ continue // Move on to the next image in case of an error
+ }
+ if out == nil {
+ log.Printf("Trivy output is nil for image sbom %s", image.PullableImage)
+ continue
+ }
+ // Check if the output is empty or invalid JSON
+ if len(out) == 0 {
+ log.Printf("Trivy output is empty for image sbom %s", image.PullableImage)
+ continue // Move on to the next image
+ }
+
+ var report map[string]interface{}
+ err = json.Unmarshal(out, &report)
+ if err != nil {
+ log.Printf("Error unmarshaling JSON data for image sbom %s: %v", image.PullableImage, err)
+ continue // Move on to the next image in case of an error
+ }
+ err = PublishTrivySbomReport(report, js)
+ if err != nil {
+ log.Printf("Error publishing Trivy SBOM report for image %s: %v", image.PullableImage, err)
+ continue
+ }
+ }
+ return nil
+}
+
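+// ListImagesforSbom lists the running container and init-container images across all namespaces, deduplicated by pullable image (the same logic used by ListImages).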
+func ListImagesforSbom(config *rest.Config) ([]model.RunningImage, error) {
+ var err error
+ clientset, err := kubernetes.NewForConfig(config)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to create clientset")
+ }
+ ctx := context.Background()
+ namespaces, err := clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to list namespaces")
+ }
+
+ runningImages := []model.RunningImage{}
+ for _, namespace := range namespaces.Items {
+ pods, err := clientset.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to list pods")
+ }
+
+ for _, pod := range pods.Items {
+ for _, initContainerStatus := range pod.Status.InitContainerStatuses {
+ pullable := initContainerStatus.ImageID
+ pullable = strings.TrimPrefix(pullable, "docker-pullable://")
+ runningImage := model.RunningImage{
+ Pod: pod.Name,
+ Namespace: pod.Namespace,
+ InitContainer: &initContainerStatus.Name,
+ Image: initContainerStatus.Image,
+ PullableImage: pullable,
+ }
+ runningImages = append(runningImages, runningImage)
+ }
+
+ for _, containerStatus := range pod.Status.ContainerStatuses {
+ pullable := containerStatus.ImageID
+ pullable = strings.TrimPrefix(pullable, "docker-pullable://")
+
+ runningImage := model.RunningImage{
+ Pod: pod.Name,
+ Namespace: pod.Namespace,
+ Container: &containerStatus.Name,
+ Image: containerStatus.Image,
+ PullableImage: pullable,
+ }
+ runningImages = append(runningImages, runningImage)
+ }
+ }
+ }
+
+ // Remove exact duplicates
+ cleanedImages := []model.RunningImage{}
+ seenImages := make(map[string]bool)
+ for _, runningImage := range runningImages {
+ if !seenImages[runningImage.PullableImage] {
+ cleanedImages = append(cleanedImages, runningImage)
+ seenImages[runningImage.PullableImage] = true
+ }
+ }
+
+ return cleanedImages, nil
+}
diff --git a/agent/kubviz/scheduler/scheduler.go b/agent/kubviz/scheduler/scheduler.go
new file mode 100644
index 00000000..ccd9564e
--- /dev/null
+++ b/agent/kubviz/scheduler/scheduler.go
@@ -0,0 +1,178 @@
+package scheduler
+
+import (
+ "sync"
+
+ "github.com/nats-io/nats.go"
+ "github.com/pkg/errors"
+ "github.com/robfig/cron/v3"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+
+ "github.com/intelops/go-common/logging"
+ "github.com/intelops/kubviz/agent/config"
+)
+
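+// jobHandler is implemented by every scheduled plugin job: CronSpec returns its cron expression and Run executes the plugin once.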
+type jobHandler interface {
+ CronSpec() string
+ Run()
+}
+
+type Scheduler struct {
+ log logging.Logger
+ jobs map[string]jobHandler
+ cronIDs map[string]cron.EntryID
+ c *cron.Cron
+ cronMutex *sync.Mutex
+}
+
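+// NewScheduler builds a cron-backed scheduler whose jobs skip a run while the previous one is still in progress and recover from panics.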
+func NewScheduler(log logging.Logger) *Scheduler {
+ clog := cron.VerbosePrintfLogger(log.(logging.StdLogger))
+ return &Scheduler{
+ log: log,
+ c: cron.New(cron.WithChain(cron.SkipIfStillRunning(clog), cron.Recover(clog))),
+ jobs: map[string]jobHandler{},
+ cronIDs: map[string]cron.EntryID{},
+ cronMutex: &sync.Mutex{},
+ }
+}
+
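+// AddJob registers a job under a unique name using its CronSpec; it fails if the name is already registered or the spec is missing or invalid.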
+func (t *Scheduler) AddJob(jobName string, job jobHandler) error {
+ t.cronMutex.Lock()
+ defer t.cronMutex.Unlock()
+ _, ok := t.cronIDs[jobName]
+ if ok {
+ return errors.Errorf("%s job already exists", jobName)
+ }
+ spec := job.CronSpec()
+ if spec == "" {
+ return errors.Errorf("%s job has no cron spec", jobName)
+ }
+ entryID, err := t.c.AddJob(spec, job)
+ if err != nil {
+ return errors.WithMessagef(err, "%s job cron spec not valid", jobName)
+ }
+
+ t.jobs[jobName] = job
+ t.cronIDs[jobName] = entryID
+ t.log.Infof("%s job added with cron '%s'", jobName, spec)
+ return nil
+}
+
+// RemoveJob unregisters the named job and removes its cron entry; it fails if the job does not exist.
+func (t *Scheduler) RemoveJob(jobName string) error {
+ t.cronMutex.Lock()
+ defer t.cronMutex.Unlock()
+ entryID, ok := t.cronIDs[jobName]
+ if !ok {
+ return errors.Errorf("%s job not exist", jobName)
+ }
+
+ t.c.Remove(entryID)
+ delete(t.jobs, jobName)
+ delete(t.cronIDs, jobName)
+ t.log.Infof("%s job removed", jobName)
+ return nil
+}
+
+func (t *Scheduler) Start() {
+ t.c.Start()
+ t.log.Infof("Job scheduler started")
+}
+
+func (t *Scheduler) Stop() {
+ t.c.Stop()
+ t.log.Infof("Job scheduler stopped")
+}
+
+func (t *Scheduler) GetJobs() map[string]jobHandler {
+ t.cronMutex.Lock()
+ defer t.cronMutex.Unlock()
+ return t.jobs
+}
+
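+// InitScheduler registers a job for every plugin whose configured interval is non-empty and not "0" and returns the scheduler ready to be started.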
+func InitScheduler(config *rest.Config, js nats.JetStreamContext, cfg config.AgentConfigurations, clientset *kubernetes.Clientset) (s *Scheduler) {
+ log := logging.NewLogger()
+ s = NewScheduler(log)
+ if cfg.OutdatedInterval != "" && cfg.OutdatedInterval != "0" {
+ sj, err := NewOutDatedImagesJob(config, js, cfg.OutdatedInterval)
+ if err != nil {
+ log.Fatal("no time interval", err)
+ }
+ err = s.AddJob("Outdated", sj)
+ if err != nil {
+ log.Fatal("failed to do job", err)
+ }
+ }
+ if cfg.GetAllInterval != "" && cfg.GetAllInterval != "0" {
+ sj, err := NewKetallJob(config, js, cfg.GetAllInterval)
+ if err != nil {
+ log.Fatal("no time interval", err)
+ }
+ err = s.AddJob("GetALL", sj)
+ if err != nil {
+ log.Fatal("failed to do job", err)
+ }
+ }
+ if cfg.KubeScoreInterval != "" && cfg.KubeScoreInterval != "0" {
+ sj, err := NewKubescoreJob(clientset, js, cfg.KubeScoreInterval)
+ if err != nil {
+ log.Fatal("no time interval", err)
+ }
+ err = s.AddJob("KubeScore", sj)
+ if err != nil {
+ log.Fatal("failed to do job", err)
+ }
+ }
+ if cfg.RakkessInterval != "" && cfg.RakkessInterval != "0" {
+ sj, err := NewRakkessJob(config, js, cfg.RakkessInterval)
+ if err != nil {
+ log.Fatal("no time interval", err)
+ }
+ err = s.AddJob("Rakkess", sj)
+ if err != nil {
+ log.Fatal("failed to do job", err)
+ }
+ }
+ if cfg.KubePreUpgradeInterval != "" && cfg.KubePreUpgradeInterval != "0" {
+ sj, err := NewKubePreUpgradeJob(config, js, cfg.KubePreUpgradeInterval)
+ if err != nil {
+ log.Fatal("no time interval", err)
+ }
+ err = s.AddJob("KubePreUpgrade", sj)
+ if err != nil {
+ log.Fatal("failed to do job", err)
+ }
+ }
+ if cfg.TrivyImageInterval != "" && cfg.TrivyImageInterval != "0" {
+ sj, err := NewTrivyImagesJob(config, js, cfg.TrivyImageInterval)
+ if err != nil {
+ log.Fatal("no time interval", err)
+ }
+ err = s.AddJob("Trivyimage", sj)
+ if err != nil {
+ log.Fatal("failed to do job", err)
+ }
+ }
+ if cfg.TrivySbomInterval != "" && cfg.TrivySbomInterval != "0" {
+ sj, err := NewTrivySbomJob(config, js, cfg.TrivySbomInterval)
+ if err != nil {
+ log.Fatal("no time interval", err)
+ }
+ err = s.AddJob("Trivysbom", sj)
+ if err != nil {
+ log.Fatal("failed to do job", err)
+ }
+ }
+ if cfg.TrivyClusterScanInterval != "" && cfg.TrivyClusterScanInterval != "0" {
+ sj, err := NewTrivyClusterScanJob(js, cfg.TrivyClusterScanInterval)
+ if err != nil {
+ log.Fatal("no time interval", err)
+ }
+ err = s.AddJob("Trivycluster", sj)
+ if err != nil {
+ log.Fatal("failed to do job", err)
+ }
+ }
+ return
+}
diff --git a/agent/kubviz/scheduler/scheduler_watch.go b/agent/kubviz/scheduler/scheduler_watch.go
new file mode 100644
index 00000000..f2e769af
--- /dev/null
+++ b/agent/kubviz/scheduler/scheduler_watch.go
@@ -0,0 +1,189 @@
+package scheduler
+
+import (
+ "github.com/intelops/kubviz/agent/kubviz/plugins/events"
+ "github.com/intelops/kubviz/agent/kubviz/plugins/ketall"
+ "github.com/intelops/kubviz/agent/kubviz/plugins/kubepreupgrade"
+ "github.com/nats-io/nats.go"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+
+ "github.com/intelops/kubviz/agent/kubviz/plugins/kubescore"
+ "github.com/intelops/kubviz/agent/kubviz/plugins/outdated"
+ "github.com/intelops/kubviz/agent/kubviz/plugins/rakkess"
+ "github.com/intelops/kubviz/agent/kubviz/plugins/trivy"
+)
+
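+// Each job type below bundles the dependencies its plugin needs together with the cron frequency taken from the agent configuration.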
+type OutDatedImagesJob struct {
+ config *rest.Config
+ js nats.JetStreamContext
+ frequency string
+}
+
+type KetallJob struct {
+ config *rest.Config
+ js nats.JetStreamContext
+ frequency string
+}
+type TrivyImageJob struct {
+ config *rest.Config
+ js nats.JetStreamContext
+ frequency string
+}
+type TrivySbomJob struct {
+ config *rest.Config
+ js nats.JetStreamContext
+ frequency string
+}
+type TrivyClusterScanJob struct {
+ //config *rest.Config
+ js nats.JetStreamContext
+ frequency string
+}
+type RakkessJob struct {
+ config *rest.Config
+ js nats.JetStreamContext
+ frequency string
+}
+type KubePreUpgradeJob struct {
+ config *rest.Config
+ js nats.JetStreamContext
+ frequency string
+}
+type KubescoreJob struct {
+ clientset *kubernetes.Clientset
+ js nats.JetStreamContext
+ frequency string
+}
+
+func NewTrivySbomJob(config *rest.Config, js nats.JetStreamContext, frequency string) (*TrivySbomJob, error) {
+ return &TrivySbomJob{
+ config: config,
+ js: js,
+ frequency: frequency,
+ }, nil
+}
+func (v *TrivySbomJob) CronSpec() string {
+ return v.frequency
+}
+
+func (j *TrivySbomJob) Run() {
+ // Call the RunTrivySbomScan function with the provided config and js
+ err := trivy.RunTrivySbomScan(j.config, j.js)
+ events.LogErr(err)
+}
+
+func NewTrivyClusterScanJob(js nats.JetStreamContext, frequency string) (*TrivyClusterScanJob, error) {
+ return &TrivyClusterScanJob{
+ // config: config,
+ js: js,
+ frequency: frequency,
+ }, nil
+}
+func (v *TrivyClusterScanJob) CronSpec() string {
+ return v.frequency
+}
+
+func (j *TrivyClusterScanJob) Run() {
+ // Call the RunTrivyK8sClusterScan function with the provided js
+ err := trivy.RunTrivyK8sClusterScan(j.js)
+ events.LogErr(err)
+}
+func NewTrivyImagesJob(config *rest.Config, js nats.JetStreamContext, frequency string) (*TrivyImageJob, error) {
+ return &TrivyImageJob{
+ config: config,
+ js: js,
+ frequency: frequency,
+ }, nil
+}
+func (v *TrivyImageJob) CronSpec() string {
+ return v.frequency
+}
+
+func (j *TrivyImageJob) Run() {
+ // Call the RunTrivyImageScans function with the provided config and js
+ err := trivy.RunTrivyImageScans(j.config, j.js)
+ events.LogErr(err)
+}
+func NewOutDatedImagesJob(config *rest.Config, js nats.JetStreamContext, frequency string) (*OutDatedImagesJob, error) {
+ return &OutDatedImagesJob{
+ config: config,
+ js: js,
+ frequency: frequency,
+ }, nil
+}
+func (v *OutDatedImagesJob) CronSpec() string {
+ return v.frequency
+}
+
+func (j *OutDatedImagesJob) Run() {
+ // Call the OutDatedImages function with the provided config and js
+ err := outdated.OutDatedImages(j.config, j.js)
+ events.LogErr(err)
+}
+func NewKetallJob(config *rest.Config, js nats.JetStreamContext, frequency string) (*KetallJob, error) {
+ return &KetallJob{
+ config: config,
+ js: js,
+ frequency: frequency,
+ }, nil
+}
+func (v *KetallJob) CronSpec() string {
+ return v.frequency
+}
+
+func (j *KetallJob) Run() {
+ // Call the GetAllResources function with the provided config and js
+ err := ketall.GetAllResources(j.config, j.js)
+ events.LogErr(err)
+}
+
+func NewKubePreUpgradeJob(config *rest.Config, js nats.JetStreamContext, frequency string) (*KubePreUpgradeJob, error) {
+ return &KubePreUpgradeJob{
+ config: config,
+ js: js,
+ frequency: frequency,
+ }, nil
+}
+func (v *KubePreUpgradeJob) CronSpec() string {
+ return v.frequency
+}
+
+func (j *KubePreUpgradeJob) Run() {
+ // Call the KubePreUpgradeDetector function with the provided config and js
+ err := kubepreupgrade.KubePreUpgradeDetector(j.config, j.js)
+ events.LogErr(err)
+}
+
+func NewKubescoreJob(clientset *kubernetes.Clientset, js nats.JetStreamContext, frequency string) (*KubescoreJob, error) {
+ return &KubescoreJob{
+ clientset: clientset,
+ js: js,
+ frequency: frequency,
+ }, nil
+}
+func (v *KubescoreJob) CronSpec() string {
+ return v.frequency
+}
+
+func (j *KubescoreJob) Run() {
+ // Call the RunKubeScore function with the provided clientset and js
+ err := kubescore.RunKubeScore(j.clientset, j.js)
+ events.LogErr(err)
+}
+func NewRakkessJob(config *rest.Config, js nats.JetStreamContext, frequency string) (*RakkessJob, error) {
+ return &RakkessJob{
+ config: config,
+ js: js,
+ frequency: frequency,
+ }, nil
+}
+func (v *RakkessJob) CronSpec() string {
+ return v.frequency
+}
+
+func (j *RakkessJob) Run() {
+ // Call the RakeesOutput function with the provided config and js
+ err := rakkess.RakeesOutput(j.config, j.js)
+ events.LogErr(err)
+}
diff --git a/agent/kubviz/trivy.go b/agent/kubviz/trivy.go
deleted file mode 100644
index 5951e54f..00000000
--- a/agent/kubviz/trivy.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package main
-
-import (
- "encoding/json"
- "log"
- "strings"
-
- "github.com/aquasecurity/trivy/pkg/k8s/report"
- "github.com/google/uuid"
- "github.com/intelops/kubviz/constants"
- "github.com/intelops/kubviz/model"
- "github.com/nats-io/nats.go"
-)
-
-func RunTrivyK8sClusterScan(js nats.JetStreamContext, errCh chan error) {
- var report report.ConsolidatedReport
- out, err := executeCommand("trivy k8s --report summary cluster --timeout 60m -f json -q --cache-dir /tmp/.cache")
- log.Println("Commnd for k8s cluster scan: trivy k8s --report summary cluster --timeout 60m -f json -q --cache-dir /tmp/.cache")
- parts := strings.SplitN(out, "{", 2)
- if len(parts) <= 1 {
- log.Println("No output from k8s cluster scan command", err)
- errCh <- err
- return
- }
- log.Println("Command logs for k8s cluster scan", parts[0])
- jsonPart := "{" + parts[1]
- log.Println("First 200 k8s cluster scan lines output", jsonPart[:200])
- log.Println("Last 200 k8s cluster scan lines output", jsonPart[len(jsonPart)-200:])
- err = json.Unmarshal([]byte(jsonPart), &report)
- if err != nil {
- log.Printf("Error occurred while Unmarshalling json for k8s cluster scan: %v", err)
- errCh <- err
- }
- publishTrivyK8sReport(report, js, errCh)
-}
-
-func publishTrivyK8sReport(report report.ConsolidatedReport, js nats.JetStreamContext, errCh chan error) {
- metrics := model.Trivy{
- ID: uuid.New().String(),
- ClusterName: ClusterName,
- Report: report,
- }
- metricsJson, _ := json.Marshal(metrics)
- _, err := js.Publish(constants.TRIVY_K8S_SUBJECT, metricsJson)
- if err != nil {
- errCh <- err
- }
- log.Printf("Trivy k8s cluster report with ID:%s has been published\n", metrics.ID)
- errCh <- nil
-}
diff --git a/agent/kubviz/trivy_image.go b/agent/kubviz/trivy_image.go
deleted file mode 100644
index 6e2af2fe..00000000
--- a/agent/kubviz/trivy_image.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package main
-
-import (
- "encoding/json"
- "log"
- "strings"
-
- "github.com/aquasecurity/trivy/pkg/types"
- "github.com/google/uuid"
- "github.com/intelops/kubviz/constants"
- "github.com/intelops/kubviz/model"
- "github.com/nats-io/nats.go"
- "k8s.io/client-go/rest"
-)
-
-func RunTrivyImageScans(config *rest.Config, js nats.JetStreamContext, errCh chan error) {
- images, err := ListImages(config)
- if err != nil {
- log.Fatal(err)
- }
-
- for _, image := range images {
- var report types.Report
- out, err := executeCommand("trivy image " + image.PullableImage + " --timeout 60m -f json -q --cache-dir /tmp/.cache")
- if err != nil {
- log.Printf("Error scanning image %s: %v", image.PullableImage, err)
- continue // Move on to the next image in case of an error
- }
-
- parts := strings.SplitN(out, "{", 2)
- if len(parts) <= 1 {
- log.Println("No output from image scan command", err)
- continue // Move on to the next image if there's no output
- }
-
- log.Println("Command logs for image", parts[0])
- jsonPart := "{" + parts[1]
- log.Println("First 200 image scan lines output", jsonPart[:200])
- log.Println("Last 200 image scan lines output", jsonPart[len(jsonPart)-200:])
-
- err = json.Unmarshal([]byte(jsonPart), &report)
- if err != nil {
- log.Printf("Error occurred while Unmarshalling json for image: %v", err)
- continue // Move on to the next image in case of an error
- }
- publishImageScanReports(report, js, errCh)
- // If you want to publish the report or perform any other action with it, you can do it here
-
- }
-}
-
-func publishImageScanReports(report types.Report, js nats.JetStreamContext, errCh chan error) {
- metrics := model.TrivyImage{
- ID: uuid.New().String(),
- ClusterName: ClusterName,
- Report: report,
- }
- metricsJson, _ := json.Marshal(metrics)
- _, err := js.Publish(constants.TRIVY_IMAGE_SUBJECT, metricsJson)
- if err != nil {
- errCh <- err
- }
- log.Printf("Trivy image report with ID:%s has been published\n", metrics.ID)
- errCh <- nil
-}
diff --git a/agent/kubviz/trivy_sbom.go b/agent/kubviz/trivy_sbom.go
deleted file mode 100644
index 34ca7712..00000000
--- a/agent/kubviz/trivy_sbom.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package main
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "log"
- "os/exec"
-
- "github.com/google/uuid"
- "github.com/intelops/kubviz/constants"
- "github.com/intelops/kubviz/model"
- "github.com/nats-io/nats.go"
- "k8s.io/client-go/rest"
-)
-
-func publishTrivySbomReport(report model.Sbom, js nats.JetStreamContext, errCh chan error) {
- metrics := model.Reports{
- ID: uuid.New().String(),
- Report: report,
- }
- metricsJson, _ := json.Marshal(metrics)
- _, err := js.Publish(constants.TRIVY_SBOM_SUBJECT, metricsJson)
- if err != nil {
- errCh <- err
- }
-
- log.Printf("Trivy report with BomFormat:%v has been published\n", metrics.Report.BomFormat)
- errCh <- nil
-}
-
-func executeCommandSbom(command string) ([]byte, error) {
- cmd := exec.Command("/bin/sh", "-c", command)
- var outc, errc bytes.Buffer
- cmd.Stdout = &outc
- cmd.Stderr = &errc
-
- err := cmd.Run()
-
- if err != nil {
- log.Println("Execute SBOM Command Error", err.Error())
- }
-
- return outc.Bytes(), err
-}
-
-func RunTrivySbomScan(config *rest.Config, js nats.JetStreamContext, errCh chan error) {
- log.Println("trivy sbom run started")
- images, err := ListImages(config)
-
- if err != nil {
- log.Printf("failed to list images: %v", err)
- }
- for _, image := range images {
- fmt.Printf("pullable Image %#v\n", image.PullableImage)
-
- command := fmt.Sprintf("trivy image --format cyclonedx %s %s", image.PullableImage, "--cache-dir /tmp/.cache")
- out, err := executeCommandSbom(command)
-
- if err != nil {
- log.Printf("Error executing Trivy for image sbom %s: %v", image.PullableImage, err)
- continue // Move on to the next image in case of an error
- }
-
- // Check if the output is empty or invalid JSON
- if len(out) == 0 {
- log.Printf("Trivy output is empty for image sbom %s", image.PullableImage)
- continue // Move on to the next image
- }
-
- var report model.Sbom
- err = json.Unmarshal(out, &report)
- if err != nil {
- log.Printf("Error unmarshaling JSON data for image sbom %s: %v", image.PullableImage, err)
- continue // Move on to the next image in case of an error
- }
- log.Println("report", report)
-
- // Publish the report using the given function
- publishTrivySbomReport(report, js, errCh)
- }
-}
diff --git a/agent/server/server.go b/agent/server/server.go
new file mode 100644
index 00000000..db5f910a
--- /dev/null
+++ b/agent/server/server.go
@@ -0,0 +1,47 @@
+package server
+
+import (
+ "log"
+ "net/http"
+ _ "net/http/pprof"
+
+ "github.com/gin-gonic/gin"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin"
+)
+
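+// EnableProfile exposes the net/http/pprof handlers from http.DefaultServeMux under /debug/pprof and adds a /liveness endpoint.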
+func EnableProfile(r *gin.Engine) {
+ pprofGroup := r.Group("/debug/pprof")
+ {
+ pprofGroup.GET("/", gin.WrapH(http.DefaultServeMux))
+ pprofGroup.GET("/cmdline", gin.WrapH(http.DefaultServeMux))
+ pprofGroup.GET("/profile", gin.WrapH(http.DefaultServeMux))
+ pprofGroup.POST("/symbol", gin.WrapH(http.DefaultServeMux))
+ pprofGroup.GET("/symbol", gin.WrapH(http.DefaultServeMux))
+ pprofGroup.GET("/trace", gin.WrapH(http.DefaultServeMux))
+ pprofGroup.GET("/allocs", gin.WrapH(http.DefaultServeMux))
+ pprofGroup.GET("/block", gin.WrapH(http.DefaultServeMux))
+ pprofGroup.GET("/goroutine", gin.WrapH(http.DefaultServeMux))
+ pprofGroup.GET("/heap", gin.WrapH(http.DefaultServeMux))
+ pprofGroup.GET("/mutex", gin.WrapH(http.DefaultServeMux))
+ pprofGroup.GET("/threadcreate", gin.WrapH(http.DefaultServeMux))
+ }
+
+ r.GET("/liveness", func(c *gin.Context) {
+ c.String(http.StatusOK, "Alive")
+ })
+}
+
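+// StartServer runs a gin server on :8080 with OpenTelemetry middleware, the pprof routes, and the liveness endpoint enabled.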
+func StartServer() {
+ r := gin.Default()
+
+ config, err := opentelemetry.GetConfigurations()
+ if err != nil {
+ log.Println("Unable to read open telemetry configurations")
+ }
+
+ r.Use(otelgin.Middleware(config.ServiceName))
+
+ EnableProfile(r)
+ log.Fatal(r.Run(":8080"))
+}
diff --git a/charts/agent/Chart.yaml b/charts/agent/Chart.yaml
index 64f8fa76..deb5a7fe 100644
--- a/charts/agent/Chart.yaml
+++ b/charts/agent/Chart.yaml
@@ -15,10 +15,15 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 1.0.0
+version: 1.1.21
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
-appVersion: "v1.0.0"
+appVersion: "v1.1.7"
+dependencies:
+ - name: kuberhealthy
+ condition: kuberhealthy.enabled
+ version: 1.x.x
+ repository: https://kube-tarian.github.io/helmrepo-supporting-tools/
diff --git a/charts/agent/templates/deployment.yaml b/charts/agent/templates/deployment.yaml
index 490049c8..675ff272 100644
--- a/charts/agent/templates/deployment.yaml
+++ b/charts/agent/templates/deployment.yaml
@@ -46,16 +46,69 @@ spec:
# path: /
# port: http
env:
+ - name: KUBERHEALTHY_URL
+ value: {{ .Values.kuberhealthy.url }}
+ - name: POLL_INTERVAL
+ value: {{ .Values.kuberhealthy.pollInterval }}
- name: CLUSTER_NAME
value: {{ .Values.clusterName }}
- name: NATS_TOKEN
+ {{- if .Values.nats.auth.token }}
value: {{ .Values.nats.auth.token }}
+ {{- else if .Values.nats.auth.secret }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.nats.auth.secret.name }}
+ key: {{ .Values.nats.auth.secret.key }}
+ {{- end }}
+ - name: KUBERHEALTHY_ENABLE
+ value: "{{ .Values.kuberhealthy.enabled }}"
- name: NATS_ADDRESS
value: {{ .Values.nats.host }}
- name: SCHEDULING_INTERVAL
- value: {{ .Values.schedulingInterval }}
+ value: "{{ .Values.schedule.schedulingInterval }}"
+ - name: SCHEDULER_ENABLE
+ value: "{{ .Values.schedule.enabled }}"
+ - name: OUTDATED_INTERVAL
+ value: "{{ .Values.schedule.outdatedInterval }}"
+ - name: GETALL_INTERVAL
+ value: "{{ .Values.schedule.getallInterval }}"
+ - name: KUBESCORE_INTERVAL
+ value: "{{ .Values.schedule.kubescoreInterval }}"
+ - name: RAKKESS_INTERVAL
+ value: "{{ .Values.schedule.rakkessInterval }}"
+ - name: KUBEPREUPGRADE_INTERVAL
+ value: "{{ .Values.schedule.kubepreupgradeInterval }}"
+ - name: TRIVY_IMAGE_INTERVAL
+ value: "{{ .Values.schedule.trivyimageInterval }}"
+ - name: TRIVY_SBOM_INTERVAL
+ value: "{{ .Values.schedule.trivysbomInterval }}"
+ - name: TRIVY_CLUSTERSCAN_INTERVAL
+ value: "{{ .Values.schedule.trivyclusterscanInterval }}"
+ - name: IS_OPTEL_ENABLED
+ value: "{{ .Values.opentelemetry.isEnabled }}"
+ - name: OPTEL_URL
+ value: {{ .Values.opentelemetry.url }}
+ - name: APPLICATION_NAME
+ value: {{ .Values.opentelemetry.appName }}
+ {{- if .Values.persistence.enabled }}
+ volumeMounts:
+ - name: data
+ mountPath: {{ .Values.persistence.mountPath }}
+ {{- end }}
resources:
- {{- toYaml .Values.resources | nindent 12 }}
+ limits:
+ cpu: {{ .Values.resources.limits.cpu }}
+ memory: {{ .Values.resources.limits.memory }}
+ {{- if not .Values.persistence.enabled }}
+ ephemeral-storage: {{ .Values.resources.limits.ephemeralstorage }}
+ {{- end }}
+ requests:
+ cpu: {{ .Values.resources.requests.cpu }}
+ memory: {{ .Values.resources.requests.memory }}
+ {{- if not .Values.persistence.enabled }}
+ ephemeral-storage: {{ .Values.resources.requests.ephemeralstorage }}
+ {{- end }}
{{- if .Values.git_bridge.enabled }}
- name: git-bridge
image: "{{ .Values.git_bridge.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
@@ -68,11 +121,40 @@ spec:
- name: CLUSTER_NAME
value: {{ .Values.clusterName }}
- name: NATS_TOKEN
+ {{- if .Values.nats.auth.token }}
value: {{ .Values.nats.auth.token }}
+ {{- else if .Values.nats.auth.secret }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.nats.auth.secret.name }}
+ key: {{ .Values.nats.auth.secret.key }}
+ {{- end }}
- name: NATS_ADDRESS
value: {{ .Values.nats.host }}
+ - name: IS_OPTEL_ENABLED
+ value: "{{ .Values.opentelemetry.isEnabled }}"
+ - name: OPTEL_URL
+ value: {{ .Values.opentelemetry.url }}
+ - name: APPLICATION_NAME
+ value: {{ .Values.opentelemetry.appName }}
+ {{- if .Values.git_bridge.persistence.enabled }}
+ volumeMounts:
+ - name: data
+ mountPath: {{ .Values.git_bridge.persistence.mountPath }}
+ {{- end }}
resources:
- {{- toYaml .Values.git_bridge.resources | nindent 12 }}
+ limits:
+ cpu: {{ .Values.git_bridge.resources.limits.cpu }}
+ memory: {{ .Values.git_bridge.resources.limits.memory }}
+ {{- if not .Values.git_bridge.persistence.enabled }}
+ ephemeral-storage: {{ .Values.git_bridge.resources.limits.ephemeralstorage }}
+ {{- end }}
+ requests:
+ cpu: {{ .Values.git_bridge.resources.requests.cpu }}
+ memory: {{ .Values.git_bridge.resources.requests.memory }}
+ {{- if not .Values.git_bridge.persistence.enabled }}
+ ephemeral-storage: {{ .Values.git_bridge.resources.requests.ephemeralstorage }}
+ {{- end }}
{{- end }}
{{- if .Values.container_bridge.enabled }}
- name: container-bridge
@@ -86,12 +168,47 @@ spec:
- name: CLUSTER_NAME
value: {{ .Values.clusterName }}
- name: NATS_TOKEN
+ {{- if .Values.nats.auth.token }}
value: {{ .Values.nats.auth.token }}
+ {{- else if .Values.nats.auth.secret }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.nats.auth.secret.name }}
+ key: {{ .Values.nats.auth.secret.key }}
+ {{- end }}
- name: NATS_ADDRESS
value: {{ .Values.nats.host }}
+ - name: IS_OPTEL_ENABLED
+ value: "{{ .Values.opentelemetry.isEnabled }}"
+ - name: OPTEL_URL
+ value: {{ .Values.opentelemetry.url }}
+ - name: APPLICATION_NAME
+ value: {{ .Values.opentelemetry.appName }}
+ {{- if .Values.container_bridge.persistence.enabled }}
+ volumeMounts:
+ - name: data
+ mountPath: {{ .Values.container_bridge.persistence.mountPath }}
+ {{- end }}
resources:
- {{- toYaml .Values.container_bridge.resources | nindent 12 }}
+ limits:
+ cpu: {{ .Values.container_bridge.resources.limits.cpu }}
+ memory: {{ .Values.container_bridge.resources.limits.memory }}
+ {{- if not .Values.container_bridge.persistence.enabled }}
+ ephemeral-storage: {{ .Values.container_bridge.resources.limits.ephemeralstorage }}
+ {{- end }}
+ requests:
+ cpu: {{ .Values.container_bridge.resources.requests.cpu }}
+ memory: {{ .Values.container_bridge.resources.requests.memory }}
+ {{- if not .Values.container_bridge.persistence.enabled }}
+ ephemeral-storage: {{ .Values.container_bridge.resources.requests.ephemeralstorage }}
+ {{- end }}
{{- end }}
+ {{- if .Values.persistence.enabled }}
+ volumes:
+ - name: data
+ persistentVolumeClaim:
+ claimName: {{ include "agent.fullname" . }}-data
+ {{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
diff --git a/charts/agent/templates/external_secret.yaml b/charts/agent/templates/external_secret.yaml
new file mode 100644
index 00000000..a856e725
--- /dev/null
+++ b/charts/agent/templates/external_secret.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.externalSecrets.create }}
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+ name: nats-external
+spec:
+ refreshInterval: "10s"
+ secretStoreRef:
+ name: vault-store
+ kind: ClusterSecretStore
+ target:
+ name: nats-secret
+ data:
+ - secretKey: nats-token
+ remoteRef:
+ key: secret/generic/nats/auth-token
+ property: nats
+{{- end }}
\ No newline at end of file
diff --git a/charts/agent/templates/pvc.yaml b/charts/agent/templates/pvc.yaml
new file mode 100644
index 00000000..920c3dab
--- /dev/null
+++ b/charts/agent/templates/pvc.yaml
@@ -0,0 +1,13 @@
+{{- if .Values.persistence.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ include "agent.fullname" . }}-data
+spec:
+ accessModes:
+ - {{ .Values.persistence.accessMode }}
+ storageClassName: {{ .Values.persistence.storageClass | default "" }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/agent/values.yaml b/charts/agent/values.yaml
index 6531a77c..08701e04 100644
--- a/charts/agent/values.yaml
+++ b/charts/agent/values.yaml
@@ -8,7 +8,7 @@ image:
repository: ghcr.io/intelops/kubviz/kubviz-agent
pullPolicy: Always
# Overrides the image tag whose default is the chart appVersion.
- tag: "v1.0.0"
+ tag: "v1.1.7"
imagePullSecrets: []
nameOverride: ""
@@ -27,8 +27,10 @@ serviceAccount:
podAnnotations: {}
-podSecurityContext: {}
- # fsGroup: 2000
+podSecurityContext:
+ fsGroup: 1001
+ runAsUser: 1001
+ runAsGroup: 1001
securityContext: {}
# capabilities:
@@ -42,19 +44,29 @@ service:
type: ClusterIP
port: 80
+
git_bridge:
enabled: false
image:
repository: ghcr.io/intelops/kubviz/git-agent
pullPolicy: Always
- tag: "v1.0.0"
- resources:
- limits:
- cpu: 200m
- memory: 256Mi
- requests:
- cpu: 200m
- memory: 256Mi
+ tag: "v1.1.7"
+ resources:
+ limits:
+ cpu: 200m
+ memory: 256Mi
+ ephemeralstorage: 100Mi
+ requests:
+ cpu: 200m
+ memory: 256Mi
+ ephemeralstorage: 100Mi
+ persistence:
+ enabled: true
+ existingClaim: ""
+ storageClass: ""
+ mountPath: /mnt/agent/gb
+ accessMode: ReadWriteOnce
+ size: 5Gi
ingress:
enabled: true
annotations:
@@ -75,20 +87,28 @@ git_bridge:
# - chart-example.local
-
container_bridge:
enabled: false
image:
repository: ghcr.io/intelops/kubviz/container-agent
pullPolicy: Always
- tag: "v1.0.0"
- resources:
- limits:
- cpu: 200m
- memory: 256Mi
- requests:
- cpu: 200m
- memory: 256Mi
+ tag: "v1.1.7"
+ resources:
+ limits:
+ cpu: 200m
+ memory: 256Mi
+ ephemeralstorage: 100Mi
+ requests:
+ cpu: 200m
+ memory: 256Mi
+ ephemeralstorage: 100Mi
+ persistence:
+ enabled: true
+ existingClaim: ""
+ storageClass: ""
+ mountPath: /mnt/agent/cb
+ accessMode: ReadWriteOnce
+ size: 5Gi
ingress:
enabled: true
annotations:
@@ -108,6 +128,7 @@ container_bridge:
# hosts:
# - chart-example.local
+
ingress:
enabled: false
annotations: {}
@@ -125,9 +146,19 @@ resources:
limits:
cpu: 2
memory: 2Gi
+ ephemeralstorage: 1Gi
requests:
cpu: 200m
memory: 256Mi
+ ephemeralstorage: 256Mi
+
+persistence:
+ enabled: true
+ existingClaim: ""
+ storageClass: ""
+ mountPath: /mnt/agent/kbz
+ accessMode: ReadWriteOnce
+ size: 5Gi
autoscaling:
enabled: false
@@ -142,10 +173,111 @@ tolerations: []
affinity: {}
-schedulingInterval: "24h"
+schedule:
+ enabled: false
+ schedulingInterval: "24h"
+ outdatedInterval: "@every 18h"
+ getallInterval: "@every 19h"
+ kubescoreInterval: "@every 20h"
+ rakkessInterval: "@every 21h"
+ kubepreupgradeInterval: "@every 22h"
+ trivyimageInterval: "@every 24h"
+ trivysbomInterval: "@every 16h"
+ trivyclusterscanInterval: "@every 17h"
+
+kuberhealthy:
+ enabled: true
+ pollInterval: "60m"
+ url: "http://kuberhealthy:80"
+ check:
+ podRestarts:
+ enabled: true
+ runInterval: 5m
+ timeout: 10m
+ image:
+ registry: docker.io
+ repository: kuberhealthy/pod-restarts-check
+ tag: v2.5.0
+ allNamespaces: true
+ extraEnvs:
+ MAX_FAILURES_ALLOWED: "10"
+ nodeSelector: {}
+ tolerations: []
+ #- key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+ resources:
+ requests:
+ cpu: 10m
+ memory: 50Mi
+ podStatus:
+ enabled: true
+ runInterval: 5m
+ timeout: 15m
+ image:
+ registry: docker.io
+ repository: kuberhealthy/pod-status-check
+ tag: v1.3.0
+ allNamespaces: true
+ extraEnvs: {}
+ nodeSelector: {}
+ tolerations: []
+ #- key: "key"
+ # operator: "Equal"
+ # value: "value"
+ # effect: "NoSchedule"
+ resources:
+ requests:
+ cpu: 10m
+ memory: 50Mi
+ imagePullCheck:
+ enabled: true
+ runInterval: 60m
+ timeout: 1m
+ image:
+ repository: kuberhealthy/test-check
+ tag: v1.4.0
+ extraEnvs:
+ REPORT_FAILURE: "false"
+ REPORT_DELAY: "1s"
+ resources:
+ requests:
+ cpu: 10m
+ memory: 50Mi
+ resourceQuota:
+ enabled: true
+ runInterval: 1h
+ timeout: 2m
+ image:
+ repository: kuberhealthy/resource-quota-check
+ tag: v1.3.0
+ extraEnvs:
+ BLACKLIST: "default"
+ WHITELIST: "kube-system,kubviz"
+ resources:
+ requests:
+ cpu: 15m
+ memory: 15Mi
+ limits:
+ cpu: 30m
+
+opentelemetry:
+ isEnabled: false
+ url: "otelcollector.local"
+ appName: "kubviz"
clusterName: "kubviz"
+
+externalSecrets:
+ create: false
+
nats:
- host: kubviz-client-nats
+ host: kubviz-client-nats
auth:
- token: "UfmrJOYwYCCsgQvxvcfJ3BdI6c8WBbnD"
+ # Use token if you want to provide the token via Helm Values
+ token: ""
+ # Use a secret reference if you want to get a token from a secret
+ # secret:
+ # name: ""
+ # key: ""
diff --git a/charts/clickhouse/.helmignore b/charts/clickhouse/.helmignore
deleted file mode 100755
index f0c13194..00000000
--- a/charts/clickhouse/.helmignore
+++ /dev/null
@@ -1,21 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
diff --git a/charts/clickhouse/Chart.yaml b/charts/clickhouse/Chart.yaml
deleted file mode 100755
index e6ff5829..00000000
--- a/charts/clickhouse/Chart.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-appVersion: "19.14"
-description: ClickHouse is an open source column-oriented database management system
- capable of real time generation of analytical data reports using SQL queries
-home: https://clickhouse.yandex/
-icon: https://clickhouse.yandex/images/logo.png
-keywords:
-- clickhouse
-- olap
-- database
-maintainers:
-- email: 411934049@qq.com
- name: liwenhe
-name: clickhouse
-sources:
-- https://github.com/liwenhe1993/charts
-version: 1.0.2
diff --git a/charts/clickhouse/README.md b/charts/clickhouse/README.md
deleted file mode 100755
index ec22a59f..00000000
--- a/charts/clickhouse/README.md
+++ /dev/null
@@ -1,169 +0,0 @@
-# ClickHouse
-
-[ClickHouse](https://clickhouse.yandex/) is an open source column-oriented database management system capable of real time generation of analytical data reports using SQL queries.
-
-## Introduction
-This chart bootstraps a [ClickHouse](https://clickhouse.yandex/) replication cluster deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
-
-## Prerequisites
-
-- Kubernetes 1.10+
-- PV provisioner support in the underlying infrastructure
-
-## Installing the Chart
-
-To install the chart with the release name `my-release`:
-
-```bash
-$ helm repo add liwenhe https://liwenhe1993.github.io/charts/
-$ helm repo update
-$ helm install --name clickhouse liwenhe/clickhouse
-```
-These commands deploy Clickhouse on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
-
-> **Tip**: List all releases using `helm list`
-
-## Uninstalling the Chart
-
-To uninstall/delete the `clickhouse` deployment:
-
-```bash
-$ helm delete --purge clickhouse
-```
-
-The command removes all the Kubernetes components associated with the chart and deletes the release.
-
-## Configuration
-
-The following tables lists the configurable parameters of the Clickhouse chart and their default values.
-
-| Parameter | Description | Default |
-| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
-| `timezone` | World time and date for cities in all time zones | `Asia/Shanghai` |
-| `clusterDomain` | Kubernetes cluster domain | `cluster.local` |
-| `affinity` | Clickhouse Node selectors and tolerations for pod assignment | `nil` |
-| `clickhouse.podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees | `Parallel` |
-| `clickhouse.updateStrategy` | StatefulSet controller supports automated updates. There are two valid update strategies: RollingUpdate and OnDelete | `RollingUpdate` |
-| `clickhouse.rollingUpdatePartition` | Partition update strategy | `nil` |
-| `clickhouse.path` | The path to the directory containing data | `/var/lib/clickhouse` |
-| `clickhouse.http_port` | The port for connecting to the server over HTTP | `8123` |
-| `clickhouse.tcp_port` | Port for communicating with clients over the TCP protocol | `9000` |
-| `clickhouse.interserver_http_port` | Port for exchanging data between ClickHouse servers | `9009` |
-| `clickhouse.replicas` | The instance number of Clickhouse | `3` |
-| `clickhouse.image` | Docker image for Clickhouse | `yandex/clickhouse-server` |
-| `clickhouse.imageVersion` | Docker image version for Clickhouse | `19.14` |
-| `clickhouse.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
-| `clickhouse.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
-| `clickhouse.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
-| `clickhouse.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
-| `clickhouse.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
-| `clickhouse.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
-| `clickhouse.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
-| `clickhouse.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
-| `clickhouse.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
-| `clickhouse.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
-| `clickhouse.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
-| `clickhouse.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
-| `clickhouse.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
-| `clickhouse.persistentVolumeClaim.enabled` | Enable persistence using a `PersistentVolumeClaim` | `false` |
-| `clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled` | Turn on and off dataPersistentVolume | `false` |
-| `clickhouse.persistentVolumeClaim.dataPersistentVolume.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` |
-| `clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName` | Persistent Volume Storage Class | `` |
-| `clickhouse.persistentVolumeClaim.dataPersistentVolume.storage` | Persistent Volume Size | `500Gi` |
-| `clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled` | Turn on and off dataPersistentVolume | `false` |
-| `clickhouse.persistentVolumeClaim.logsPersistentVolume.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` |
-| `clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName` | Persistent Volume Storage Class | `` |
-| `clickhouse.persistentVolumeClaim.logsPersistentVolume.storage` | Persistent Volume Size | `50Gi` |
-| `clickhouse.ingress.enabled` | Enable ingress | `false` |
-| `clickhouse.ingress.host` | Ingress host | `` |
-| `clickhouse.ingress.path` | Ingress path | `` |
-| `clickhouse.ingress.tls.enabled` | Enable ingress tls | `false` |
-| `clickhouse.ingress.tls.hosts` | Ingress tls hosts | `[]` |
-| `clickhouse.ingress.tls.secretName` | Ingress tls `secretName` | `` |
-| `clickhouse.configmap.enabled` | If Configmap's enabled is `true`, Custom `config.xml`, `metrica.xml` and `users.xml` | `true` |
-| `clickhouse.configmap.max_connections` | The maximum number of inbound connections | `4096` |
-| `clickhouse.configmap.keep_alive_timeout` | The number of seconds that ClickHouse waits for incoming requests before closing the connection | `3` |
-| `clickhouse.configmap.max_concurrent_queries` | The maximum number of simultaneously processed requests | `100` |
-| `clickhouse.configmap.uncompressed_cache_size` | Cache size (in bytes) for uncompressed data used by table engines from the MergeTree | `8589934592` |
-| `clickhouse.configmap.mark_cache_size` | Approximate size (in bytes) of the cache of "marks" used by MergeTree | `5368709120` |
-| `clickhouse.configmap.umask` | Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read) | `022` |
-| `clickhouse.configmap.mlock_executable` | Enabling this option is recommended but will lead to increased startup time for up to a few seconds | `false` |
-| `clickhouse.configmap.builtin_dictionaries_reload_interval` | The interval in seconds before reloading built-in dictionaries | `3600` |
-| `clickhouse.configmap.max_session_timeout` | Maximum session timeout, in seconds | `3600` |
-| `clickhouse.configmap.default_session_timeout` | Default session timeout, in seconds | `60` |
-| `clickhouse.configmap.disable_internal_dns_cache` | Uncomment to disable ClickHouse internal DNS caching | `1` |
-| `clickhouse.configmap.max_open_files` | The maximum number of open files | `` |
-| `clickhouse.configmap.interserver_http_host` | The host name that can be used by other servers to access this server | `` |
-| `clickhouse.configmap.logger.path` | The log file path | `/var/log/clickhouse-server` |
-| `clickhouse.configmap.logger.level` | Logging level. Acceptable values: trace, debug, information, warning, error | `trace` |
-| `clickhouse.configmap.logger.size` | Size of the file | `1000M` |
-| `clickhouse.configmap.logger.count` | The number of archived log files that ClickHouse stores | `10` |
-| `clickhouse.configmap.compression.enabled` | Enable data compression settings | `false` |
-| `clickhouse.configmap.compression.cases[].min_part_size` | The minimum size of a table part | `10000000000` |
-| `clickhouse.configmap.compression.cases[].min_part_size_ratio` | The ratio of the minimum size of a table part to the full size of the table | `0.01` |
-| `clickhouse.configmap.compression.cases[].method` | Compression method. Acceptable values : lz4 or zstd(experimental) | `zstd` |
-| `clickhouse.configmap.zookeeper_servers.enabled` | Enable contains settings that allow ClickHouse to interact with a ZooKeeper cluster | `false` |
-| `clickhouse.configmap.zookeeper_servers.session_timeout_ms` | Maximum timeout for the client session in milliseconds | `30000` |
-| `clickhouse.configmap.zookeeper_servers.operation_timeout_ms` | Operation timeout for the client session in milliseconds | `10000` |
-| `clickhouse.configmap.zookeeper_servers.root` | The znode that is used as the root for znodes used by the ClickHouse server. Optional | `` |
-| `clickhouse.configmap.zookeeper_servers.identity` | User and password, that can be required by ZooKeeper to give access to requested znodes. Optional | `` |
-| `clickhouse.configmap.zookeeper_servers.config[].index` | ZooKeeper index | `` |
-| `clickhouse.configmap.zookeeper_servers.config[].host` | ZooKeeper host | `` |
-| `clickhouse.configmap.zookeeper_servers.config[].port` | ZooKeeper port | `` |
-| `clickhouse.configmap.remote_servers.enabled` | Enable configuration of clusters used by the Distributed table engine | `true` |
-| `clickhouse.configmap.remote_servers.internal_replication` | If set to `true`, the table that data is written to replicates the data itself | `false` |
-| `clickhouse.configmap.remote_servers.replica.user` | Name of the user for connecting to a remote server. Access is configured in the users.xml file. | `default` |
-| `clickhouse.configmap.remote_servers.replica.password` | The password for connecting to a remote server (not masked). | `nil` |
-| `clickhouse.configmap.remote_servers.replica.compression` | Use data compression. | `true` |
-| `clickhouse.configmap.remote_servers.replica.backup.enabled` | Enable replica backup | `false` |
-| `clickhouse.configmap.remote_servers.graphite.enabled` | Enable graphite | `false` |
-| `clickhouse.configmap.remote_servers.graphite.config[].timeout` | The timeout for sending data, in seconds | `0.1` |
-| `clickhouse.configmap.remote_servers.graphite.config[].interval` | The interval for sending, in seconds | `60` |
-| `clickhouse.configmap.remote_servers.graphite.config[].root_path` | Prefix for keys | `one_min` |
-| `clickhouse.configmap.remote_servers.graphite.config[].metrics` | Sending data from the `system.metrics` table | `true` |
-| `clickhouse.configmap.remote_servers.graphite.config[].events` | Sending deltas accumulated for the time period from the `system.events` table | `true` |
-| `clickhouse.configmap.remote_servers.graphite.config[].events_cumulative` | Sending cumulative data from the `system.events` table | `true` |
-| `clickhouse.configmap.remote_servers.graphite.config[].asynchronous_metrics` | Sending data from the `system.asynchronous_metrics` table | `true` |
-| `clickhouse.configmap.profiles.enabled` | Enable settings profiles | `false` |
-| `clickhouse.configmap.profiles.profile[].name` | The name of a settings profile | `` |
-| `clickhouse.configmap.profiles.profile[].config` | The config of a settings profile | `{}` |
-| `clickhouse.configmap.users.enabled` | Enable user settings | `false` |
-| `clickhouse.configmap.users.user[].name` | The name of a user | `` |
-| `clickhouse.configmap.users.user[].config` | The config of a user | `{}` |
-| `clickhouse.configmap.quotas.enabled` | Enable quotas | `false` |
-| `clickhouse.configmap.quotas.quota[].name` | The name of a quota | `` |
-| `clickhouse.configmap.quotas.quota[].config[]` | The config of a quota | `[]` |
-| `tabix.enabled` | Enable tabix | `false` |
-| `tabix.replicas` | The instance number of Tabix | `1` |
-| `tabix.updateStrategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate | `RollingUpdate` |
-| `tabix.updateStrategy.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `3` |
-| `tabix.updateStrategy.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `1` |
-| `tabix.image` | Docker image name | `spoonest/clickhouse-tabix-web-client` |
-| `tabix.imageVersion` | Docker image version | `stable` |
-| `tabix.imagePullPolicy` | Docker image pull policy | `IfNotPresent` |
-| `tabix.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
-| `tabix.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
-| `tabix.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
-| `tabix.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
-| `tabix.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
-| `tabix.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
-| `tabix.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
-| `tabix.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
-| `tabix.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
-| `tabix.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
-| `tabix.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed | `3` |
-| `tabix.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful | `1` |
-| `tabix.security.user` | Tabix login username | `admin` |
-| `tabix.security.password` | Tabix login password | `admin` |
-| `tabix.automaticConnection.chName` | Automatic connection Clickhouse name | `` |
-| `tabix.automaticConnection.chHost` | Automatic connection Clickhouse host | `` |
-| `tabix.automaticConnection.chLogin` | Automatic connection Clickhouse login username | `` |
-| `tabix.automaticConnection.chPassword` | Automatic connection Clickhouse login password | `` |
-| `tabix.automaticConnection.chParams` | Automatic connection Clickhouse params | `` |
-| `tabix.ingress.enabled` | Enable ingress | `false` |
-| `tabix.ingress.host` | Ingress host | `` |
-| `tabix.ingress.path` | Ingress path | `` |
-| `tabix.ingress.tls.enabled` | Enable ingress tls | `false` |
-| `tabix.ingress.tls.hosts` | Ingress tls hosts | `[]` |
-
-For more information please refer to the [liwenhe1993/charts](https://github.com/liwenhe1993/charts.git) documentation.
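The table above documents the values accepted by the in-repo clickhouse chart that this diff removes. Purely as an illustration of how a few of those keys were consumed, here is a minimal sketch using the standard Helm CLI; the release name, namespace and the particular overrides are placeholders, not part of this change.

```shell
# Hypothetical install of the bundled chart, overriding a few of the values
# documented in the table above. Release name and namespace are placeholders.
helm install clickhouse ./charts/clickhouse \
  --namespace kubviz --create-namespace \
  --set clickhouse.replicas=3 \
  --set clickhouse.persistentVolumeClaim.enabled=true \
  --set tabix.enabled=true
```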
diff --git a/charts/clickhouse/templates/NOTES.txt b/charts/clickhouse/templates/NOTES.txt
deleted file mode 100755
index f8a6dd14..00000000
--- a/charts/clickhouse/templates/NOTES.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-** Please be patient while the chart is being deployed **
-
-1. Get the Clickhouse URL by running:
-
-{{- if .Values.clickhouse.ingress.enabled }}
-
- export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "clickhouse.fullname" . }} -o jsonpath='{.spec.rules[0].host}')
- echo "Clickhouse URL: http://$HOSTNAME/"
-
-{{- else }}
-
- echo URL : http://127.0.0.1:8123/
- kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 8123:{{ .Values.clickhouse.http_port }}
- kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 9000:{{ .Values.clickhouse.tcp_port }}
- kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 9009:{{ .Values.clickhouse.interserver_http_port }}
-
-{{- end }}
-
-2. Get the Tabix URL by running:
-
-{{- if .Values.tabix.ingress.enabled }}
-
- export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "clickhouse.fullname" . }}-tabix -o jsonpath='{.spec.rules[0].host}')
- echo "Tabix URL: http://$HOSTNAME/"
-
-{{- else }}
-
- kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }}-tabix 80
-
-{{- end }}
diff --git a/charts/clickhouse/templates/_helpers.tpl b/charts/clickhouse/templates/_helpers.tpl
deleted file mode 100755
index e6690cdf..00000000
--- a/charts/clickhouse/templates/_helpers.tpl
+++ /dev/null
@@ -1,56 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "clickhouse.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "clickhouse.fullname" -}}
-{{- if .Values.fullnameOverride -}}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- if contains $name .Release.Name -}}
-{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "clickhouse.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create clickhouse path.
-if .Values.clickhouse.path is empty, default value "/var/lib/clickhouse".
-*/}}
-{{- define "clickhouse.fullpath" -}}
-{{- if .Values.clickhouse.path -}}
-{{- .Values.clickhouse.path | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- printf "%s" "/var/lib/clickhouse" -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Create clickhouse log path.
-if .Values.clickhouse.configmap.logger.path is empty, default value "/var/log/clickhouse-server".
-*/}}
-{{- define "clickhouse.logpath" -}}
-{{- if .Values.clickhouse.configmap.logger.path -}}
-{{- .Values.clickhouse.configmap.logger.path | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- printf "%s" "/var/log/clickhouse-server" -}}
-{{- end -}}
-{{- end -}}
diff --git a/charts/clickhouse/templates/configmap-config.yaml b/charts/clickhouse/templates/configmap-config.yaml
deleted file mode 100755
index 3bfae41e..00000000
--- a/charts/clickhouse/templates/configmap-config.yaml
+++ /dev/null
@@ -1,112 +0,0 @@
-{{- if .Values.clickhouse.configmap.enabled }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ include "clickhouse.fullname" . }}-config
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-config
- app.kubernetes.io/instance: {{ .Release.Name }}-config
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-data:
- config.xml: |-
-
-
- {{ include "clickhouse.fullpath" . }}/
- {{ printf "%s/tmp/" (include "clickhouse.fullpath" .) }}
- {{ printf "%s/user_files/" (include "clickhouse.fullpath" .) }}
- {{ printf "%s/format_schemas/" (include "clickhouse.fullpath" .) }}
-
- /etc/clickhouse-server/metrica.d/metrica.xml
-
- users.xml
-
- {{ template "clickhouse.fullname" . }}
- 0.0.0.0
- {{ .Values.clickhouse.http_port | default "8123" }}
- {{ .Values.clickhouse.tcp_port | default "9000" }}
- {{ .Values.clickhouse.interserver_http_port | default "9009" }}
- {{ .Values.clickhouse.configmap.max_connections | default "4096" }}
- {{ .Values.clickhouse.configmap.keep_alive_timeout | default "3" }}
- {{ .Values.clickhouse.configmap.max_concurrent_queries | default "100" }}
- {{ .Values.clickhouse.configmap.uncompressed_cache_size | default "8589934592" }}
- {{ .Values.clickhouse.configmap.mark_cache_size | default "5368709120" }}
- {{ .Values.timezone | default "Asia/Shanghai" }}
- {{ .Values.clickhouse.configmap.umask | default "027" }}
- {{ .Values.clickhouse.configmap.mlock_executable | default "false" }}
-
-
-
- {{ .Values.clickhouse.configmap.builtin_dictionaries_reload_interval | default "3600" }}
- {{ .Values.clickhouse.configmap.max_session_timeout | default "3600" }}
- {{ .Values.clickhouse.configmap.default_session_timeout | default "60" }}
- {{ .Values.clickhouse.configmap.disable_internal_dns_cache | default "1" }}
-
-
- system
-
- toYYYYMM(event_date)
- 7500
-
-
-
- system
-
- toYYYYMM(event_date)
- 7500
-
-
-
- /clickhouse/task_queue/ddl
-
-
- {{- if .Values.clickhouse.configmap.logger }}
-
- {{ .Values.clickhouse.configmap.logger.level | default "trace" }}
- {{ printf "%s/%s" (include "clickhouse.logpath" .) "clickhouse-server.log" }}
- {{ printf "%s/%s" (include "clickhouse.logpath" .) "clickhouse-server.err.log" }}
- {{ .Values.clickhouse.configmap.logger.size | default "1000M" }}
- {{ .Values.clickhouse.configmap.logger.count | default "10" }}
-
- {{- end }}
-
- {{- if .Values.clickhouse.configmap.compression.enabled }}
-
- {{- range .Values.clickhouse.configmap.compression.cases }}
- {{- with . }}
-
- {{ .min_part_size }}
- {{ .min_part_size_ratio }}
- {{ .method }}
-
- {{- end }}
- {{- end }}
-
- {{- end }}
-
- {{- if .Values.clickhouse.configmap.graphite.enabled }}
- {{- range .Values.clickhouse.configmap.graphite.config }}
- {{- with . }}
-
- {{ template "clickhouse.fullname" $ }}-graphite.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}
- {{ $.Values.clickhouse.graphite.service.port }}
- {{ .timeout | default "0.1" }}
- {{ .interval | default "60" }}
- {{ .root_path | default "one_min" }}
- {{ .metrics | default "true" }}
- {{ .events | default "true" }}
- {{ .events_cumulative | default "true" }}
- {{ .asynchronous_metrics | default "true" }}
-
- {{- end }}
- {{- end }}
- {{- end }}
-
- {{- if .Values.clickhouse.configmap.max_open_files }}
- {{ .Values.clickhouse.configmap.max_open_files }}
- {{- end }}
-
- {{- if .Values.clickhouse.configmap.interserver_http_host }}
- {{ .Values.clickhouse.configmap.interserver_http_host }}
- {{- end }}
-
-{{- end }}
diff --git a/charts/clickhouse/templates/configmap-metrika.yaml b/charts/clickhouse/templates/configmap-metrika.yaml
deleted file mode 100755
index 2d14bc99..00000000
--- a/charts/clickhouse/templates/configmap-metrika.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-{{- if .Values.clickhouse.configmap.enabled }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ include "clickhouse.fullname" . }}-metrica
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-metrica
- app.kubernetes.io/instance: {{ .Release.Name }}-metrica
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-data:
- metrica.xml: |-
-
-
- {{- if .Values.clickhouse.configmap.zookeeper_servers.enabled }}
-
- {{- range .Values.clickhouse.configmap.zookeeper_servers.config }}
- {{- with . }}
-
- {{ .host }}
- {{ .port }}
-
- {{- end }}
- {{- end }}
- {{ .Values.clickhouse.configmap.zookeeper_servers.session_timeout_ms }}
- {{ .Values.clickhouse.configmap.zookeeper_servers.operation_timeout_ms }}
- {{ .Values.clickhouse.configmap.zookeeper_servers.root }}
- {{ .Values.clickhouse.configmap.zookeeper_servers.identity }}
-
- {{- end }}
-
- {{- if .Values.clickhouse.configmap.remote_servers.enabled }}
-
- <{{ include "clickhouse.fullname" . }}>
- {{- range untilStep 0 (int .Values.clickhouse.replicas) 1 }}
-
-
- {{ $.Values.clickhouse.configmap.remote_servers.internal_replication | default "false" }}
- {{ include "clickhouse.fullname" $ }}-{{ . }}.{{ include "clickhouse.fullname" $ }}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}
- {{ $.Values.clickhouse.tcp_port}}
- {{- if $.Values.clickhouse.configmap.remote_servers.replica.user }}
- {{ $.Values.clickhouse.configmap.remote_servers.replica.user }}
- {{- end }}
- {{- if $.Values.clickhouse.configmap.remote_servers.replica.password }}
- {{ $.Values.clickhouse.configmap.remote_servers.replica.password }}
- {{- end }}
- {{- if $.Values.clickhouse.configmap.remote_servers.replica.secure }}
- {{ $.Values.clickhouse.configmap.remote_servers.replica.secure }}
- {{- end }}
- {{- if $.Values.clickhouse.configmap.remote_servers.replica.compression }}
- {{ $.Values.clickhouse.configmap.remote_servers.replica.compression }}
- {{- end }}
-
- {{- if $.Values.clickhouse.configmap.remote_servers.replica.backup.enabled }}
-
- {{ include "clickhouse.fullname" $ }}-replica-{{ . }}.{{ include "clickhouse.fullname" $ }}-replica-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}
- {{ $.Values.clickhouse.tcp_port}}
- {{- if $.Values.clickhouse.configmap.remote_servers.replica.user }}
- {{ $.Values.clickhouse.configmap.remote_servers.replica.user }}
- {{- end }}
- {{- if $.Values.clickhouse.configmap.remote_servers.replica.password }}
- {{ $.Values.clickhouse.configmap.remote_servers.replica.password }}
- {{- end }}
- {{- if $.Values.clickhouse.configmap.remote_servers.replica.secure }}
- {{ $.Values.clickhouse.configmap.remote_servers.replica.secure }}
- {{- end }}
- {{- if $.Values.clickhouse.configmap.remote_servers.replica.compression }}
- {{ $.Values.clickhouse.configmap.remote_servers.replica.compression }}
- {{- end }}
-
- {{- end }}
-
- {{- end }}
- </{{ include "clickhouse.fullname" . }}>
-
- {{- end }}
-
-{{- end }}
diff --git a/charts/clickhouse/templates/configmap-users.yaml b/charts/clickhouse/templates/configmap-users.yaml
deleted file mode 100755
index 99dbdc3c..00000000
--- a/charts/clickhouse/templates/configmap-users.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-{{- if .Values.clickhouse.configmap.enabled }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ include "clickhouse.fullname" . }}-users
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-users
- app.kubernetes.io/instance: {{ .Release.Name }}-users
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-data:
- users.xml: |-
-
-
- {{- if .Values.clickhouse.configmap.profiles.enabled }}
-
- {{- range .Values.clickhouse.configmap.profiles.profile }}
- {{- with . }}
- <{{ .name }}>
- {{- range $k_1, $v_1 := .config }}
- <{{ $k_1 }}>{{ $v_1 }}</{{ $k_1 }}>
- {{- end }}
- </{{ .name }}>
- {{- end }}
- {{- end }}
-
- {{- end }}
-
- {{- if .Values.clickhouse.configmap.users.enabled }}
-
- {{- range $key, $value := .Values.clickhouse.configmap.users.user }}
- {{- with . }}
- <{{ .name }}>
- {{- range $k_1, $v_1 := .config }}
- {{- if (eq "networks" $k_1) }}
-
- {{- range $v_1 }}
- {{- with .}}
- {{ . }}
- {{- end }}
- {{- end }}
-
- {{- else }}
- <{{ $k_1 }}>{{ $v_1 }}</{{ $k_1 }}>
- {{- end }}
- {{- end }}
- </{{ .name }}>
- {{- end }}
- {{- end }}
-
- {{- end }}
-
- {{- if .Values.clickhouse.configmap.quotas.enabled }}
-
- {{- range $key, $value := .Values.clickhouse.configmap.quotas.quota }}
- {{- with . }}
- <{{ .name }}>
- {{- range $val := .config }}
- {{- range $k_1, $v_1 := $val }}
- <{{ $k_1 }}>{{ $v_1 }}</{{ $k_1 }}>
- {{- end }}
- {{- end }}
- </{{ .name }}>
- {{- end }}
- {{- end }}
-
- {{- end }}
-
-{{- end }}
diff --git a/charts/clickhouse/templates/deployment-tabix.yaml b/charts/clickhouse/templates/deployment-tabix.yaml
deleted file mode 100755
index e3c9e453..00000000
--- a/charts/clickhouse/templates/deployment-tabix.yaml
+++ /dev/null
@@ -1,85 +0,0 @@
-{{- if .Values.tabix.enabled }}
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ include "clickhouse.fullname" . }}-tabix
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix
- app.kubernetes.io/instance: {{ .Release.Name }}-tabix
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
- replicas: {{ .Values.tabix.replicas }}
- selector:
- matchLabels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix
- app.kubernetes.io/instance: {{ .Release.Name }}-tabix
-{{- if .Values.tabix.updateStrategy }}
- strategy:
- type: {{ .Values.tabix.updateStrategy.type }}
- rollingUpdate:
- maxSurge: {{ .Values.tabix.updateStrategy.maxSurge }}
- maxUnavailable: {{ .Values.tabix.updateStrategy.maxUnavailable }}
-{{- end }}
- template:
- metadata:
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix
- app.kubernetes.io/instance: {{ .Release.Name }}-tabix
- spec:
- {{- if .Values.affinity }}
- affinity:
-{{ toYaml .Values.affinity | indent 8 }}
- {{- end }}
- {{- if .Values.tabix.imagePullSecrets }}
- imagePullSecrets:
- {{- range .Values.tabix.imagePullSecrets }}
- - name: {{ . | quote }}
- {{- end }}
- {{- end }}
- containers:
- - name: {{ include "clickhouse.name" . }}-tabix
- image: {{ .Values.tabix.image }}:{{ .Values.tabix.imageVersion }}
- imagePullPolicy: {{ .Values.tabix.imagePullPolicy }}
- ports:
- - name: http
- containerPort: 80
- env:
- {{- if .Values.tabix.security }}
- - name: USER
- value: {{ .Values.tabix.security.user }}
- - name: PASSWORD
- value: {{ .Values.tabix.security.password }}
- {{- end }}
- {{- if .Values.tabix.automaticConnection }}
- - name: CH_NAME
- value: {{ .Values.tabix.automaticConnection.chName }}
- - name: CH_HOST
- value: {{ .Values.tabix.automaticConnection.chHost }}
- - name: CH_LOGIN
- value: {{ .Values.tabix.automaticConnection.chLogin }}
- - name: CH_PASSWORD
- value: {{ .Values.tabix.automaticConnection.chPassword }}
- - name: CH_PARAMS
- value: {{ .Values.tabix.automaticConnection.chParams }}
- {{- end }}
- {{- if .Values.tabix.livenessProbe.enabled }}
- livenessProbe:
- tcpSocket:
- port: 80
- initialDelaySeconds: {{ .Values.tabix.livenessProbe.initialDelaySeconds }}
- periodSeconds: {{ .Values.tabix.livenessProbe.periodSeconds }}
- timeoutSeconds: {{ .Values.tabix.livenessProbe.timeoutSeconds }}
- failureThreshold: {{ .Values.tabix.livenessProbe.failureThreshold }}
- successThreshold: {{ .Values.tabix.livenessProbe.successThreshold }}
- {{- end }}
- {{- if .Values.tabix.readinessProbe.enabled }}
- readinessProbe:
- tcpSocket:
- port: 80
- initialDelaySeconds: {{ .Values.tabix.readinessProbe.initialDelaySeconds }}
- periodSeconds: {{ .Values.tabix.readinessProbe.periodSeconds }}
- timeoutSeconds: {{ .Values.tabix.readinessProbe.timeoutSeconds }}
- failureThreshold: {{ .Values.tabix.readinessProbe.failureThreshold }}
- successThreshold: {{ .Values.tabix.readinessProbe.successThreshold }}
- {{- end }}
-{{- end }}
diff --git a/charts/clickhouse/templates/ingress-clickhouse.yaml b/charts/clickhouse/templates/ingress-clickhouse.yaml
deleted file mode 100755
index a4a672b7..00000000
--- a/charts/clickhouse/templates/ingress-clickhouse.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-{{- if .Values.clickhouse.ingress.enabled}}
-apiVersion: extensions/v1beta1
-kind: Ingress
-metadata:
- name: {{ include "clickhouse.fullname" . }}
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
- rules:
- host: {{ .Values.clickhouse.ingress.host }}
- http:
- paths:
- - path: {{ .Values.clickhouse.ingress.path }}
- backend:
- serviceName: {{ include "clickhouse.fullname" . }}
- servicePort: http
-{{- if .Values.clickhouse.ingress.tls.enabled }}
- tls:
- hosts:
- {{- range .Values.clickhouse.ingress.tls.hosts }}
- - {{ . | quote }}
- {{- end }}
- secretName: {{ .Values.clickhouse.ingress.tls.secretName }}
-{{- end }}
-{{- end }}
diff --git a/charts/clickhouse/templates/ingress-tabix.yaml b/charts/clickhouse/templates/ingress-tabix.yaml
deleted file mode 100755
index 3b85c886..00000000
--- a/charts/clickhouse/templates/ingress-tabix.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
-{{- if .Values.tabix.enabled }}
-{{- if .Values.tabix.ingress.enabled}}
-apiVersion: extensions/v1beta1
-kind: Ingress
-metadata:
- name: {{ include "clickhouse.fullname" . }}-tabix
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix
- app.kubernetes.io/instance: {{ .Release.Name }}-tabix
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
- rules:
- host: {{ .Values.tabix.ingress.host }}
- http:
- paths:
- - path: {{ .Values.tabix.ingress.path }}
- backend:
- serviceName: {{ include "clickhouse.fullname" . }}-tabix
- servicePort: http
-{{- if .Values.tabix.ingress.tls.enabled }}
- tls:
- hosts:
- {{- range .Values.tabix.ingress.tls.hosts }}
- - {{ . | quote }}
- {{- end }}
- secretName: {{ .Values.tabix.ingress.tls.secretName }}
-{{- end }}
-{{- end }}
-{{- end }}
diff --git a/charts/clickhouse/templates/statefulset-clickhouse-replica.yaml b/charts/clickhouse/templates/statefulset-clickhouse-replica.yaml
deleted file mode 100755
index 9b1db994..00000000
--- a/charts/clickhouse/templates/statefulset-clickhouse-replica.yaml
+++ /dev/null
@@ -1,184 +0,0 @@
-{{- if .Values.clickhouse.configmap.remote_servers.replica.backup.enabled }}
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
- name: {{ include "clickhouse.fullname" . }}-replica
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica
- app.kubernetes.io/instance: {{ .Release.Name }}-replica
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
- replicas: {{ .Values.clickhouse.replicas }}
- podManagementPolicy: {{ .Values.clickhouse.podManagementPolicy }}
- updateStrategy:
- type: {{ .Values.clickhouse.updateStrategy }}
- {{- if (eq "Recreate" .Values.clickhouse.updateStrategy) }}
- rollingUpdate: null
- {{- else if .Values.clickhouse.rollingUpdatePartition }}
- rollingUpdate:
- partition: {{ .Values.clickhouse.rollingUpdatePartition }}
- {{- end }}
- serviceName: {{ include "clickhouse.fullname" . }}-replica-headless
- selector:
- matchLabels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica
- app.kubernetes.io/instance: {{ .Release.Name }}-replica
- template:
- metadata:
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica
- app.kubernetes.io/instance: {{ .Release.Name }}-replica
- spec:
- {{- if .Values.affinity }}
- affinity:
-{{ toYaml .Values.affinity | indent 8 }}
- {{- end }}
- {{- if .Values.clickhouse.imagePullSecrets }}
- imagePullSecrets:
- {{- range .Values.clickhouse.imagePullSecrets }}
- - name: {{ . | quote }}
- {{- end }}
- {{- end }}
- initContainers:
- - name: init
- image: busybox:1.31.0
- imagePullPolicy: IfNotPresent
- args:
- - /bin/sh
- - -c
- - |
- mkdir -p /etc/clickhouse-server/metrica.d
- containers:
- - name: {{ include "clickhouse.fullname" . }}-replica
- image: {{ .Values.clickhouse.image }}:{{ .Values.clickhouse.imageVersion }}
- imagePullPolicy: {{ .Values.clickhouse.imagePullPolicy }}
- ports:
- - name: http-port
- containerPort: {{ .Values.clickhouse.http_port | default "8123" }}
- - name: tcp-port
- containerPort: {{ .Values.clickhouse.tcp_port | default "9000" }}
- - name: inter-http-port
- containerPort: {{ .Values.clickhouse.interserver_http_port | default "9009" }}
- {{- if .Values.clickhouse.livenessProbe.enabled }}
- livenessProbe:
- tcpSocket:
- port: {{ .Values.clickhouse.tcp_port }}
- initialDelaySeconds: {{ .Values.clickhouse.livenessProbe.initialDelaySeconds }}
- periodSeconds: {{ .Values.clickhouse.livenessProbe.periodSeconds }}
- timeoutSeconds: {{ .Values.clickhouse.livenessProbe.timeoutSeconds }}
- failureThreshold: {{ .Values.clickhouse.livenessProbe.failureThreshold }}
- successThreshold: {{ .Values.clickhouse.livenessProbe.successThreshold }}
- {{- end }}
- {{- if .Values.clickhouse.readinessProbe.enabled }}
- readinessProbe:
- tcpSocket:
- port: {{ .Values.clickhouse.tcp_port }}
- initialDelaySeconds: {{ .Values.clickhouse.readinessProbe.initialDelaySeconds }}
- periodSeconds: {{ .Values.clickhouse.readinessProbe.periodSeconds }}
- timeoutSeconds: {{ .Values.clickhouse.readinessProbe.timeoutSeconds }}
- failureThreshold: {{ .Values.clickhouse.readinessProbe.failureThreshold }}
- successThreshold: {{ .Values.clickhouse.readinessProbe.successThreshold }}
- {{- end }}
- volumeMounts:
- - name: {{ include "clickhouse.fullname" . }}-replica-data
- mountPath: {{ include "clickhouse.fullpath" . }}
- - name: {{ include "clickhouse.fullname" . }}-replica-logs
- mountPath: {{ include "clickhouse.logpath" . }}
- - name: {{ include "clickhouse.fullname" . }}-config
- mountPath: /etc/clickhouse-server/config.d
- - name: {{ include "clickhouse.fullname" . }}-metrica
- mountPath: /etc/clickhouse-server/metrica.d
- - name: {{ include "clickhouse.fullname" . }}-users
- mountPath: /etc/clickhouse-server/users.d
- securityContext:
- privileged: true
- runAsUser: 0
- {{- if .Values.clickhouse.imagePullSecrets }}
- imagePullSecrets:
- - name: {{ .Values.clickhouse.imagePullSecrets }}
- {{- end }}
- {{- if .Values.clickhouse.nodeSelector }}
- nodeSelector:
-{{ toYaml .Values.clickhouse.nodeSelector | indent 8 }}
- {{- end }}
- volumes:
- - name: {{ include "clickhouse.fullname" . }}-replica-data
- {{- if .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled }}
- persistentVolumeClaim:
- claimName: {{ include "clickhouse.fullname" . }}-replica-data
- {{- else }}
- emptyDir: {}
- {{- end }}
- - name: {{ include "clickhouse.fullname" . }}-replica-logs
- {{- if .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled }}
- persistentVolumeClaim:
- claimName: {{ include "clickhouse.fullname" . }}-replica-logs
- {{- else }}
- emptyDir: {}
- {{- end }}
- {{- if .Values.clickhouse.configmap.enabled }}
- - name: {{ include "clickhouse.fullname" . }}-config
- configMap:
- name: {{ include "clickhouse.fullname" . }}-config
- items:
- - key: config.xml
- path: config.xml
- - name: {{ include "clickhouse.fullname" . }}-metrica
- configMap:
- name: {{ include "clickhouse.fullname" . }}-metrica
- items:
- - key: metrica.xml
- path: metrica.xml
- - name: {{ include "clickhouse.fullname" . }}-users
- configMap:
- name: {{ include "clickhouse.fullname" . }}-users
- items:
- - key: users.xml
- path: users.xml
- {{- end }}
-{{- if .Values.clickhouse.persistentVolumeClaim.enabled }}
- volumeClaimTemplates:
-{{- if .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled }}
- - metadata:
- name: {{ include "clickhouse.fullname" . }}-replica-data
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica-data
- app.kubernetes.io/instance: {{ .Release.Name }}-replica-data
- app.kubernetes.io/managed-by: {{ .Release.Service }}
- spec:
- accessModes:
- {{- range .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.accessModes }}
- - {{ . | quote }}
- {{- end }}
- {{- if (eq "-" .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName) }}
- storageClassName: ""
- {{- else }}
- storageClassName: {{ .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName }}
- {{- end }}
- resources:
- requests:
- storage: {{ .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storage }}
-{{- end }}
-{{- if .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled }}
- - metadata:
- name: {{ include "clickhouse.fullname" . }}-replica-logs
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica-logs
- app.kubernetes.io/instance: {{ .Release.Name }}-replica-logs
- app.kubernetes.io/managed-by: {{ .Release.Service }}
- spec:
- accessModes:
- {{- range .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.accessModes }}
- - {{ . | quote }}
- {{- end }}
- {{- if (eq "-" .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName) }}
- storageClassName: ""
- {{- else }}
- storageClassName: {{ .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName }}
- {{- end }}
- resources:
- requests:
- storage: {{ .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storage }}
-{{- end }}
-{{- end }}
-{{- end }}
diff --git a/charts/clickhouse/templates/statefulset-clickhouse.yaml b/charts/clickhouse/templates/statefulset-clickhouse.yaml
deleted file mode 100755
index ec871274..00000000
--- a/charts/clickhouse/templates/statefulset-clickhouse.yaml
+++ /dev/null
@@ -1,182 +0,0 @@
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
- name: {{ include "clickhouse.fullname" . }}
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
- replicas: {{ .Values.clickhouse.replicas }}
- podManagementPolicy: {{ .Values.clickhouse.podManagementPolicy }}
- updateStrategy:
- type: {{ .Values.clickhouse.updateStrategy }}
- {{- if (eq "Recreate" .Values.clickhouse.updateStrategy) }}
- rollingUpdate: null
- {{- else if .Values.clickhouse.rollingUpdatePartition }}
- rollingUpdate:
- partition: {{ .Values.clickhouse.rollingUpdatePartition }}
- {{- end }}
- serviceName: {{ include "clickhouse.fullname" . }}-headless
- selector:
- matchLabels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- template:
- metadata:
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- spec:
- {{- if .Values.affinity }}
- affinity:
-{{ toYaml .Values.affinity | indent 8 }}
- {{- end }}
- {{- if .Values.clickhouse.imagePullSecrets }}
- imagePullSecrets:
- {{- range .Values.clickhouse.imagePullSecrets }}
- - name: {{ . | quote }}
- {{- end }}
- {{- end }}
- initContainers:
- - name: init
- image: busybox:1.31.0
- imagePullPolicy: IfNotPresent
- args:
- - /bin/sh
- - -c
- - |
- mkdir -p /etc/clickhouse-server/metrica.d
- containers:
- - name: {{ include "clickhouse.fullname" . }}
- image: {{ .Values.clickhouse.image }}:{{ .Values.clickhouse.imageVersion }}
- imagePullPolicy: {{ .Values.clickhouse.imagePullPolicy }}
- ports:
- - name: http-port
- containerPort: {{ .Values.clickhouse.http_port | default "8123" }}
- - name: tcp-port
- containerPort: {{ .Values.clickhouse.tcp_port | default "9000" }}
- - name: inter-http-port
- containerPort: {{ .Values.clickhouse.interserver_http_port | default "9009" }}
- {{- if .Values.clickhouse.livenessProbe.enabled }}
- livenessProbe:
- tcpSocket:
- port: {{ .Values.clickhouse.tcp_port }}
- initialDelaySeconds: {{ .Values.clickhouse.livenessProbe.initialDelaySeconds }}
- periodSeconds: {{ .Values.clickhouse.livenessProbe.periodSeconds }}
- timeoutSeconds: {{ .Values.clickhouse.livenessProbe.timeoutSeconds }}
- failureThreshold: {{ .Values.clickhouse.livenessProbe.failureThreshold }}
- successThreshold: {{ .Values.clickhouse.livenessProbe.successThreshold }}
- {{- end }}
- {{- if .Values.clickhouse.readinessProbe.enabled }}
- readinessProbe:
- tcpSocket:
- port: {{ .Values.clickhouse.tcp_port }}
- initialDelaySeconds: {{ .Values.clickhouse.readinessProbe.initialDelaySeconds }}
- periodSeconds: {{ .Values.clickhouse.readinessProbe.periodSeconds }}
- timeoutSeconds: {{ .Values.clickhouse.readinessProbe.timeoutSeconds }}
- failureThreshold: {{ .Values.clickhouse.readinessProbe.failureThreshold }}
- successThreshold: {{ .Values.clickhouse.readinessProbe.successThreshold }}
- {{- end }}
- volumeMounts:
- - name: {{ include "clickhouse.fullname" . }}-data
- mountPath: {{ include "clickhouse.fullpath" . }}
- - name: {{ include "clickhouse.fullname" . }}-logs
- mountPath: {{ include "clickhouse.logpath" . }}
- - name: {{ include "clickhouse.fullname" . }}-config
- mountPath: /etc/clickhouse-server/config.d
- - name: {{ include "clickhouse.fullname" . }}-metrica
- mountPath: /etc/clickhouse-server/metrica.d
- - name: {{ include "clickhouse.fullname" . }}-users
- mountPath: /etc/clickhouse-server/users.d
- securityContext:
- privileged: true
- runAsUser: 0
- {{- if .Values.clickhouse.imagePullSecrets }}
- imagePullSecrets:
- - name: {{ .Values.clickhouse.imagePullSecrets }}
- {{- end }}
- {{- if .Values.clickhouse.nodeSelector }}
- nodeSelector:
-{{ toYaml .Values.clickhouse.nodeSelector | indent 8 }}
- {{- end }}
- volumes:
- - name: {{ include "clickhouse.fullname" . }}-data
- {{- if .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled }}
- persistentVolumeClaim:
- claimName: {{ include "clickhouse.fullname" . }}-data
- {{- else }}
- emptyDir: {}
- {{- end }}
- - name: {{ include "clickhouse.fullname" . }}-logs
- {{- if .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled }}
- persistentVolumeClaim:
- claimName: {{ include "clickhouse.fullname" . }}-logs
- {{- else }}
- emptyDir: {}
- {{- end }}
- {{- if .Values.clickhouse.configmap.enabled }}
- - name: {{ include "clickhouse.fullname" . }}-config
- configMap:
- name: {{ include "clickhouse.fullname" . }}-config
- items:
- - key: config.xml
- path: config.xml
- - name: {{ include "clickhouse.fullname" . }}-metrica
- configMap:
- name: {{ include "clickhouse.fullname" . }}-metrica
- items:
- - key: metrica.xml
- path: metrica.xml
- - name: {{ include "clickhouse.fullname" . }}-users
- configMap:
- name: {{ include "clickhouse.fullname" . }}-users
- items:
- - key: users.xml
- path: users.xml
- {{- end }}
-{{- if .Values.clickhouse.persistentVolumeClaim.enabled }}
- volumeClaimTemplates:
-{{- if .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled }}
- - metadata:
- name: {{ include "clickhouse.fullname" . }}-data
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-data
- app.kubernetes.io/instance: {{ .Release.Name }}-data
- app.kubernetes.io/managed-by: {{ .Release.Service }}
- spec:
- accessModes:
- {{- range .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.accessModes }}
- - {{ . | quote }}
- {{- end }}
- {{- if (eq "-" .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName) }}
- storageClassName: ""
- {{- else }}
- storageClassName: {{ .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName | quote }}
- {{- end }}
- resources:
- requests:
- storage: {{ .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storage | quote }}
-{{- end }}
-{{- if .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled }}
- - metadata:
- name: {{ include "clickhouse.fullname" . }}-logs
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-logs
- app.kubernetes.io/instance: {{ .Release.Name }}-logs
- app.kubernetes.io/managed-by: {{ .Release.Service }}
- spec:
- accessModes:
- {{- range .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.accessModes }}
- - {{ . | quote }}
- {{- end }}
- {{- if (eq "-" .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName) }}
- storageClassName: ""
- {{- else }}
- storageClassName: {{ .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName | quote }}
- {{- end }}
- resources:
- requests:
- storage: {{ .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storage | quote }}
-{{- end }}
-{{- end }}
diff --git a/charts/clickhouse/templates/svc-clickhouse-headless.yaml b/charts/clickhouse/templates/svc-clickhouse-headless.yaml
deleted file mode 100755
index 980c2773..00000000
--- a/charts/clickhouse/templates/svc-clickhouse-headless.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ include "clickhouse.fullname" . }}-headless
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-headless
- app.kubernetes.io/instance: {{ .Release.Name }}-headless
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
- clusterIP: "None"
- ports:
- - port: {{ .Values.clickhouse.tcp_port }}
- targetPort: tcp-port
- protocol: TCP
- name: tcp-port
- - port: {{ .Values.clickhouse.http_port }}
- targetPort: http-port
- protocol: TCP
- name: http-port
- - port: {{ .Values.clickhouse.interserver_http_port }}
- targetPort: inter-http-port
- protocol: TCP
- name: inter-http-port
- selector:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
diff --git a/charts/clickhouse/templates/svc-clickhouse-replica-headless.yaml b/charts/clickhouse/templates/svc-clickhouse-replica-headless.yaml
deleted file mode 100755
index b26448d9..00000000
--- a/charts/clickhouse/templates/svc-clickhouse-replica-headless.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ include "clickhouse.fullname" . }}-replica-headless
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica-headless
- app.kubernetes.io/instance: {{ .Release.Name }}-replica-headless
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
- clusterIP: "None"
- ports:
- - port: {{ .Values.clickhouse.tcp_port }}
- targetPort: tcp-port
- protocol: TCP
- name: tcp-port
- - port: {{ .Values.clickhouse.http_port }}
- targetPort: http-port
- protocol: TCP
- name: http-port
- - port: {{ .Values.clickhouse.interserver_http_port }}
- targetPort: inter-http-port
- protocol: TCP
- name: inter-http-port
- selector:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica
- app.kubernetes.io/instance: {{ .Release.Name }}-replica
diff --git a/charts/clickhouse/templates/svc-clickhouse-replica.yaml b/charts/clickhouse/templates/svc-clickhouse-replica.yaml
deleted file mode 100755
index b3fd7eef..00000000
--- a/charts/clickhouse/templates/svc-clickhouse-replica.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ include "clickhouse.fullname" . }}-replica
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica
- app.kubernetes.io/instance: {{ .Release.Name }}-replica
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
- ports:
- - port: {{ .Values.clickhouse.tcp_port }}
- targetPort: tcp-port
- protocol: TCP
- name: tcp-port
- - port: {{ .Values.clickhouse.http_port }}
- targetPort: http-port
- protocol: TCP
- name: http-port
- - port: {{ .Values.clickhouse.interserver_http_port }}
- targetPort: inter-http-port
- protocol: TCP
- name: inter-http-port
- selector:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica
- app.kubernetes.io/instance: {{ .Release.Name }}-replica
diff --git a/charts/clickhouse/templates/svc-clickhouse.yaml b/charts/clickhouse/templates/svc-clickhouse.yaml
deleted file mode 100755
index b73c81a5..00000000
--- a/charts/clickhouse/templates/svc-clickhouse.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ include "clickhouse.fullname" . }}
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
- ports:
- - port: {{ .Values.clickhouse.tcp_port }}
- targetPort: tcp-port
- protocol: TCP
- name: tcp-port
- - port: {{ .Values.clickhouse.http_port }}
- targetPort: http-port
- protocol: TCP
- name: http-port
- - port: {{ .Values.clickhouse.interserver_http_port }}
- targetPort: inter-http-port
- protocol: TCP
- name: inter-http-port
- selector:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}
- app.kubernetes.io/instance: {{ .Release.Name }}
diff --git a/charts/clickhouse/templates/svc-tabix.yaml b/charts/clickhouse/templates/svc-tabix.yaml
deleted file mode 100755
index 56df5caa..00000000
--- a/charts/clickhouse/templates/svc-tabix.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{{- if .Values.tabix.enabled }}
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ include "clickhouse.fullname" . }}-tabix
- labels:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix
- app.kubernetes.io/instance: {{ .Release.Name }}-tabix
- app.kubernetes.io/managed-by: {{ .Release.Service }}
-spec:
- ports:
- - port: 80
- targetPort: http
- protocol: TCP
- name: http
- selector:
- app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix
- app.kubernetes.io/instance: {{ .Release.Name }}-tabix
-{{- end }}
diff --git a/charts/clickhouse/values.yaml b/charts/clickhouse/values.yaml
deleted file mode 100755
index 2989bd88..00000000
--- a/charts/clickhouse/values.yaml
+++ /dev/null
@@ -1,374 +0,0 @@
-## Timezone
-timezone: "Asia/Shanghai"
-
-## Cluster domain
-clusterDomain: "cluster.local"
-
-##
-## Clickhouse Node selectors and tolerations for pod assignment
-## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
-## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
-##
-# nodeSelector: {"beta.kubernetes.io/arch": "amd64"}
-# tolerations: []
-## Clickhouse pod/node affinity/anti-affinity
-##
-#affinity:
-# nodeAffinity:
-# requiredDuringSchedulingIgnoredDuringExecution:
-# nodeSelectorTerms:
-# - matchExpressions:
-# - key: "application/clickhouse"
-# operator: In
-# values:
-# - "true"
-
-clickhouse:
- ## The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
- ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
- ##
- podManagementPolicy: "Parallel"
-
- ## StatefulSet controller supports automated updates. There are two valid update strategies: RollingUpdate and OnDelete
- ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
- ##
- updateStrategy: "RollingUpdate"
-
- ## Partition update strategy
- ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
- ##
- # rollingUpdatePartition:
-
- ##
- ## The path to the directory containing data.
- ## Default value: /var/lib/clickhouse
- path: "/var/lib/clickhouse"
- ##
- ## The port for connecting to the server over HTTP
- http_port: "8123"
- ##
- ## Port for communicating with clients over the TCP protocol.
- tcp_port: "9000"
- ##
- ## Port for exchanging data between ClickHouse servers.
- interserver_http_port: "9009"
- ##
- ## The instance number of Clickhouse
- replicas: "3"
- ## Clickhouse image configuration.
- image: "clickhouse/clickhouse-server"
- imageVersion: "23.6.2.18"
- imagePullPolicy: "IfNotPresent"
- #imagePullSecrets:
- ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
- ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
- livenessProbe:
- enabled: true
- initialDelaySeconds: "30"
- periodSeconds: "30"
- timeoutSeconds: "5"
- failureThreshold: "3"
- successThreshold: "1"
- ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
- ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
- readinessProbe:
- enabled: true
- initialDelaySeconds: "30"
- periodSeconds: "30"
- timeoutSeconds: "5"
- failureThreshold: "3"
- successThreshold: "1"
- ## volumeClaimTemplates is a list of claims that pods are allowed to reference.
- ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
- ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
- ## A claim in this list takes precedence over any volumes in the template, with the same name.
- persistentVolumeClaim:
- enabled: false
- ## Clickhouse data volume
- dataPersistentVolume:
- enabled: false
- accessModes:
- - "ReadWriteOnce"
- storageClassName: "-"
- storage: "500Gi"
- ## Clickhouse logs volume
- logsPersistentVolume:
- enabled: false
- accessModes:
- - "ReadWriteOnce"
- storageClassName: "-"
- storage: "50Gi"
- ##
- ## An API object that manages external access to the services in a cluster, typically HTTP.
- ## Ingress can provide load balancing, SSL termination and name-based virtual hosting.
- ingress:
- enabled: false
- # host: "clickhouse.domain.com"
- # path: "/"
- # tls:
- # enabled: false
- # hosts:
- # - "clickhouse.domain.com"
- # - "clickhouse.domain1.com"
- # secretName: "clickhouse-secret"
- ##
- ## Clickhouse config.xml and metrica.xml
- configmap:
- ##
- ## If enabled is `true`, use custom config.xml and metrica.xml
- enabled: true
- ##
- ## The maximum number of inbound connections.
- max_connections: "4096"
- ##
- ## The number of seconds that ClickHouse waits for incoming requests before closing the connection.
- keep_alive_timeout: "3"
- ##
- ## The maximum number of simultaneously processed requests.
- max_concurrent_queries: "100"
- ##
- ## Cache size (in bytes) for uncompressed data used by table engines from the MergeTree.
- ## There is one shared cache for the server. Memory is allocated on demand. The cache is used if the option use_uncompressed_cache is enabled.
- ## The uncompressed cache is advantageous for very short queries in individual cases.
- uncompressed_cache_size: "8589934592"
- ##
- ## Approximate size (in bytes) of the cache of "marks" used by MergeTree.
- ## The cache is shared for the server and memory is allocated as needed. The cache size must be at least 5368709120.
- mark_cache_size: "5368709120"
- ##
- ## You can specify umask here (see "man umask"). Server will apply it on startup.
- ## Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
- umask: "022"
- ##
- ## Perform mlockall after startup to lower first queries latency and to prevent clickhouse executable from being paged out under high IO load.
- ## Enabling this option is recommended but will lead to increased startup time for up to a few seconds.
- mlock_executable: false
- ##
- ## The interval in seconds before reloading built-in dictionaries.
- ## ClickHouse reloads built-in dictionaries every x seconds. This makes it possible to edit dictionaries "on the fly" without restarting the server.
- builtin_dictionaries_reload_interval: "3600"
- ##
- ## Maximum session timeout, in seconds.
- max_session_timeout: "3600"
- ##
- ## Default session timeout, in seconds.
- default_session_timeout: "60"
- ##
- ## Uncomment to disable ClickHouse internal DNS caching.
- disable_internal_dns_cache: "1"
- ##
- ## The maximum number of open files.
- ## We recommend using this option in Mac OS X, since the getrlimit() function returns an incorrect value.
- #max_open_files:
- ##
- ## The host name that can be used by other servers to access this server.
- ## If omitted, it is defined in the same way as the hostname-f command.
- ## Useful for breaking away from a specific network interface.
- #interserver_http_host:
- ##
- ## Logging settings.
- # path – The log path. Default value: /var/log/clickhouse-server.
- # level – Logging level. Acceptable values: trace, debug, information, warning, error. Default value: trace
- # size – Size of the file. Applies to log and errorlog. Once the file reaches size, ClickHouse archives and renames it, and creates a new log file in its place.
- # count – The number of archived log files that ClickHouse stores.
- logger:
- path: "/var/log/clickhouse-server"
- level: "trace"
- size: "1000M"
- count: "10"
- ##
- ## Data compression settings.
- # min_part_size – The minimum size of a table part.
- # min_part_size_ratio – The ratio of the minimum size of a table part to the full size of the table.
- # method – Compression method. Acceptable values : lz4 or zstd(experimental).
- compression:
- enabled: false
- cases:
- - min_part_size: "10000000000"
- min_part_size_ratio: "0.01"
- method: "zstd"
- ##
- ## Contains settings that allow ClickHouse to interact with a ZooKeeper cluster.
- ## ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted.
- # node — ZooKeeper endpoint. You can set multiple endpoints.
- # session_timeout — Maximum timeout for the client session in milliseconds.
- # root — The znode that is used as the root for znodes used by the ClickHouse server. Optional.
- # identity — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional.
- zookeeper_servers:
- enabled: false
- session_timeout_ms: "30000"
- operation_timeout_ms: "10000"
- #root: "/path/to/zookeeper/node"
- #identity: "user:password"
- config:
- - index: ""
- host: ""
- port: ""
- ##
- ## Configuration of clusters used by the Distributed table engine.
- ## The parameters host, port, and optionally user, password, secure, compression are specified for each server:
- # host – The address of the remote server.
- # port – The TCP port for messenger activity ('tcp_port' in the config, usually set to 9000).
- # user – Name of the user for connecting to a remote server. Access is configured in the users.xml file. For more information, see the section "Access rights".
- # password – The password for connecting to a remote server (not masked).
- # secure - Use ssl for connection, usually you also should define port = 9440. Server should listen on 9440 and have correct certificates.
- # compression - Use data compression. Default value: true.
- remote_servers:
- enabled: true
- internal_replication: false
- replica:
- user: "default"
- #password: ""
- compression: true
- backup:
- enabled: true
- ##
- ## Sending data to Graphite.
- # interval – The interval for sending, in seconds.
- # timeout – The timeout for sending data, in seconds.
- # root_path – Prefix for keys.
- # metrics – Sending data from a :ref:system_tables-system.metrics table.
- # events – Sending data from a :ref:system_tables-system.events table.
- # asynchronous_metrics – Sending data from a :ref:system_tables-system.asynchronous_metrics table.
- ## You can configure multiple clauses. For instance, you can use this for sending different data at different intervals.
- graphite:
- enabled: false
- config:
- - timeout: "0.1"
- interval: "60"
- root_path: "one_min"
- metrics: true
- events: true
- events_cumulative: true
- asynchronous_metrics: true
- ##
- ## A settings profile is a collection of settings grouped under the same name.
- ## Each ClickHouse user has a profile.
- ## To apply all the settings in a profile, set the profile setting.
- ## More info: https://clickhouse.yandex/docs/en/operations/settings/settings_profiles/
- profiles:
- enabled: false
- profile:
- - name: "default"
- config:
- max_memory_usage: "10000000000"
- use_uncompressed_cache: "0"
- load_balancing: "random"
- ##
- ## The users section of the user.xml configuration file contains user settings.
- ## More info: https://clickhouse.yandex/docs/en/operations/settings/settings_users/
- users:
- enabled: false
- user:
- - name: "default"
- config:
- #password: ""
- networks:
- - "::/0"
- profile: "default"
- quota: "default"
- ##
- ## Quotas allow you to limit resource usage over a period of time, or simply track the use of resources.
- ## Quotas are set up in the user config. This is usually 'users.xml'.
- ## More info: https://clickhouse.yandex/docs/en/operations/quotas/
- quotas:
- enabled: false
- quota:
- - name: "default"
- config:
- - duration: "3600"
- queries: "0"
- errors: "0"
- result_rows: "0"
- read_rows: "0"
- execution_time: "0"
-
-##
-## Web interface for ClickHouse in the Tabix project.
-## Features:
-# Works with ClickHouse directly from the browser, without the need to install additional software.
-# Query editor with syntax highlighting.
-# Auto-completion of commands.
-# Tools for graphical analysis of query execution.
-# Color scheme options.
-tabix:
- ##
- ## Enable Tabix
- enabled: true
- ##
- ## The instance number of Tabix
- replicas: "1"
- ##
- ## The deployment strategy to use to replace existing pods with new ones.
- updateStrategy:
- ##
- ## Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
- type: "RollingUpdate"
- ##
- ## The maximum number of pods that can be scheduled above the desired number of pods.
- maxSurge: 3
- ##
- ## The maximum number of pods that can be unavailable during the update.
- maxUnavailable: 1
- ##
- ## Docker image name.
- image: "spoonest/clickhouse-tabix-web-client"
- ##
- ## Docker image version
- imageVersion: "stable"
- ##
- ## Image pull policy. One of Always, Never, IfNotPresent.
- ## Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated.
- ## More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
- imagePullPolicy: "IfNotPresent"
- ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
- ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
- livenessProbe:
- enabled: true
- initialDelaySeconds: "30"
- periodSeconds: "30"
- timeoutSeconds: "5"
- failureThreshold: "3"
- successThreshold: "1"
- ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
- ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
- readinessProbe:
- enabled: true
- initialDelaySeconds: "30"
- periodSeconds: "30"
- timeoutSeconds: "5"
- failureThreshold: "3"
- successThreshold: "1"
- ##
- ## ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
- ## If specified, these secrets will be passed to individual puller implementations for them to use.
- ## For example, in the case of docker, only DockerConfig type secrets are honored.
- ## More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
- #imagePullSecrets:
- ##
- ## You can limit access to your tabix.ui application on the proxy level.
- ## User and Password parameters to restrict access only for specified user.
- security:
- user: "admin"
- password: "admin"
- ##
- ## You can automatically connect to a Clickhouse server by specifying chName, chHost, chLogin, chPassword and/or chParams environment variables.
- #automaticConnection:
- # chName: "test"
- # chHost: "test"
- # chLogin: "test"
- # chPassword: "test"
- # chParams: ""
- ##
- ## An API object that manages external access to the services in a cluster, typically HTTP.
- ## Ingress can provide load balancing, SSL termination and name-based virtual hosting.
- ingress:
- enabled: false
- # host: "tabix.domain.com"
- # path: "/"
- # tls:
- # enabled: false
- # hosts:
- # - "tabix.domain.com"
- # - "tabix.domain1.com"
- # secretName: "tabix-secret"
diff --git a/charts/client/Chart.yaml b/charts/client/Chart.yaml
index edfa51cb..024cd3bb 100644
--- a/charts/client/Chart.yaml
+++ b/charts/client/Chart.yaml
@@ -15,23 +15,24 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 1.0.2
+version: 1.1.26
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
-appVersion: "v1.0.0"
+appVersion: "v1.1.7"
dependencies:
- name: nats
condition: nats.enabled
- version: 0.13.4
- repository: https://intelops.github.io/kubviz/
+ version: 1.0.0
+ repository: https://kube-tarian.github.io/helmrepo-supporting-tools/
- name: clickhouse
condition: clickhouse.enabled
version: 1.0.2
- repository: https://intelops.github.io/kubviz/
+ repository: https://kube-tarian.github.io/helmrepo-supporting-tools/
- name: grafana
condition: grafana.enabled
- version: 1.0.3
+ version: 1.0.5
repository: https://kube-tarian.github.io/helmrepo-supporting-tools/
+
diff --git a/charts/client/templates/configmap-azure-dashboard.yaml b/charts/client/templates/configmap-azure-dashboard.yaml
new file mode 100644
index 00000000..98739e6a
--- /dev/null
+++ b/charts/client/templates/configmap-azure-dashboard.yaml
@@ -0,0 +1,512 @@
+{{- if .Values.dashboards.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "client.fullname" . }}-azure-dashboard
+ annotations:
+ grafana_folder: "Gitbridge"
+ labels:
+ {{ .Values.dashboards.label }}: {{ .Values.dashboards.labelValue | quote }}
+data:
+ azure.json: |-
+ {
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 53,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 2,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "// Check if data.series exists\nif (context.panel.data.series && context.panel.data.series.length > 0) {\n const eventTypes = context.panel.data.series[0].fields[0].values;\n const authors = context.panel.data.series[0].fields[1].values;\n const repoNames = context.panel.data.series[0].fields[2].values;\n const total = context.panel.data.series[0].fields[3].values; // Assuming you have a field named \"Total\"\n\n // Create nodes and links\n const nodes = [];\n const links = [];\n\n authors.forEach((author, index) => {\n const sourceNode = {\n name: author,\n category: 0, // Category for source nodes\n symbolSize: 60, // Size for source nodes\n };\n\n const eventTypeNode = {\n name: eventTypes[index],\n category: 1, // Category for eventType nodes\n symbolSize: 40, // Size for eventType nodes\n };\n\n const repoNode = {\n name: repoNames[index],\n category: 2, // Category for repo nodes\n symbolSize: 30, // Size for repo nodes\n };\n\n const totalNode = {\n name: `Total: ${total[index]}`, // Assuming you have an array \"total\"\n category: 3, // Category for total nodes\n symbolSize: 20, // Size for total nodes\n };\n\n // Ensure source, eventType, repo, and total nodes are unique before adding them\n if (!nodes.some((node) => node.name === sourceNode.name)) {\n nodes.push(sourceNode);\n }\n\n if (!nodes.some((node) => node.name === eventTypeNode.name)) {\n nodes.push(eventTypeNode);\n }\n\n if (!nodes.some((node) => node.name === repoNode.name)) {\n nodes.push(repoNode);\n }\n\n if (!nodes.some((node) => node.name === totalNode.name)) {\n nodes.push(totalNode);\n }\n\n // Create links between author, eventType, repo, and total nodes\n links.push({\n source: author,\n target: eventTypes[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: repoNames[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: totalNode.name,\n });\n });\n\n // Create categories for nodes\n const categories = [\n {\n name: 'Authors',\n },\n {\n name: 'Event Type',\n },\n {\n name: 'Repo Names',\n },\n {\n name: 'Total', // Add a category for \"Total\" nodes\n },\n ];\n\n // Create ECharts option\n const option = {\n tooltip: {\n trigger: 'item',\n formatter: '{b}',\n },\n legend: {\n x: 'left',\n data: ['Authors', 'Event Types', 'Repo Names', 'Total'], // Add \"Total\" to legend data\n },\n series: [\n {\n type: 'graph',\n layout: 'circular',\n roam: true,\n label: {\n show: true,\n textStyle: { // Add textStyle property to configure text style\n color: '#000', // Set the text color to a brighter color, such as white (#FFF)\n },\n },\n force: {\n repulsion: 100,\n gravity: 0.1,\n edgeLength: 150,\n },\n data: nodes,\n links: links,\n draggable: true,\n categories: categories,\n edgeSymbol: [\"none\", \"arrow\"],\n\n // Increase the size of arrow marks\n edgeSymbolSize: [12, 12], // Set the arrow size here\n lineStyle: {\n color: \"#000000\",\n curveness: 0,\n opacity: 0.3,\n },\n },\n ],\n };\n\n // Return the ECharts option\n return option;\n} else {\n // Display a custom message when data is not available\n const option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n\n return option;\n}",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"azure_devops\" \nWHERE $timeFilterByColumn(TimeStamp) AND EventType IN ($eventType) AND Author IN ($Author)\nGROUP BY EventType, Author, RepoName",
+ "rawQuery": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"azure_devops\" \nWHERE TimeStamp >= toDateTime(1694534229) AND TimeStamp <= toDateTime(1694620629) AND EventType IN ('git.push') AND Author IN ('Anila Soman')\nGROUP BY EventType, Author, RepoName",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Azure Contributions",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 10
+ },
+ "id": 5,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let options; // Initialize the options variable\n\nif (!context.panel.data || !context.panel.data.series || context.panel.data.series.length === 0 || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n options = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract Author and Push_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const pushEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Push_Events').values;\n\n // Create the ECharts options\n options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Push_Events'],\n orient: 'vertical', // Change the orientation to vertical\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: pushEvents,\n type: 'line',\n areaStyle: {\n color: 'rgba(0, 128, 255, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'blue', // Set the line color\n },\n name: 'Push_Events',\n },\n ],\n };\n}\n\nreturn options;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Push_Events\nFROM default.azure_devops\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'git.push'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Push_Events\nFROM default.azure_devops\nWHERE TimeStamp >= toDateTime(1694534188) AND TimeStamp <= toDateTime(1694620588) AND EventType = 'git.push'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+          "title": "Number of Azure Push events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 10
+ },
+ "id": 6,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\n// Define a default options object\nconst defaultOptions = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n};\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = defaultOptions;\n} else {\n // Extract Author and Merge_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const mergeEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Merge_Events').values;\n\n // Create the ECharts options\n const options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Merge_Events'],\n orient: 'vertical',\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: mergeEvents,\n type: 'line',\n name: 'Merge_Events',\n areaStyle: {\n color: 'rgba(255, 0, 0, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'red', // Set the line color\n },\n },\n ],\n };\n\n option = options; // Assign the options to the outer variable\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Merge_Events\nFROM default.azure_devops \nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'git.pullrequest.merged'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Merge_Events\nFROM default.azure_devops \nWHERE TimeStamp >= toDateTime(1694534208) AND TimeStamp <= toDateTime(1694620608) AND EventType = 'git.pullrequest.merged'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of Azure Merge events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "light-red",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 18
+ },
+ "id": 3,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'git.push'",
+ "rawQuery": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE TimeStamp >= toDateTime(1694534146) AND TimeStamp <= toDateTime(1694620546) AND EventType = 'git.push'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+          "title": "Azure Push Events Count",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "light-red",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 12,
+ "y": 18
+ },
+ "id": 4,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'git.pullrequest.merged'",
+ "rawQuery": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE TimeStamp >= toDateTime(1694534169) AND TimeStamp <= toDateTime(1694620569) AND EventType = 'git.pullrequest.merged'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+          "title": "Azure Merge Events Count",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 24
+ },
+ "id": 1,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.azure_devops\nWHERE $timeFilterByColumn(TimeStamp) AND EventType IN ($eventType) AND Author IN ($Author)",
+ "rawQuery": "SELECT * FROM default.azure_devops\nWHERE TimeStamp >= toDateTime(1694534128) AND TimeStamp <= toDateTime(1694620528) AND EventType IN ('git.push') AND Author IN ('Anila Soman')",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Azure Events",
+ "type": "table"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "definition": "SELECT EventType FROM default.azure_devops",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "eventType",
+ "options": [],
+ "query": "SELECT EventType FROM default.azure_devops",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "definition": "SELECT Author FROM default.azure_devops",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "Author",
+ "options": [],
+ "query": "SELECT Author FROM default.azure_devops",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-24h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "Azure",
+ "uid": "dd66838a-ffda-4de2-944f-1828d1671fc9",
+ "version": 2,
+ "weekStart": ""
+ }
+
+{{- end }}
\ No newline at end of file
diff --git a/charts/client/templates/configmap-bitbucket-dashboard.yaml b/charts/client/templates/configmap-bitbucket-dashboard.yaml
new file mode 100644
index 00000000..3b2722db
--- /dev/null
+++ b/charts/client/templates/configmap-bitbucket-dashboard.yaml
@@ -0,0 +1,511 @@
+{{- if .Values.dashboards.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "client.fullname" . }}-bitbucket-dashboard
+ annotations:
+ grafana_folder: "Gitbridge"
+ labels:
+ {{ .Values.dashboards.label }}: {{ .Values.dashboards.labelValue | quote }}
+data:
+ bitbucket.json: |-
+ {
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 60,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 2,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "// Check if data.series exists\nif (context.panel.data.series && context.panel.data.series.length > 0) {\n const eventTypes = context.panel.data.series[0].fields[0].values;\n const authors = context.panel.data.series[0].fields[1].values;\n const repoNames = context.panel.data.series[0].fields[2].values;\n const total = context.panel.data.series[0].fields[3].values; // Assuming you have a field named \"Total\"\n\n // Create nodes and links\n const nodes = [];\n const links = [];\n\n authors.forEach((author, index) => {\n const sourceNode = {\n name: author,\n category: 0, // Category for source nodes\n symbolSize: 60, // Size for source nodes\n };\n\n const eventTypeNode = {\n name: eventTypes[index],\n category: 1, // Category for eventType nodes\n symbolSize: 40, // Size for eventType nodes\n };\n\n const repoNode = {\n name: repoNames[index],\n category: 2, // Category for repo nodes\n symbolSize: 30, // Size for repo nodes\n };\n\n const totalNode = {\n name: `Total: ${total[index]}`, // Assuming you have an array \"total\"\n category: 3, // Category for total nodes\n symbolSize: 20, // Size for total nodes\n };\n\n // Ensure source, eventType, repo, and total nodes are unique before adding them\n if (!nodes.some((node) => node.name === sourceNode.name)) {\n nodes.push(sourceNode);\n }\n\n if (!nodes.some((node) => node.name === eventTypeNode.name)) {\n nodes.push(eventTypeNode);\n }\n\n if (!nodes.some((node) => node.name === repoNode.name)) {\n nodes.push(repoNode);\n }\n\n if (!nodes.some((node) => node.name === totalNode.name)) {\n nodes.push(totalNode);\n }\n\n // Create links between author, eventType, repo, and total nodes\n links.push({\n source: author,\n target: eventTypes[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: repoNames[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: totalNode.name,\n });\n });\n\n // Create categories for nodes\n const categories = [\n {\n name: 'Authors',\n },\n {\n name: 'Event Type',\n },\n {\n name: 'Repo Names',\n },\n {\n name: 'Total', // Add a category for \"Total\" nodes\n },\n ];\n\n // Create ECharts option\n const option = {\n tooltip: {\n trigger: 'item',\n formatter: '{b}',\n },\n legend: {\n x: 'left',\n data: ['Authors', 'Event Types', 'Repo Names', 'Total'], // Add \"Total\" to legend data\n },\n series: [\n {\n type: 'graph',\n layout: 'circular',\n roam: true,\n label: {\n show: true,\n textStyle: { // Add textStyle property to configure text style\n color: '#000', // Set the text color to a brighter color, such as white (#FFF)\n },\n },\n force: {\n repulsion: 100,\n gravity: 0.1,\n edgeLength: 150,\n },\n data: nodes,\n links: links,\n draggable: true,\n categories: categories,\n edgeSymbol: [\"none\", \"arrow\"],\n\n // Increase the size of arrow marks\n edgeSymbolSize: [12, 12], // Set the arrow size here\n lineStyle: {\n color: \"#000000\",\n curveness: 0,\n opacity: 0.3,\n },\n },\n ],\n };\n\n // Return the ECharts option\n return option;\n} else {\n // Display a custom message when data is not available\n const option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n\n return option;\n}",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"bitbucket\" \nWHERE $timeFilterByColumn(TimeStamp) AND EventType In ($eventType) AND Author IN ($Author)\nGROUP BY EventType, Author, RepoName",
+ "rawQuery": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"bitbucket\" \nWHERE TimeStamp >= toDateTime(1694536440) AND TimeStamp <= toDateTime(1694622840) AND EventType In ('repo:push','pullrequest:created','pullrequest:fulfilled') AND Author IN ('')\nGROUP BY EventType, Author, RepoName",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+          "title": "BitBucket Contributions",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 9
+ },
+ "id": 5,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let options; // Initialize the options variable\n\nif (!context.panel.data || !context.panel.data.series || context.panel.data.series.length === 0 || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n options = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract Author and Push_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const pushEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Push_Events').values;\n\n // Create the ECharts options\n options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Push_Events'],\n orient: 'vertical', // Change the orientation to vertical\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: pushEvents,\n type: 'line',\n areaStyle: {\n color: 'rgba(0, 128, 255, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'blue', // Set the line color\n },\n name: 'Push_Events',\n },\n ],\n };\n}\n\nreturn options;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Push_Events\nFROM default.bitbucket\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'repo:push'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Push_Events\nFROM default.bitbucket\nWHERE TimeStamp >= toDateTime(1694536383) AND TimeStamp <= toDateTime(1694622783) AND EventType = 'repo:push'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of BitBucket Push events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 9
+ },
+ "id": 6,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\n// Define a default options object\nconst defaultOptions = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n};\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = defaultOptions;\n} else {\n // Extract Author and Merge_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const mergeEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Merge_Events').values;\n\n // Create the ECharts options\n const options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Merge_Events'],\n orient: 'vertical',\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: mergeEvents,\n type: 'line',\n name: 'Merge_Events',\n areaStyle: {\n color: 'rgba(255, 0, 0, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'red', // Set the line color\n },\n },\n ],\n };\n\n option = options; // Assign the options to the outer variable\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Merge_Events\nFROM default.bitbucket\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'pullrequest:fulfilled'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Merge_Events\nFROM default.bitbucket\nWHERE TimeStamp >= toDateTime(1694536421) AND TimeStamp <= toDateTime(1694622821) AND EventType = 'pullrequest:fulfilled'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of BitBucket Merge events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "light-blue",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 17
+ },
+ "id": 3,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'repo:push'",
+ "rawQuery": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE TimeStamp >= toDateTime(1694536343) AND TimeStamp <= toDateTime(1694622743) AND EventType = 'repo:push'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "BitBucket Push Events Count",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "light-blue",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 12,
+ "y": 17
+ },
+ "id": 4,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'pullrequest:fulfilled'",
+ "rawQuery": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE TimeStamp >= toDateTime(1694536364) AND TimeStamp <= toDateTime(1694622764) AND EventType = 'pullrequest:fulfilled'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+          "title": "BitBucket Merge Events Count",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 24,
+ "x": 0,
+ "y": 23
+ },
+ "id": 1,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.bitbucket\nWHERE $timeFilterByColumn(TimeStamp) AND EventType IN ($eventType) AND Author In ($Author)",
+ "rawQuery": "SELECT * FROM default.bitbucket\nWHERE TimeStamp >= toDateTime(1694536323) AND TimeStamp <= toDateTime(1694622723) AND EventType IN ('repo:push','pullrequest:created','pullrequest:fulfilled') AND Author In ('')",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "BitBucket Events",
+ "type": "table"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "definition": "SELECT EventType FROM default.bitbucket",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "eventType",
+ "options": [],
+ "query": "SELECT EventType FROM default.bitbucket",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "definition": "SELECT Author FROM default.bitbucket",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "Author",
+ "options": [],
+ "query": "SELECT Author FROM default.bitbucket",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-24h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "BitBucket",
+ "uid": "a7772dd5-76c7-48f3-8462-b39fbc20941c",
+ "version": 2,
+ "weekStart": ""
+ }
+
+{{- end }}
\ No newline at end of file
diff --git a/charts/client/templates/configmap-clickhouse-datasource.yaml b/charts/client/templates/configmap-clickhouse-datasource.yaml
index fac0f087..4052616a 100644
--- a/charts/client/templates/configmap-clickhouse-datasource.yaml
+++ b/charts/client/templates/configmap-clickhouse-datasource.yaml
@@ -13,6 +13,25 @@ data:
type: grafana-clickhouse-datasource
jsonData:
port: 9000
- server: kubviz-client-clickhouse
+ {{- if .Values.clickhouse.enabled }}
+ server: {{ include "client.fullname" . }}-clickhouse
tlsSkipVerify: true
+ username: {{ .Values.clickhouse.user }}
+ secureJsonData:
+ password: {{ .Values.clickhouse.password }}
+ {{- else }}
+ server: {{ .Values.existingClickhouse.host }}
+ tlsSkipVerify: true
+ {{- if not .Values.existingClickhouse.secret }}
+ username: {{ .Values.existingClickhouse.username }}
+ {{- else }}
+ username: $CLICKHOUSE_USERNAME
+ {{- end }}
+ secureJsonData:
+ {{- if not .Values.existingClickhouse.secret }}
+ password: {{ .Values.existingClickhouse.password }}
+ {{- else }}
+ password: $CLICKHOUSE_PASSWORD
+ {{- end }}
+ {{- end }}
{{- end }}
\ No newline at end of file
diff --git a/charts/client/templates/configmap-containerbridge-dashboard.yaml b/charts/client/templates/configmap-containerbridge-dashboard.yaml
new file mode 100644
index 00000000..fa45e2e5
--- /dev/null
+++ b/charts/client/templates/configmap-containerbridge-dashboard.yaml
@@ -0,0 +1,341 @@
+{{- if .Values.dashboards.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "client.fullname" . }}-containerbridge-dashboard
+ annotations:
+ grafana_folder: "Containerbridge"
+ labels:
+ {{ .Values.dashboards.label }}: {{ .Values.dashboards.labelValue | quote }}
+data:
+ containerbridge.json: |-
+ {
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 10,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 4,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.quaycontainerpush\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime",
+ "rawQuery": "SELECT * FROM default.quaycontainerpush\nWHERE EventTime >= toDateTime(1694165676) AND EventTime <= toDateTime(1694252076)\nORDER BY EventTime",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Quay Container Registry",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 8
+ },
+ "id": 3,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.jfrogcontainerpush\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.jfrogcontainerpush\nWHERE EventTime >= toDateTime(1694226084) AND EventTime <= toDateTime(1694247684)\nORDER BY EventTime DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "JFrog Container Registry",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 16
+ },
+ "id": 2,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.azurecontainerpush\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.azurecontainerpush\nWHERE EventTime >= toDateTime(1694165623) AND EventTime <= toDateTime(1694252023)\nORDER BY EventTime DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Azure Container Registry",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 24
+ },
+ "id": 1,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.dockerhubbuild\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC\n",
+ "rawQuery": "SELECT * FROM default.dockerhubbuild\nWHERE EventTime >= toDateTime(1694165564) AND EventTime <= toDateTime(1694251964)\nORDER BY EventTime DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "DockerHub",
+ "type": "table"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-24h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "Container-Bridge",
+ "uid": "cf8cf066-b241-48c8-9e3d-863eed33bcf3",
+ "version": 2,
+ "weekStart": ""
+ }
+{{- end }}
\ No newline at end of file
diff --git a/charts/client/templates/configmap-features-dashboard.yaml b/charts/client/templates/configmap-features-dashboard.yaml
index 031aa7aa..27ef8634 100644
--- a/charts/client/templates/configmap-features-dashboard.yaml
+++ b/charts/client/templates/configmap-features-dashboard.yaml
@@ -3,6 +3,8 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "client.fullname" . }}-features-dashboard
+ annotations:
+ grafana_folder: "Kubviz"
labels:
{{ .Values.dashboards.label }}: {{ .Values.dashboards.labelValue | quote }}
data:
@@ -30,7 +32,7 @@ data:
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
- "id": 10,
+ "id": 2,
"links": [],
"liveNow": false,
"panels": [
@@ -47,7 +49,9 @@ data:
},
"custom": {
"align": "center",
- "displayMode": "color-text",
+ "cellOptions": {
+ "type": "color-text"
+ },
"filterable": true,
"inspect": false
},
@@ -76,7 +80,9 @@ data:
},
"id": 8,
"options": {
+ "cellHeight": "sm",
"footer": {
+ "countRows": false,
"fields": "",
"reducer": [
"sum"
@@ -85,7 +91,7 @@ data:
},
"showHeader": true
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -97,15 +103,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.getall_resources",
- "rawQuery": "SELECT * FROM default.getall_resources",
+ "query": "SELECT * FROM default.getall_resources\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.getall_resources\nWHERE EventTime >= toDateTime(1694219529) AND EventTime <= toDateTime(1694241129)\nORDER BY EventTime DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Kubernetes Resources",
- "transparent": true,
"type": "table"
},
{
@@ -121,7 +126,9 @@ data:
},
"custom": {
"align": "center",
- "displayMode": "color-text",
+ "cellOptions": {
+ "type": "color-text"
+ },
"filterable": true,
"inspect": false
},
@@ -150,7 +157,9 @@ data:
},
"id": 6,
"options": {
+ "cellHeight": "sm",
"footer": {
+ "countRows": false,
"fields": "",
"reducer": [
"sum"
@@ -159,7 +168,7 @@ data:
},
"showHeader": true
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -171,15 +180,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.outdated_images\nWHERE VersionsBehind > 0",
- "rawQuery": "SELECT * FROM default.outdated_images\nWHERE VersionsBehind > 0",
+ "query": "SELECT * FROM default.outdated_images\nWHERE $timeFilterByColumn(EventTime) AND VersionsBehind > 0\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.outdated_images\nWHERE EventTime >= toDateTime(1694219614) AND EventTime <= toDateTime(1694241214) AND VersionsBehind > 0\nORDER BY EventTime DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Outdated Images",
- "transparent": true,
"type": "table"
},
{
@@ -195,7 +203,9 @@ data:
},
"custom": {
"align": "center",
- "displayMode": "color-text",
+ "cellOptions": {
+ "type": "color-text"
+ },
"filterable": true,
"inspect": false
},
@@ -224,7 +234,9 @@ data:
},
"id": 4,
"options": {
+ "cellHeight": "sm",
"footer": {
+ "countRows": false,
"fields": "",
"reducer": [
"sum"
@@ -233,7 +245,7 @@ data:
},
"showHeader": true
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -245,15 +257,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.DeletedAPIs",
- "rawQuery": "SELECT * FROM default.DeletedAPIs",
+ "query": "SELECT * FROM default.DeletedAPIs\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.DeletedAPIs\nWHERE EventTime >= toDateTime(1694219743) AND EventTime <= toDateTime(1694241343)\nORDER BY EventTime DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "DeletedAPIs",
- "transparent": true,
"type": "table"
},
{
@@ -269,7 +280,9 @@ data:
},
"custom": {
"align": "center",
- "displayMode": "color-text",
+ "cellOptions": {
+ "type": "color-text"
+ },
"filterable": true,
"inspect": false
},
@@ -298,7 +311,9 @@ data:
},
"id": 2,
"options": {
+ "cellHeight": "sm",
"footer": {
+ "countRows": false,
"fields": "",
"reducer": [
"sum"
@@ -307,7 +322,7 @@ data:
},
"showHeader": true
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -319,26 +334,26 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.DeprecatedAPIs",
- "rawQuery": "SELECT * FROM default.DeprecatedAPIs",
+ "query": "SELECT * FROM default.DeprecatedAPIs\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.DeprecatedAPIs\nWHERE EventTime >= toDateTime(1694219780) AND EventTime <= toDateTime(1694241380)\nORDER BY EventTime DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "DeprecatedAPIs",
- "transparent": true,
"type": "table"
}
],
- "schemaVersion": 37,
+ "refresh": "",
+ "schemaVersion": 38,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
- "from": "now-6h",
+ "from": "now-24h",
"to": "now"
},
"timepicker": {},
diff --git a/charts/client/templates/configmap-gitbridge-dashboard.yaml b/charts/client/templates/configmap-gitbridge-dashboard.yaml
deleted file mode 100644
index d2917553..00000000
--- a/charts/client/templates/configmap-gitbridge-dashboard.yaml
+++ /dev/null
@@ -1,1752 +0,0 @@
-{{- if .Values.dashboards.enabled }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ include "client.fullname" . }}-gitbridge-dashboard
- labels:
- {{ .Values.dashboards.label }}: {{ .Values.dashboards.labelValue | quote }}
-data:
- gitbridge.json: |-
- {
- "annotations": {
- "list": [
- {
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "target": {
- "limit": 100,
- "matchAny": false,
- "tags": [],
- "type": "dashboard"
- },
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "fiscalYearStartMonth": 0,
- "graphTooltip": 0,
- "id": 8,
- "links": [],
- "liveNow": false,
- "panels": [
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "description": "This panel displays the total number of merge request done from all the Git providers",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 12,
- "x": 0,
- "y": 0
- },
- "id": 4,
- "options": {
- "colorMode": "background",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "8.4.6",
- "targets": [
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "intervalFactor": 1,
- "query": "SELECT count(*) AS GitHub FROM default.github\nWHERE EventType = 'pull_request'",
- "rawQuery": "SELECT count(*) AS GitHub FROM default.github\nWHERE EventType = 'pull_request'",
- "refId": "A",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS GitLab FROM default.gitlab\nWHERE EventType = 'Merge Request Hook'",
- "rawQuery": "SELECT count(*) AS GitLab FROM default.gitlab\nWHERE EventType = 'Merge Request Hook'",
- "refId": "B",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE EventType = 'pullrequest:fulfilled'",
- "rawQuery": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE EventType = 'pullrequest:fulfilled'",
- "refId": "C",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Gitea FROM default.gitea\nWHERE EventType = 'pull_request'",
- "rawQuery": "SELECT count(*) AS Gitea FROM default.gitea\nWHERE EventType = 'pull_request'",
- "refId": "D",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE EventType = 'git.pullrequest.merged'",
- "rawQuery": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE EventType = 'git.pullrequest.merged'",
- "refId": "E",
- "round": "0s",
- "skip_comments": true
- }
- ],
- "title": "Number of Merge events from all Git providers",
- "transparent": true,
- "type": "stat"
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
-          "description": "This panel displays the total number of push events triggered from the different Git Providers",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "#7c766c",
- "mode": "palette-classic"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 12,
- "x": 12,
- "y": 0
- },
- "id": 2,
- "options": {
- "colorMode": "background",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "8.4.6",
- "targets": [
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "intervalFactor": 1,
- "query": "SELECT count(*) AS GitHub FROM default.github\nWHERE EventType = 'push'",
- "rawQuery": "SELECT count(*) AS GitHub FROM default.github\nWHERE EventType = 'push'",
- "refId": "A",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) Gitlab FROM default.gitlab\nWHERE EventType = 'Push Hook'",
- "rawQuery": "SELECT count(*) Gitlab FROM default.gitlab\nWHERE EventType = 'Push Hook'",
- "refId": "B",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS GiTea FROM default.gitea\nWHERE EventType = 'push'",
- "rawQuery": "SELECT count(*) AS GiTea FROM default.gitea\nWHERE EventType = 'push'",
- "refId": "C",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE EventType = 'repo:push'",
- "rawQuery": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE EventType = 'repo:push'",
- "refId": "D",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE EventType = 'git.push'",
- "rawQuery": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE EventType = 'git.push'",
- "refId": "E",
- "round": "0s",
- "skip_comments": true
- }
- ],
- "title": "Number of Push Events from all Git Providers",
- "transparent": true,
- "type": "stat"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of Push events on Azure by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "#17bcc1",
- "mode": "fixed"
- },
- "custom": {
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 5
- },
- "id": 36,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom"
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Azure_Push_Events\nFROM default.azure_devops\nWHERE EventType = 'git.push'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "Azure Push Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of Merge events on Azure by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "#17bcc1",
- "mode": "fixed"
- },
- "custom": {
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 5
- },
- "id": 34,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom"
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Azure_Merge_Events\nFROM default.azure_devops\nWHERE EventType = 'git.pullrequest.merged'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "Azure Merge Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of Push events on BitBucket by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "#e8f808",
- "mode": "fixed"
- },
- "custom": {
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 13
- },
- "id": 28,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom"
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Push_Events\nFROM default.bitbucket\nWHERE EventType = 'repo:push'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "BitBucket Push Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of Merge events on BitBucket by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "#e8f808",
- "mode": "fixed"
- },
- "custom": {
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 13
- },
- "id": 30,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom"
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Merge_Events\nFROM default.bitbucket\nWHERE EventType = 'pullrequest:fulfilled'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "BitBucket Merge Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of Push events on GiTea by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "#f46565",
- "mode": "fixed"
- },
- "custom": {
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 21
- },
- "id": 24,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom"
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Push_Events\nFROM default.gitea\nWHERE EventType = 'push'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "GiTea Push Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of Merge events on GiTea by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "#f46565",
- "mode": "fixed"
- },
- "custom": {
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 21
- },
- "id": 26,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom"
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "builderOptions": {
- "database": "default",
- "fields": [],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [],
- "table": ""
- },
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "database": "default",
- "fields": [],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [],
- "table": ""
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Merge_Events\nFROM default.gitea\nWHERE EventType = 'pull_request'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "GiTea Merge Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of push events on GitLab by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "light-blue",
- "mode": "fixed"
- },
- "custom": {
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 0,
- "y": 29
- },
- "id": 20,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom"
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Push_Events\nFROM default.gitlab\nWHERE EventType = 'Push Hook'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "GitLab Push Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of merge events on GitLab by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "light-blue",
- "mode": "fixed"
- },
- "custom": {
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 12,
- "y": 29
- },
- "id": 22,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom"
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Merge_Events\nFROM default.gitlab\nWHERE EventType = 'Merge Request Hook'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "GitLab Merge Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of push events on Github by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisLabel": "",
- "axisPlacement": "left",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 0,
- "y": 39
- },
- "id": 16,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom"
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Push_Events\nFROM default.github\nWHERE EventType = 'push'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "Github Push Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of merge events on Github by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 12,
- "y": 39
- },
- "id": 18,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom"
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Merge_Events\nFROM default.github\nWHERE EventType = 'pull_request'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "Github Merge Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
-          "description": "This panel displays all the events that occurred in the Azure provider",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "custom": {
- "align": "center",
- "displayMode": "color-text",
- "filterable": true
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 7,
- "w": 24,
- "x": 0,
- "y": 49
- },
- "id": 14,
- "options": {
- "footer": {
- "fields": "",
- "reducer": [
- "sum"
- ],
- "show": false
- },
- "showHeader": true
- },
- "pluginVersion": "8.4.6",
- "targets": [
- {
- "builderOptions": {
- "database": "default",
- "fields": [
- "Author",
- "CommitID",
- "CommitUrl",
- "EventType",
- "RepoName",
- "TimeStamp"
- ],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [
- {
- "dir": "DESC",
- "name": "TimeStamp"
- }
- ],
- "table": "azure_devops"
- },
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT Author, CommitID, CommitUrl, EventType, RepoName, TimeStamp FROM default.\"azure_devops\" ORDER BY TimeStamp DESC LIMIT 100",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "Azure",
- "transparent": true,
- "type": "table"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
-          "description": "This panel displays all the events that occurred in the BitBucket provider",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "custom": {
- "align": "center",
- "displayMode": "color-text",
- "filterable": true
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 56
- },
- "id": 12,
- "options": {
- "footer": {
- "fields": "",
- "reducer": [
- "sum"
- ],
- "show": false
- },
- "showHeader": true
- },
- "pluginVersion": "8.4.6",
- "targets": [
- {
- "builderOptions": {
- "database": "default",
- "fields": [
- "Author",
- "CommitID",
- "CommitUrl",
- "EventType",
- "RepoName",
- "TimeStamp"
- ],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [
- {
- "dir": "DESC",
- "name": "TimeStamp"
- }
- ],
- "table": "bitbucket"
- },
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT Author, CommitID, CommitUrl, EventType, RepoName, TimeStamp FROM default.\"bitbucket\" ORDER BY TimeStamp DESC LIMIT 100",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "BitBucket",
- "transparent": true,
- "type": "table"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
-          "description": "This panel displays all the events that occurred in the GiTea provider",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "custom": {
- "align": "center",
- "displayMode": "color-text",
- "filterable": true
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 9,
- "w": 24,
- "x": 0,
- "y": 64
- },
- "id": 10,
- "options": {
- "footer": {
- "fields": "",
- "reducer": [
- "sum"
- ],
- "show": false
- },
- "showHeader": true
- },
- "pluginVersion": "8.4.6",
- "targets": [
- {
- "builderOptions": {
- "database": "default",
- "fields": [
- "Author",
- "CommitID",
- "CommitUrl",
- "EventType",
- "RepoName",
- "TimeStamp"
- ],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [
- {
- "dir": "DESC",
- "name": "TimeStamp"
- }
- ],
- "table": "gitea"
- },
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT Author, CommitID, CommitUrl, EventType, RepoName, TimeStamp FROM default.\"gitea\" ORDER BY TimeStamp DESC LIMIT 100",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "GiTea",
- "transparent": true,
- "type": "table"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
-          "description": "This panel displays all the events that occurred in the GitLab provider.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "custom": {
- "align": "center",
- "displayMode": "color-text",
- "filterable": true
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 73
- },
- "id": 8,
- "options": {
- "footer": {
- "fields": "",
- "reducer": [
- "sum"
- ],
- "show": false
- },
- "showHeader": true
- },
- "pluginVersion": "8.4.6",
- "targets": [
- {
- "builderOptions": {
- "database": "default",
- "fields": [
- "Author",
- "CommitID",
- "CommitUrl",
- "EventType",
- "RepoName",
- "TimeStamp"
- ],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [
- {
- "dir": "DESC",
- "name": "TimeStamp"
- }
- ],
- "table": "gitlab"
- },
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT Author, CommitID, CommitUrl, EventType, RepoName, TimeStamp FROM default.\"gitlab\" ORDER BY TimeStamp DESC LIMIT 100",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "GitLab",
- "transparent": true,
- "type": "table"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
-          "description": "This panel displays all the events that occurred in the GitHub provider.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "custom": {
- "align": "center",
- "displayMode": "color-text",
- "filterable": true
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 9,
- "w": 24,
- "x": 0,
- "y": 81
- },
- "id": 6,
- "options": {
- "footer": {
- "fields": "",
- "reducer": [
- "sum"
- ],
- "show": false
- },
- "showHeader": true
- },
- "pluginVersion": "8.4.6",
- "targets": [
- {
- "builderOptions": {
- "database": "default",
- "fields": [
- "Author",
- "CommitID",
- "CommitUrl",
- "EventType",
- "RepoName",
- "TimeStamp"
- ],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [
- {
- "dir": "DESC",
- "name": "TimeStamp"
- }
- ],
- "table": "github"
- },
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT Author, CommitID, CommitUrl, EventType, RepoName, TimeStamp FROM default.\"github\" ORDER BY TimeStamp DESC LIMIT 100",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "GitHub",
- "transparent": true,
- "type": "table"
- }
- ],
- "schemaVersion": 35,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": []
- },
- "time": {
- "from": "now-6h",
- "to": "now"
- },
- "timepicker": {},
- "timezone": "",
- "title": "GitBridge",
- "uid": "u3EJcUqVk",
- "version": 15,
- "weekStart": ""
- }
-{{- end }}
\ No newline at end of file
diff --git a/charts/client/templates/configmap-gitea-dashboard.yaml b/charts/client/templates/configmap-gitea-dashboard.yaml
new file mode 100644
index 00000000..5d53ed2b
--- /dev/null
+++ b/charts/client/templates/configmap-gitea-dashboard.yaml
@@ -0,0 +1,516 @@
+{{- if .Values.dashboards.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "client.fullname" . }}-gitea-dashboard
+ annotations:
+ grafana_folder: "Gitbridge"
+ labels:
+ {{ .Values.dashboards.label }}: {{ .Values.dashboards.labelValue | quote }}
+data:
+ gitea.json: |-
+ {
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 61,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 2,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "// Check if data.series exists\nif (context.panel.data.series && context.panel.data.series.length > 0) {\n const eventTypes = context.panel.data.series[0].fields[0].values;\n const authors = context.panel.data.series[0].fields[1].values;\n const repoNames = context.panel.data.series[0].fields[2].values;\n const total = context.panel.data.series[0].fields[3].values; // Assuming you have a field named \"Total\"\n\n // Create nodes and links\n const nodes = [];\n const links = [];\n\n authors.forEach((author, index) => {\n const sourceNode = {\n name: author,\n category: 0, // Category for source nodes\n symbolSize: 60, // Size for source nodes\n };\n\n const eventTypeNode = {\n name: eventTypes[index],\n category: 1, // Category for eventType nodes\n symbolSize: 40, // Size for eventType nodes\n };\n\n const repoNode = {\n name: repoNames[index],\n category: 2, // Category for repo nodes\n symbolSize: 30, // Size for repo nodes\n };\n\n const totalNode = {\n name: `Total: ${total[index]}`, // Assuming you have an array \"total\"\n category: 3, // Category for total nodes\n symbolSize: 20, // Size for total nodes\n };\n\n // Ensure source, eventType, repo, and total nodes are unique before adding them\n if (!nodes.some((node) => node.name === sourceNode.name)) {\n nodes.push(sourceNode);\n }\n\n if (!nodes.some((node) => node.name === eventTypeNode.name)) {\n nodes.push(eventTypeNode);\n }\n\n if (!nodes.some((node) => node.name === repoNode.name)) {\n nodes.push(repoNode);\n }\n\n if (!nodes.some((node) => node.name === totalNode.name)) {\n nodes.push(totalNode);\n }\n\n // Create links between author, eventType, repo, and total nodes\n links.push({\n source: author,\n target: eventTypes[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: repoNames[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: totalNode.name,\n });\n });\n\n // Create categories for nodes\n const categories = [\n {\n name: 'Authors',\n },\n {\n name: 'Event Type',\n },\n {\n name: 'Repo Names',\n },\n {\n name: 'Total', // Add a category for \"Total\" nodes\n },\n ];\n\n // Create ECharts option\n const option = {\n tooltip: {\n trigger: 'item',\n formatter: '{b}',\n },\n legend: {\n x: 'left',\n data: ['Authors', 'Event Types', 'Repo Names', 'Total'], // Add \"Total\" to legend data\n },\n series: [\n {\n type: 'graph',\n layout: 'circular',\n roam: true,\n label: {\n show: true,\n textStyle: { // Add textStyle property to configure text style\n color: '#000', // Set the text color to a brighter color, such as white (#FFF)\n },\n },\n force: {\n repulsion: 100,\n gravity: 0.1,\n edgeLength: 150,\n },\n data: nodes,\n links: links,\n draggable: true,\n categories: categories,\n edgeSymbol: [\"none\", \"arrow\"],\n\n // Increase the size of arrow marks\n edgeSymbolSize: [12, 12], // Set the arrow size here\n lineStyle: {\n color: \"#000000\",\n curveness: 0,\n opacity: 0.3,\n },\n },\n ],\n };\n\n // Return the ECharts option\n return option;\n} else {\n // Display a custom message when data is not available\n const option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n\n return option;\n}",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"gitea\" \nWHERE $timeFilterByColumn(TimeStamp) AND EventType IN ($eventType) AND Author IN ($Author)\nGROUP BY EventType, Author, RepoName",
+ "rawQuery": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"gitea\" \nWHERE TimeStamp >= toDateTime(1694535218) AND TimeStamp <= toDateTime(1694621618) AND EventType IN ('push','pull_request') AND Author IN ('')\nGROUP BY EventType, Author, RepoName",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GiTea Contributions",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 10
+ },
+ "id": 5,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let options; // Initialize the options variable\n\nif (!context.panel.data || !context.panel.data.series || context.panel.data.series.length === 0 || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n options = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract Author and Push_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const pushEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Push_Events').values;\n\n // Create the ECharts options\n options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Push_Events'],\n orient: 'vertical', // Change the orientation to vertical\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: pushEvents,\n type: 'line',\n areaStyle: {\n color: 'rgba(0, 128, 255, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'blue', // Set the line color\n },\n name: 'Push_Events',\n },\n ],\n };\n}\n\nreturn options;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Push_Events\nFROM default.gitea\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'push'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Push_Events\nFROM default.gitea\nWHERE TimeStamp >= toDateTime(1694535157) AND TimeStamp <= toDateTime(1694621557) AND EventType = 'push'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of Gitea push events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 10
+ },
+ "id": 6,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\n// Define a default options object\nconst defaultOptions = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n};\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = defaultOptions;\n} else {\n // Extract Author and Merge_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const mergeEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Merge_Events').values;\n\n // Create the ECharts options\n const options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Merge_Events'],\n orient: 'vertical',\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: mergeEvents,\n type: 'line',\n name: 'Merge_Events',\n areaStyle: {\n color: 'rgba(255, 0, 0, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'red', // Set the line color\n },\n },\n ],\n };\n\n option = options; // Assign the options to the outer variable\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Merge_Events\nFROM default.gitea\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'pull_request'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Merge_Events\nFROM default.gitea\nWHERE TimeStamp >= toDateTime(1694535179) AND TimeStamp <= toDateTime(1694621579) AND EventType = 'pull_request'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of Gitea Merge events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 18
+ },
+ "id": 3,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS GiTea FROM default.gitea\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'push'",
+ "rawQuery": "SELECT count(*) AS GiTea FROM default.gitea\nWHERE TimeStamp >= toDateTime(1694535111) AND TimeStamp <= toDateTime(1694621511) AND EventType = 'push'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": false
+ }
+ ],
+          "title": "GiTea Push event count",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 12,
+ "y": 18
+ },
+ "id": 4,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS Gitea FROM default.gitea\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'pull_request'",
+ "rawQuery": "SELECT count(*) AS Gitea FROM default.gitea\nWHERE TimeStamp >= toDateTime(1694535137) AND TimeStamp <= toDateTime(1694621537) AND EventType = 'pull_request'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": false
+ }
+ ],
+          "title": "GiTea Merge events count",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 24
+ },
+ "id": 1,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.gitea\nWHERE $timeFilterByColumn(TimeStamp) AND EventType IN ($eventType) AND Author IN ($Author)",
+ "rawQuery": "SELECT * FROM default.gitea\nWHERE TimeStamp >= toDateTime(1694535089) AND TimeStamp <= toDateTime(1694621489) AND EventType IN ('push','pull_request') AND Author IN ('')",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GiTea Events",
+ "type": "table"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": true,
+ "text": [
+ "All"
+ ],
+ "value": [
+ "$__all"
+ ]
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "definition": "SELECT EventType FROM default.gitea",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "eventType",
+ "options": [],
+ "query": "SELECT EventType FROM default.gitea",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "definition": "SELECT Author FROM default.gitea",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "Author",
+ "options": [],
+ "query": "SELECT Author FROM default.gitea",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-24h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "GiTea",
+ "uid": "a1c6d705-91b0-4718-99b2-d93b0221bca9",
+ "version": 2,
+ "weekStart": ""
+ }
+
+{{- end }}
\ No newline at end of file
diff --git a/charts/client/templates/configmap-github-dashboard.yaml b/charts/client/templates/configmap-github-dashboard.yaml
new file mode 100644
index 00000000..38a98a43
--- /dev/null
+++ b/charts/client/templates/configmap-github-dashboard.yaml
@@ -0,0 +1,510 @@
+{{- if .Values.dashboards.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "client.fullname" . }}-github-dashboard
+ annotations:
+ grafana_folder: "Gitbridge"
+ labels:
+ {{ .Values.dashboards.label }}: {{ .Values.dashboards.labelValue | quote }}
+data:
+ github.json: |-
+ {
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 57,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 1,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "// Check if data.series exists\nif (context.panel.data.series && context.panel.data.series.length > 0) {\n const eventTypes = context.panel.data.series[0].fields[0].values;\n const authors = context.panel.data.series[0].fields[1].values;\n const repoNames = context.panel.data.series[0].fields[2].values;\n const total = context.panel.data.series[0].fields[3].values; // Assuming you have a field named \"Total\"\n\n // Create nodes and links\n const nodes = [];\n const links = [];\n\n authors.forEach((author, index) => {\n const sourceNode = {\n name: author,\n category: 0, // Category for source nodes\n symbolSize: 60, // Size for source nodes\n };\n\n const eventTypeNode = {\n name: eventTypes[index],\n category: 1, // Category for eventType nodes\n symbolSize: 40, // Size for eventType nodes\n };\n\n const repoNode = {\n name: repoNames[index],\n category: 2, // Category for repo nodes\n symbolSize: 30, // Size for repo nodes\n };\n\n const totalNode = {\n name: `Total: ${total[index]}`, // Assuming you have an array \"total\"\n category: 3, // Category for total nodes\n symbolSize: 20, // Size for total nodes\n };\n\n // Ensure source, eventType, repo, and total nodes are unique before adding them\n if (!nodes.some((node) => node.name === sourceNode.name)) {\n nodes.push(sourceNode);\n }\n\n if (!nodes.some((node) => node.name === eventTypeNode.name)) {\n nodes.push(eventTypeNode);\n }\n\n if (!nodes.some((node) => node.name === repoNode.name)) {\n nodes.push(repoNode);\n }\n\n if (!nodes.some((node) => node.name === totalNode.name)) {\n nodes.push(totalNode);\n }\n\n // Create links between author, eventType, repo, and total nodes\n links.push({\n source: author,\n target: eventTypes[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: repoNames[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: totalNode.name,\n });\n });\n\n // Create categories for nodes\n const categories = [\n {\n name: 'Authors',\n },\n {\n name: 'Event Type',\n },\n {\n name: 'Repo Names',\n },\n {\n name: 'Total', // Add a category for \"Total\" nodes\n },\n ];\n\n // Create ECharts option\n const option = {\n tooltip: {\n trigger: 'item',\n formatter: '{b}',\n },\n legend: {\n x: 'left',\n data: ['Authors', 'Event Types', 'Repo Names', 'Total'], // Add \"Total\" to legend data\n },\n series: [\n {\n type: 'graph',\n layout: 'circular',\n roam: true,\n label: {\n show: true,\n textStyle: { // Add textStyle property to configure text style\n color: '#000', // Set the text color to a brighter color, such as white (#FFF)\n },\n },\n force: {\n repulsion: 100,\n gravity: 0.1,\n edgeLength: 150,\n },\n data: nodes,\n links: links,\n draggable: true,\n categories: categories,\n edgeSymbol: [\"none\", \"arrow\"],\n\n // Increase the size of arrow marks\n edgeSymbolSize: [12, 12], // Set the arrow size here\n lineStyle: {\n color: \"#000000\",\n curveness: 0,\n opacity: 0.3,\n },\n },\n ],\n };\n\n // Return the ECharts option\n return option;\n} else {\n // Handle the case when data.series does not exist\n return {};\n}",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"github\" \nWHERE $timeFilterByColumn(TimeStamp) AND Author IN ($Author) AND EventType IN ($eventType)\nGROUP BY EventType, Author, RepoName",
+ "rawQuery": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"github\" \nWHERE TimeStamp >= toDateTime(1694597007) AND TimeStamp <= toDateTime(1694618607) AND Author IN ('ahinvinith') AND EventType IN ('push','pull_request')\nGROUP BY EventType, Author, RepoName",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GitHub Contributors",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 10
+ },
+ "id": 5,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let options; // Initialize the options variable\n\nif (!context.panel.data || !context.panel.data.series || context.panel.data.series.length === 0 || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n options = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract Author and Push_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const pushEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Push_Events').values;\n\n // Create the ECharts options\n options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Push_Events'],\n orient: 'vertical', // Change the orientation to vertical\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: pushEvents,\n type: 'line',\n areaStyle: {\n color: 'rgba(0, 128, 255, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'blue', // Set the line color\n },\n name: 'Push_Events',\n },\n ],\n };\n}\n\nreturn options;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Push_Events\nFROM default.github\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'push'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Push_Events\nFROM default.github\nWHERE TimeStamp >= toDateTime(1694596958) AND TimeStamp <= toDateTime(1694618558) AND EventType = 'push'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+        "title": "Number of GitHub Push events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 10
+ },
+ "id": 6,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\n// Define a default options object\nconst defaultOptions = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n};\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = defaultOptions;\n} else {\n // Extract Author and Merge_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const mergeEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Merge_Events').values;\n\n // Create the ECharts options\n const options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Merge_Events'],\n orient: 'vertical',\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: mergeEvents,\n type: 'line',\n name: 'Merge_Events',\n areaStyle: {\n color: 'rgba(255, 0, 0, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'red', // Set the line color\n },\n },\n ],\n };\n\n option = options; // Assign the options to the outer variable\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Merge_Events\nFROM default.github\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'pull_request'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Merge_Events\nFROM default.github\nWHERE TimeStamp >= toDateTime(1694596980) AND TimeStamp <= toDateTime(1694618580) AND EventType = 'pull_request'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of GitHub Merge events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 18
+ },
+ "id": 3,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "const values = context.panel.data.series[0].fields[0].values;\n\nreturn {\n series: [\n {\n type: 'liquidFill',\n radius: '90%',\n data: values, // Use the raw values here\n label: {\n formatter: '{a|{c}}',\n rich: {\n a: {\n color: '#000', // You can set the text color here\n },\n },\n },\n tooltip: {\n formatter: '{a}: {c}', // This formatter displays the raw values\n },\n },\n ],\n};\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS GitHub FROM default.github\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'pull_request'",
+ "rawQuery": "SELECT count(*) AS GitHub FROM default.github\nWHERE TimeStamp >= toDateTime(1694596901) AND TimeStamp <= toDateTime(1694618501) AND EventType = 'pull_request'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+        "title": "GitHub Merge Events Count",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 18
+ },
+ "id": 4,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "const values = context.panel.data.series[0].fields[0].values;\n\nreturn {\n series: [\n {\n type: 'liquidFill',\n radius: '90%',\n data: values, // Use the raw values here\n label: {\n formatter: '{a|{c}}',\n rich: {\n a: {\n color: '#000', // You can set the text color here\n },\n },\n },\n tooltip: {\n formatter: '{a}: {c}', // This formatter displays the raw values\n },\n },\n ],\n};\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS GitHub FROM default.github\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'push'",
+ "rawQuery": "SELECT count(*) AS GitHub FROM default.github\nWHERE TimeStamp >= toDateTime(1694596932) AND TimeStamp <= toDateTime(1694618532) AND EventType = 'push'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+        "title": "GitHub Push Events Count",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 25
+ },
+ "id": 2,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.github\nWHERE $timeFilterByColumn(TimeStamp) AND EventType IN ($eventType) AND Author IN ($Author)\nORDER BY TimeStamp DESC",
+ "rawQuery": "SELECT * FROM default.github\nWHERE TimeStamp >= toDateTime(1694596869) AND TimeStamp <= toDateTime(1694618469) AND EventType IN ('push','pull_request') AND Author IN ('ahinvinith')\nORDER BY TimeStamp DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+        "title": "GitHub Events",
+ "type": "table"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": true,
+ "text": [
+ "ahinvinith"
+ ],
+ "value": [
+ "ahinvinith"
+ ]
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "definition": "select Author from default.github",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "Author",
+ "options": [],
+ "query": "select Author from default.github",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "current": {
+ "selected": true,
+ "text": [
+ "push"
+ ],
+ "value": [
+ "push"
+ ]
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "definition": "select EventType from default.github",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "eventType",
+ "options": [],
+ "query": "select EventType from default.github",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-24h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "GitHub",
+ "uid": "ef91218c-94cb-48b1-be1d-0bafe848b75c",
+ "version": 2,
+ "weekStart": ""
+ }
+
+{{- end }}
\ No newline at end of file
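
Note on the embedded panel scripts: the GitHub contributors panel above builds an ECharts graph by de-duplicating nodes and then linking each Author to its EventType, RepoName, and Total. The snippet below is a minimal, standalone sketch of that pattern using hand-written sample rows; the `rows` data and the `addNode` helper are illustrative only and are not part of this ConfigMap or the Volkov Labs ECharts panel plugin.

// Minimal sketch (not shipped with the chart): build unique graph nodes and
// Author -> EventType -> RepoName/Total links the same way the panel's
// getOption script does.
const rows = [
  { Author: 'alice', EventType: 'push', RepoName: 'demo-repo', Total: 4 },
  { Author: 'alice', EventType: 'pull_request', RepoName: 'demo-repo', Total: 1 },
];

const nodes = [];
const links = [];

// Only add a node once, mirroring the `nodes.some(...)` guard in the panel code.
const addNode = (name, category, symbolSize) => {
  if (!nodes.some((n) => n.name === name)) {
    nodes.push({ name, category, symbolSize });
  }
};

rows.forEach((row) => {
  const totalName = `Total: ${row.Total}`;
  addNode(row.Author, 0, 60);       // author nodes
  addNode(row.EventType, 1, 40);    // event type nodes
  addNode(row.RepoName, 2, 30);     // repository nodes
  addNode(totalName, 3, 20);        // count nodes
  links.push({ source: row.Author, target: row.EventType });
  links.push({ source: row.EventType, target: row.RepoName });
  links.push({ source: row.EventType, target: totalName });
});

console.log(JSON.stringify({ nodes, links }, null, 2));

Running this with Node.js prints the node/link arrays that the real panel feeds into its `series[0].data` and `series[0].links`.
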
diff --git a/charts/client/templates/configmap-gitlab-dashboard.yaml b/charts/client/templates/configmap-gitlab-dashboard.yaml
new file mode 100644
index 00000000..fd1840dc
--- /dev/null
+++ b/charts/client/templates/configmap-gitlab-dashboard.yaml
@@ -0,0 +1,516 @@
+{{- if .Values.dashboards.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "client.fullname" . }}-gitlab-dashboard
+ annotations:
+ grafana_folder: "Gitbridge"
+ labels:
+ {{ .Values.dashboards.label }}: {{ .Values.dashboards.labelValue | quote }}
+data:
+ gitlab.json: |-
+ {
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 59,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 2,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "// Check if data.series exists\nif (context.panel.data.series && context.panel.data.series.length > 0) {\n const eventTypes = context.panel.data.series[0].fields[0].values;\n const authors = context.panel.data.series[0].fields[1].values;\n const repoNames = context.panel.data.series[0].fields[2].values;\n const total = context.panel.data.series[0].fields[3].values; // Assuming you have a field named \"Total\"\n\n // Create nodes and links\n const nodes = [];\n const links = [];\n\n authors.forEach((author, index) => {\n const sourceNode = {\n name: author,\n category: 0, // Category for source nodes\n symbolSize: 60, // Size for source nodes\n };\n\n const eventTypeNode = {\n name: eventTypes[index],\n category: 1, // Category for eventType nodes\n symbolSize: 40, // Size for eventType nodes\n };\n\n const repoNode = {\n name: repoNames[index],\n category: 2, // Category for repo nodes\n symbolSize: 30, // Size for repo nodes\n };\n\n const totalNode = {\n name: `Total: ${total[index]}`, // Assuming you have an array \"total\"\n category: 3, // Category for total nodes\n symbolSize: 20, // Size for total nodes\n };\n\n // Ensure source, eventType, repo, and total nodes are unique before adding them\n if (!nodes.some((node) => node.name === sourceNode.name)) {\n nodes.push(sourceNode);\n }\n\n if (!nodes.some((node) => node.name === eventTypeNode.name)) {\n nodes.push(eventTypeNode);\n }\n\n if (!nodes.some((node) => node.name === repoNode.name)) {\n nodes.push(repoNode);\n }\n\n if (!nodes.some((node) => node.name === totalNode.name)) {\n nodes.push(totalNode);\n }\n\n // Create links between author, eventType, repo, and total nodes\n links.push({\n source: author,\n target: eventTypes[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: repoNames[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: totalNode.name,\n });\n });\n\n // Create categories for nodes\n const categories = [\n {\n name: 'Authors',\n },\n {\n name: 'Event Type',\n },\n {\n name: 'Repo Names',\n },\n {\n name: 'Total', // Add a category for \"Total\" nodes\n },\n ];\n\n // Create ECharts option\n const option = {\n tooltip: {\n trigger: 'item',\n formatter: '{b}',\n },\n legend: {\n x: 'left',\n data: ['Authors', 'Event Types', 'Repo Names', 'Total'], // Add \"Total\" to legend data\n },\n series: [\n {\n type: 'graph',\n layout: 'circular',\n roam: true,\n label: {\n show: true,\n textStyle: { // Add textStyle property to configure text style\n color: '#000', // Set the text color to a brighter color, such as white (#FFF)\n },\n },\n force: {\n repulsion: 100,\n gravity: 0.1,\n edgeLength: 150,\n },\n data: nodes,\n links: links,\n draggable: true,\n categories: categories,\n edgeSymbol: [\"none\", \"arrow\"],\n\n // Increase the size of arrow marks\n edgeSymbolSize: [12, 12], // Set the arrow size here\n lineStyle: {\n color: \"#000000\",\n curveness: 0,\n opacity: 0.3,\n },\n },\n ],\n };\n\n // Return the ECharts option\n return option;\n} else {\n // Display a custom message when data is not available\n const option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n\n return option;\n}",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"gitlab\" \nWHERE $timeFilterByColumn(TimeStamp) AND EventType In ($eventType) AND Author IN ($Author)\nGROUP BY EventType, Author, RepoName",
+ "rawQuery": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"gitlab\" \nWHERE TimeStamp >= toDateTime(1694533326) AND TimeStamp <= toDateTime(1694619726) AND EventType In ('Push Hook') AND Author IN ('Ahin Vinith')\nGROUP BY EventType, Author, RepoName",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GitLab Contributions",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 10
+ },
+ "id": 5,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let options; // Initialize the options variable\n\nif (!context.panel.data || !context.panel.data.series || context.panel.data.series.length === 0 || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n options = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract Author and Push_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const pushEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Push_Events').values;\n\n // Create the ECharts options\n options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Push_Events'],\n orient: 'vertical', // Change the orientation to vertical\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: pushEvents,\n type: 'line',\n areaStyle: {\n color: 'rgba(0, 128, 255, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'blue', // Set the line color\n },\n name: 'Push_Events',\n },\n ],\n };\n}\n\nreturn options;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Push_Events\nFROM default.gitlab\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'Push Hook'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Push_Events\nFROM default.gitlab\nWHERE TimeStamp >= toDateTime(1694533277) AND TimeStamp <= toDateTime(1694619677) AND EventType = 'Push Hook'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of GitLab Push events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 10
+ },
+ "id": 6,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\n// Define a default options object\nconst defaultOptions = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n};\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = defaultOptions;\n} else {\n // Extract Author and Merge_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const mergeEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Merge_Events').values;\n\n // Create the ECharts options\n const options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Merge_Events'],\n orient: 'vertical',\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: mergeEvents,\n type: 'line',\n name: 'Merge_Events',\n areaStyle: {\n color: 'rgba(255, 0, 0, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'red', // Set the line color\n },\n },\n ],\n };\n\n option = options; // Assign the options to the outer variable\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Merge_Events\nFROM default.gitlab\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'Merge Request Hook'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Merge_Events\nFROM default.gitlab\nWHERE TimeStamp >= toDateTime(1694533298) AND TimeStamp <= toDateTime(1694619698) AND EventType = 'Merge Request Hook'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of GitLab Merge events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 18
+ },
+ "id": 3,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) Gitlab FROM default.gitlab\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'Push Hook'",
+ "rawQuery": "SELECT count(*) Gitlab FROM default.gitlab\nWHERE TimeStamp >= toDateTime(1694533230) AND TimeStamp <= toDateTime(1694619630) AND EventType = 'Push Hook'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GitLab Push Events",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 18
+ },
+ "id": 4,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS GitLab FROM default.gitlab\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'Merge Request Hook'",
+ "rawQuery": "SELECT count(*) AS GitLab FROM default.gitlab\nWHERE TimeStamp >= toDateTime(1694533256) AND TimeStamp <= toDateTime(1694619656) AND EventType = 'Merge Request Hook'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+          "title": "GitLab Merge Events",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 23
+ },
+ "id": 1,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.gitlab\nWHERE $timeFilterByColumn(TimeStamp) AND EventType IN ($eventType) AND Author IN ($Author)",
+ "rawQuery": "SELECT * FROM default.gitlab\nWHERE TimeStamp >= toDateTime(1694533200) AND TimeStamp <= toDateTime(1694619600) AND EventType IN ('Push Hook') AND Author IN ('Ahin Vinith')",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GitLab Events",
+ "type": "table"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "definition": "SELECT EventType FROM default.gitlab",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "eventType",
+ "options": [],
+ "query": "SELECT EventType FROM default.gitlab",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "definition": "SELECT Author FROM default.gitlab",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "Author",
+ "options": [],
+ "query": "SELECT Author FROM default.gitlab",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-24h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "GitLab",
+ "uid": "ec8b9cb1-f9ae-4139-b270-4824b6508eff",
+ "version": 2,
+ "weekStart": ""
+ }
+
+{{- end }}
\ No newline at end of file
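
Note on the "Data not available" guard: most getOption scripts in the GitHub and GitLab dashboards above fall back to a title-only option when the query returns no data frames. As an illustration only, the sketch below factors that guard into a small helper; `withDataGuard` and the sample frame are hypothetical and do not ship with this chart or the ECharts panel plugin.

// Hypothetical helper (not part of the chart): return a fallback
// "Data not available" option when the panel data frame is missing or empty,
// otherwise delegate to a chart builder, mirroring the guard in each getOption.
function withDataGuard(panelData, buildOption) {
  const hasSeries =
    panelData &&
    Array.isArray(panelData.series) &&
    panelData.series.length > 0 &&
    panelData.series[0].fields;
  if (!hasSeries) {
    return {
      title: {
        text: 'Data not available',
        textStyle: { fontSize: 24, fontWeight: 'bold' },
        left: 'center',
        top: 'middle',
      },
    };
  }
  return buildOption(panelData.series[0]);
}

// Example usage with a fabricated frame shaped like the Push_Events query result.
const frame = {
  series: [
    {
      fields: [
        { name: 'Author', values: ['alice', 'bob'] },
        { name: 'Push_Events', values: [7, 3] },
      ],
    },
  ],
};

const option = withDataGuard(frame, (series) => ({
  xAxis: {
    type: 'category',
    data: series.fields.find((f) => f.name === 'Author').values,
  },
  yAxis: { type: 'value' },
  series: [
    {
      type: 'line',
      data: series.fields.find((f) => f.name === 'Push_Events').values,
    },
  ],
}));

console.log(JSON.stringify(option));
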
diff --git a/charts/client/templates/configmap-kubedata-dashboard.yaml b/charts/client/templates/configmap-kubedata-dashboard.yaml
index f68b63e1..d479fd9b 100644
--- a/charts/client/templates/configmap-kubedata-dashboard.yaml
+++ b/charts/client/templates/configmap-kubedata-dashboard.yaml
@@ -3,6 +3,8 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "client.fullname" . }}-kubedatas-dashboard
+ annotations:
+ grafana_folder: "Kubviz"
labels:
{{ .Values.dashboards.label }}: {{ .Values.dashboards.labelValue | quote }}
data:
@@ -30,10 +32,472 @@ data:
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
- "id": 15,
+ "id": 56,
"links": [],
"liveNow": false,
"panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 11,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract data from your JSON as before\n const clusterNames = context.panel.data.series[0].fields[0].values;\n const namespaces = context.panel.data.series[0].fields[1].values;\n const reasons = context.panel.data.series[0].fields[2].values;\n const pods = context.panel.data.series[0].fields[3].values;\n\n // Create a hierarchical structure from the data without a root node\n const hierarchy = {\n name: 'root', // Use 'root' as a placeholder\n children: [],\n };\n\n const seenClusterNames = new Set();\n const seenNamespaces = new Set();\n\n for (let i = 0; i < clusterNames.length; i++) {\n const clusterName = clusterNames[i];\n const namespace = namespaces[i];\n const reason = reasons[i];\n const pod = pods[i];\n\n if (!seenClusterNames.has(clusterName)) {\n seenClusterNames.add(clusterName);\n const clusterNode = { name: clusterName, children: [] };\n hierarchy.children.push(clusterNode);\n seenNamespaces.clear(); // Reset seenNamespaces for each cluster\n }\n\n const clusterNode = hierarchy.children.find((node) => node.name === clusterName);\n\n if (!seenNamespaces.has(namespace)) {\n seenNamespaces.add(namespace);\n const namespaceNode = { name: namespace, children: [] };\n clusterNode.children.push(namespaceNode);\n }\n\n const namespaceNode = clusterNode.children.find((node) => node.name === namespace);\n const reasonNode = { name: reason, children: [{ name: `Count: ${pod}` }] };\n namespaceNode.children.push(reasonNode);\n }\n\n // Create the tree chart using ECharts\n option = {\n tooltip: {\n trigger: 'item',\n triggerOn: 'mousemove',\n formatter: function (params) {\n const node = params.data;\n let tooltip = '';\n if (node.column) {\n tooltip += `${node.column}: ${node.name}`;\n } else {\n tooltip += node.name;\n }\n return tooltip;\n },\n },\n series: [\n {\n type: 'tree',\n data: hierarchy.children, // Use the children directly\n top: '1%',\n left: '7%',\n bottom: '1%',\n right: '20%',\n symbolSize: 7,\n label: {\n position: 'left',\n verticalAlign: 'middle',\n align: 'centre',\n fontSize: 15, // Increase the text size for regular nodes\n fontWeight: 'bold', // Set the font weight to bold\n },\n leaves: {\n label: {\n position: 'right',\n verticalAlign: 'middle',\n align: 'left',\n fontSize: 15, // Increase the text size for leaves\n fontWeight: 'bold', // Set the font weight to bold\n },\n },\n emphasis: {\n focus: 'descendant',\n },\n expandAndCollapse: true,\n animationDuration: 550,\n animationDurationUpdate: 750,\n },\n ],\n };\n}\n\nreturn option;",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Namespace, Reason, count(*) AS Pods\nFROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN ('Pod') AND ClusterName IN ($clusterName) AND Namespace IN ($namespace) AND Reason IN ($reason)\nGROUP BY ClusterName, Namespace, Reason",
+ "rawQuery": "SELECT ClusterName, Namespace, Reason, count(*) AS Pods\nFROM default.events\nWHERE EventTime >= toDateTime(1702949773) AND EventTime <= toDateTime(1702992973) AND Kind IN ('Pod') AND ClusterName IN ('capten-controlplane','kubviz','dev') AND Namespace IN ('argo-cd','crossplane','default','kubescape-prometheus','kubviz','kyverno','linkerd','observability','openebs-cstor','testkube','capten','falco','tekton','crossplane-system','test5','tek','tekton-pipelines','harbor','tekton-pipelines-resolvers','quality-trace','kube-system','cert-manager','emojivoto','local-path-storage','test-linkerd','external-secrets','policy-reporter','velero','tracetest') AND Reason IN ('BackOff','FailedMount','Unhealthy','SyncPackage','SelectComposition','SyncFailed','CannotUpdateExternalResource','RenderCRD','OfferClaim','EstablishComposite','SuccessfulCreate','InjectionSkipped','Scheduled','Pulled','Created','Started','Completed','SawCompletedJob','BindClusterRole','ApplyClusterRoles','CannotInitializeManagedResource','InstallPackageRevision','CreatedUsers','CreatedSuperuser','ApplyRoles','Synced','Killing','Init','ExternalProvisioning','Provisioning','Running','FailedScheduling','ProvisioningSucceeded','ScalingReplicaSet','Pending','Updated','FailUpdate','Degraded','Pulling','Healthy','Succeeded','IssuedLeafCertificate','Failed','Offline','EvictionThresholdMet','NodeHasDiskPressure','NodeHasNoDiskPressure','FreeDiskSpaceFailed','CreateCertificate','Issuing','Generated','Requested','cert-manager.io','OrderCreated','OrderPending','Presented','DomainVerified','Complete','CertificateIssued','SuccessfulDelete','ConfigureCompositeResource','BindCompositeResource','ImageGCFailed','ClaimLost','Evicted','RecreatingFailedPod','LeaderElection','InternalError','Reused','IssuerUpdated','OperationStarted','ResourceUpdated','OperationCompleted','MultiplePodDisruptionBudgets','MissingJob','InvalidOrder','TaintManagerEviction','SystemOOM','FailedKillPod','NodeHasSufficientMemory','NodeHasSufficientPID','NodeNotReady','FailedPreStopHook','NodeReady','Pool Imported','AlreadyPresent','StartingCassandra','UpdateCompleted','LabeledPodAsSeed','StartedCassandra','ComposeResources','UpdatedExternalResource','CannotDeleteExternalResource','DeletedExternalResource','ReconcileInProgress','ReconcileCompleted','ReconcileStarted','ProgressHostsCompleted','ReconcileFailed','UnlabeledPodAsSeed','CannotObserveExternalResource','DeletingStuckPod','DeleteCompositeResource','ResourceDeleted','PublishConnectionSecret','UnpackPackage','ResolveDependencies','ExceededGracePeriod','ProvisioningFailed','CannotCreateExternalResource','FailStatusSync','FailCreate','CannotUpdateManagedResource','CannotResolveResourceReferences','CreatedExternalResource','FailedSync','RegisteredNode','OwnerRefInvalidNamespace','NoPods','DeadlineExceeded','Create','FailedGetScale','NodeAllocatableEnforced','Starting','LintPackage','SandboxChanged','WaitForFirstConsumer','FailedGetResourceMetric','FailedCreate','Injected','Resizing','ExternalExpanding','VolumeResizeFailed','VolumeResizeSuccessful','FileSystemResizeRequired','FileSystemResizeSuccessful','CreatedResource','FailedToUpdateEndpoint','UpdateCertificate','FailedUpdateStatus','UpdateFailed','FailedComputeMetricsReplicas','FailedToCreateEndpoint','FailedAttachVolume','FailedToUpdateEndpointSlices','FailedDelete','Pool Expansion','Error')\nGROUP BY ClusterName, Namespace, Reason",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+          "title": "Pod Scenario Counts by Cluster and Namespace",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 9
+ },
+ "id": 12,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract data from your JSON as before\n const reasons = context.panel.data.series[0].fields[0].values;\n const counts = context.panel.data.series[0].fields[1].values;\n\n // Check if reasons and counts are defined and not empty\n if (!reasons || !counts || reasons.length === 0 || counts.length === 0) {\n // Display a message when no data is available\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n } else {\n // Data is available, proceed with the chart creation\n // Create an array of data items, each containing name and value\n const seriesData = reasons.map((reason, index) => ({\n name: reason,\n value: counts[index],\n }));\n\n // Define a custom color for the bars\n const customColor = 'rgb(0, 123, 255)'; // Change this to your desired color\n\n // Apache ECharts option\n option = {\n xAxis: {\n type: 'category',\n data: reasons, // Use the reasons directly for xAxis data\n axisLabel: {\n interval: 0, // Display all labels on the xAxis\n },\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Pods'], // Legend name\n left: 'left', // Position the legend on the left side\n bottom: 'bottom', // Position the legend at the bottom\n },\n series: [\n {\n name: 'Pods', // Series name for the legend\n data: seriesData,\n type: 'bar',\n label: {\n show: true,\n position: 'top',\n formatter: '{c}',\n },\n itemStyle: {\n barBorderRadius: [5, 5, 0, 0], // Adjust the values to control the curvature\n color: customColor, // Set the custom color for the bars\n },\n },\n ],\n };\n }\n}\n\nreturn option;",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Reason, count(Reason) AS Pods\nFROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN ('Pod') AND ClusterName IN ($clusterName) AND Namespace In ($namespace)\nGROUP BY Reason",
+ "rawQuery": "SELECT Reason, count(Reason) AS Pods\nFROM default.events\nWHERE EventTime >= toDateTime(1702949951) AND EventTime <= toDateTime(1702993151) AND Kind IN ('Pod') AND ClusterName IN ('capten-controlplane','kubviz','dev') AND Namespace In ('quality-trace','crossplane-system','observability','default','testkube','openebs-cstor','kyverno','kubescape-prometheus','tekton-pipelines','capten','tek','test-linkerd','linkerd','argo-cd','tracetest','emojivoto','falco','kube-system','crossplane','kubviz','tekton','test5','harbor','tekton-pipelines-resolvers','cert-manager','local-path-storage','external-secrets','policy-reporter','velero')\nGROUP BY Reason",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Total Pod Counts by Reason",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 14,
+ "w": 24,
+ "x": 0,
+ "y": 19
+ },
+ "id": 6,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n\n // Define the data from your JSON\n const clusterNames = context.panel.data.series[0].fields[0].values;\n const kinds = context.panel.data.series[0].fields[1].values;\n const reasons = context.panel.data.series[0].fields[2].values;\n const counts = context.panel.data.series[0].fields[3].values;\n\n // Create the Sankey chart configuration\n option = {\n series: {\n type: 'sankey',\n layout: 'none',\n emphasis: {\n focus: 'adjacency',\n },\n data: [],\n links: [],\n },\n tooltip: {\n trigger: 'item',\n formatter: (params) => {\n if (params.dataType === 'node') {\n return params.name;\n }\n if (params.dataType === 'edge') {\n return `Count: ${counts[params.dataIndex]}`; // Display count values\n }\n return '';\n },\n },\n };\n\n // Create nodes for ClusterName, Kind, and Reason\n const uniqueClusterNames = Array.from(new Set(clusterNames));\n const uniqueKinds = Array.from(new Set(kinds));\n const uniqueReasons = Array.from(new Set(reasons));\n\n uniqueClusterNames.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n uniqueKinds.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n uniqueReasons.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n\n // Create links from Kind to Reason\n kinds.forEach((kind, index) => {\n const sourceIndex = uniqueClusterNames.length + uniqueKinds.indexOf(kind);\n const targetIndex = 1 * uniqueClusterNames.length + uniqueKinds.length + uniqueReasons.indexOf(reasons[index]);\n option.series.links.push({\n source: sourceIndex,\n target: targetIndex,\n value: counts[index], // Use count values\n });\n });\n\n // Create links from ClusterName to Kind\n clusterNames.forEach((clusterName, index) => {\n const sourceIndex = uniqueClusterNames.indexOf(clusterName);\n const targetIndex = uniqueClusterNames.length + uniqueKinds.indexOf(kinds[index]);\n option.series.links.push({\n source: sourceIndex,\n target: targetIndex,\n value: 1,\n });\n });\n}\n\n\nreturn option;\n// Render the chart\nmyChart.setOption(option);",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Kind, Reason, COUNT(*) as Count\nFROM default.events\nWHERE $timeFilterByColumn(EventTime) AND ClusterName IN ($clusterName) AND Kind IN ('Pod', 'Node', 'Deployment')\nGROUP BY ClusterName, Kind, Reason\nORDER BY Count DESC",
+ "rawQuery": "SELECT ClusterName, Kind, Reason, COUNT(*) as Count\nFROM default.events\nWHERE EventTime >= toDateTime(1698927439) AND EventTime <= toDateTime(1698927739) AND ClusterName IN ('beta-cluster') AND Kind IN ('Pod', 'Node', 'Deployment')\nGROUP BY ClusterName, Kind, Reason\nORDER BY Count DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+          "title": "Pod, Node, and Deployment Events by Cluster and Reason",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 33
+ },
+ "id": 8,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n\n // Define the data from your JSON\n const clusterNames = context.panel.data.series[0].fields[0].values;\n const kinds = context.panel.data.series[0].fields[1].values;\n const reasons = context.panel.data.series[0].fields[2].values;\n const counts = context.panel.data.series[0].fields[3].values;\n\n // Create the Sankey chart configuration\n option = {\n series: {\n type: 'sankey',\n layout: 'none',\n emphasis: {\n focus: 'adjacency',\n },\n data: [],\n links: [],\n },\n tooltip: {\n trigger: 'item',\n formatter: (params) => {\n if (params.dataType === 'node') {\n return params.name;\n }\n if (params.dataType === 'edge') {\n return `Count: ${counts[params.dataIndex]}`; // Display count values\n }\n return '';\n },\n },\n };\n\n // Create nodes for ClusterName, Kind, and Reason\n const uniqueClusterNames = Array.from(new Set(clusterNames));\n const uniqueKinds = Array.from(new Set(kinds));\n const uniqueReasons = Array.from(new Set(reasons));\n\n uniqueClusterNames.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n uniqueKinds.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n uniqueReasons.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n\n // Create links from Kind to Reason\n kinds.forEach((kind, index) => {\n const sourceIndex = uniqueClusterNames.length + uniqueKinds.indexOf(kind);\n const targetIndex = 1 * uniqueClusterNames.length + uniqueKinds.length + uniqueReasons.indexOf(reasons[index]);\n option.series.links.push({\n source: sourceIndex,\n target: targetIndex,\n value: counts[index], // Use count values\n });\n });\n\n // Create links from ClusterName to Kind\n clusterNames.forEach((clusterName, index) => {\n const sourceIndex = uniqueClusterNames.indexOf(clusterName);\n const targetIndex = uniqueClusterNames.length + uniqueKinds.indexOf(kinds[index]);\n option.series.links.push({\n source: sourceIndex,\n target: targetIndex,\n value: 1,\n });\n });\n}\n\n\n\nreturn option;\n// Render the chart\nmyChart.setOption(option);",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Kind, Reason, COUNT(*) as Count\nFROM default.events\nWHERE $timeFilterByColumn(EventTime) AND ClusterName IN ($clusterName) AND Kind IN ('Service')\nGROUP BY ClusterName, Kind, Reason\nORDER BY Count DESC",
+ "rawQuery": "SELECT ClusterName, Kind, Reason, COUNT(*) as Count\nFROM default.events\nWHERE EventTime >= toDateTime(1698931712) AND EventTime <= toDateTime(1698932012) AND ClusterName IN ('beta-cluster') AND Kind IN ('Service')\nGROUP BY ClusterName, Kind, Reason\nORDER BY Count DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Service Events by Cluster and Reason",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 33
+ },
+ "id": 9,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n\n // Define the data from your JSON\n const clusterNames = context.panel.data.series[0].fields[0].values;\n const kinds = context.panel.data.series[0].fields[1].values;\n const reasons = context.panel.data.series[0].fields[2].values;\n const counts = context.panel.data.series[0].fields[3].values;\n\n // Create the Sankey chart configuration\n option = {\n series: {\n type: 'sankey',\n layout: 'none',\n emphasis: {\n focus: 'adjacency',\n },\n data: [],\n links: [],\n },\n tooltip: {\n trigger: 'item',\n formatter: (params) => {\n if (params.dataType === 'node') {\n return params.name;\n }\n if (params.dataType === 'edge') {\n return `Count: ${counts[params.dataIndex]}`; // Display count values\n }\n return '';\n },\n },\n };\n\n // Create nodes for ClusterName, Kind, and Reason\n const uniqueClusterNames = Array.from(new Set(clusterNames));\n const uniqueKinds = Array.from(new Set(kinds));\n const uniqueReasons = Array.from(new Set(reasons));\n\n uniqueClusterNames.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n uniqueKinds.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n uniqueReasons.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n\n // Create links from Kind to Reason\n kinds.forEach((kind, index) => {\n const sourceIndex = uniqueClusterNames.length + uniqueKinds.indexOf(kind);\n const targetIndex = 1 * uniqueClusterNames.length + uniqueKinds.length + uniqueReasons.indexOf(reasons[index]);\n option.series.links.push({\n source: sourceIndex,\n target: targetIndex,\n value: counts[index], // Use count values\n });\n });\n\n // Create links from ClusterName to Kind\n clusterNames.forEach((clusterName, index) => {\n const sourceIndex = uniqueClusterNames.indexOf(clusterName);\n const targetIndex = uniqueClusterNames.length + uniqueKinds.indexOf(kinds[index]);\n option.series.links.push({\n source: sourceIndex,\n target: targetIndex,\n value: 1,\n });\n });\n}\n\n\nreturn option;\n// Render the chart\nmyChart.setOption(option);\n\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Kind, Reason, COUNT(*) as Count\nFROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN ('PersistentVolume','PersistentVolumeClaim')\nGROUP BY ClusterName, Kind, Reason\nORDER BY Count DESC\n",
+ "rawQuery": "SELECT ClusterName, Kind, Reason, COUNT(*) as Count\nFROM default.events\nWHERE EventTime >= toDateTime(1698931738) AND EventTime <= toDateTime(1698932038) AND Kind IN ('PersistentVolume','PersistentVolumeClaim')\nGROUP BY ClusterName, Kind, Reason\nORDER BY Count DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+          "title": "PV and PVC Events by Cluster and Reason",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 24,
+ "x": 0,
+ "y": 41
+ },
+ "id": 7,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract data from your JSON\n const clusters = context.panel.data.series[0].fields[0].values;\n const hosts = context.panel.data.series[0].fields[1].values;\n const reasons = context.panel.data.series[0].fields[2].values;\n const eventTimes = context.panel.data.series[0].fields[3].values;\n\n // Create a hierarchical structure for the tree chart starting with ClusterName\n const hierarchy = {\n name: 'Root', // You can customize the name of the root node if needed\n children: [],\n };\n\n for (let i = 0; i < clusters.length; i++) {\n const cluster = clusters[i];\n const host = hosts[i];\n const reason = reasons[i];\n const eventTime = eventTimes[i];\n\n // Find or create the cluster node\n let clusterNode = hierarchy.children.find((node) => node.name === cluster);\n if (!clusterNode) {\n clusterNode = { name: cluster, children: [] };\n hierarchy.children.push(clusterNode);\n }\n\n // Find or create the host node under the cluster\n let hostNode = clusterNode.children.find((node) => node.name === host);\n if (!hostNode) {\n hostNode = { name: host, children: [] };\n clusterNode.children.push(hostNode);\n }\n\n // Find or create the reason node under the host\n let reasonNode = hostNode.children.find((node) => node.name === reason);\n if (!reasonNode) {\n reasonNode = { name: reason, children: [] };\n hostNode.children.push(reasonNode);\n }\n\n // Create the eventTime node under the reason\n reasonNode.children.push({ name: eventTime });\n }\n\n // Create the tree chart using ECharts\n option = {\n tooltip: {\n trigger: 'item',\n triggerOn: 'mousemove',\n },\n series: [\n {\n type: 'tree',\n data: hierarchy.children, // Use the children directly as root nodes\n top: '1%',\n left: '7%',\n bottom: '1%',\n right: '20%', // Adjust the right margin to provide more space for labels\n symbolSize: 7,\n label: {\n position: 'inside', // Position labels inside the node\n verticalAlign: 'middle',\n align: 'center', // Center-align labels\n fontSize: 15,\n fontWeight: 'bold',\n },\n leaves: {\n label: {\n position: 'right', // Position labels inside the node\n verticalAlign: 'middle',\n align: 'left', // Center-align labels\n fontSize: 15,\n fontWeight: 'bold',\n },\n },\n emphasis: {\n focus: 'descendant',\n },\n expandAndCollapse: true,\n animationDuration: 550,\n animationDurationUpdate: 750,\n },\n ],\n };\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Host, Reason, EventTime\nFROM default.events\nWHERE $timeFilterByColumn(EventTime) AND ClusterName IN ($clusterName) AND Kind = 'Node' AND Kind != ' '\nGROUP BY ClusterName, Host, Reason, EventTime",
+ "rawQuery": "SELECT ClusterName, Host, Reason, EventTime\nFROM default.events\nWHERE EventTime >= toDateTime(1698927382) AND EventTime <= toDateTime(1698927682) AND ClusterName IN ('beta-cluster') AND Kind = 'Node' AND Kind != ' '\nGROUP BY ClusterName, Host, Reason, EventTime",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Node Events by Cluster, Host, Reason, and EventTime",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 24,
+ "x": 0,
+ "y": 54
+ },
+ "id": 5,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract data from your JSON as before\n const clusterNames = context.panel.data.series[0].fields[0].values; // New column for ClusterName\n const namespaces = context.panel.data.series[0].fields[1].values;\n const kinds = context.panel.data.series[0].fields[2].values; // Adjusted index for Kind\n const counts = context.panel.data.series[0].fields[3].values; // New column for Count\n\n // Create a hierarchical structure from the data without a root node\n const hierarchy = {\n name: 'root', // Use 'root' as a placeholder\n children: [],\n };\n\n const seenClusterNames = new Set();\n const seenNamespaces = new Set();\n\n for (let i = 0; i < clusterNames.length; i++) {\n const clusterName = clusterNames[i];\n const namespace = namespaces[i];\n const kind = kinds[i];\n const count = counts[i]; // Get the count value\n\n if (!seenClusterNames.has(clusterName)) {\n seenClusterNames.add(clusterName);\n const clusterNode = { name: clusterName, children: [] };\n hierarchy.children.push(clusterNode);\n seenNamespaces.clear(); // Reset seenNamespaces for each cluster\n }\n\n const clusterNode = hierarchy.children.find((node) => node.name === clusterName);\n\n if (!seenNamespaces.has(namespace)) {\n seenNamespaces.add(namespace);\n const namespaceNode = { name: namespace, children: [] };\n clusterNode.children.push(namespaceNode);\n }\n\n const namespaceNode = clusterNode.children.find((node) => node.name === namespace);\n const kindNode = { name: kind, children: [{ name: `Count: ${count}` }] }; // Include the count as a child node\n namespaceNode.children.push(kindNode);\n }\n\n // Create the tree chart using ECharts\n option = {\n tooltip: {\n trigger: 'item',\n triggerOn: 'mousemove',\n formatter: function (params) {\n const node = params.data;\n let tooltip = '';\n if (node.column) {\n tooltip += `${node.column}: ${node.name}`;\n } else {\n tooltip += node.name;\n }\n return tooltip;\n },\n },\n series: [\n {\n type: 'tree',\n data: hierarchy.children, // Use the children directly\n top: '1%',\n left: '7%',\n bottom: '1%',\n right: '20%',\n symbolSize: 7,\n label: {\n position: 'left',\n verticalAlign: 'middle',\n align: 'right',\n fontSize: 15, // Increase the text size for regular nodes\n fontWeight: 'bold', // Set the font weight to bold\n },\n leaves: {\n label: {\n position: 'right',\n verticalAlign: 'middle',\n align: 'left',\n fontSize: 15, // Increase the text size for leaves\n fontWeight: 'bold', // Set the font weight to bold\n },\n },\n emphasis: {\n focus: 'descendant',\n },\n expandAndCollapse: true,\n animationDuration: 550,\n animationDurationUpdate: 750,\n },\n ],\n };\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Namespace, Kind, count(*) AS count\nFROM default.events\nWHERE $timeFilterByColumn(EventTime) AND ClusterName IN ($clusterName) AND Namespace IN ($namespace)\nGROUP BY ClusterName, Namespace, Kind",
+ "rawQuery": "SELECT ClusterName, Namespace, Kind, count(*) AS count\nFROM default.events\nWHERE EventTime >= toDateTime(1694603943) AND EventTime <= toDateTime(1694604243) AND ClusterName IN ('dev') AND Namespace IN ('kubviz','argocd','observability','default','tracetestdemo','sonarqube','kube-system','tek','quality','tekton-pipelines','sample','tekton-pipelines-resolvers','tekton-chains','cert-manager','qtapp','otel-collector','mysql','traefik')\nGROUP BY ClusterName, Namespace, Kind",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Kubernetes Workload",
+ "type": "volkovlabs-echarts-panel"
+ },
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
@@ -51,8 +515,7 @@ data:
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -67,7 +530,7 @@ data:
"h": 7,
"w": 24,
"x": 0,
- "y": 0
+ "y": 65
},
"id": 4,
"options": {
@@ -84,7 +547,7 @@ data:
},
"textMode": "auto"
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -96,8 +559,8 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT\n count()\nFROM default.events\n\nWHERE Kind IN ('Pod') AND Reason IN ('Created') AND Namespace IN ($namespace)\n",
- "rawQuery": "SELECT\n count()\nFROM default.events\n\nWHERE Kind IN ('Pod') AND Reason IN ('Created') AND Namespace IN ('argocd','quality','default','sonarqube','observability')",
+ "query": "SELECT\n count()\nFROM default.events\n\nWHERE $timeFilterByColumn(EventTime) AND Kind IN ('Pod') AND Reason IN ('Created') AND Namespace IN ($namespace)\n",
+ "rawQuery": "SELECT\n count()\nFROM default.events\n\nWHERE EventTime >= toDateTime(1693485465) AND EventTime <= toDateTime(1693486365) AND Kind IN ('Pod') AND Reason IN ('Created') AND Namespace IN ('argocd','kubviz','default','observability','otel-collector','tek','sonarqube','tekton-pipelines','cert-manager','kube-system','quality','traefik','tracetestdemo')",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -119,7 +582,9 @@ data:
},
"custom": {
"align": "center",
- "displayMode": "color-text",
+ "cellOptions": {
+ "type": "color-text"
+ },
"filterable": true,
"inspect": false
},
@@ -128,8 +593,7 @@ data:
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -144,11 +608,13 @@ data:
"h": 16,
"w": 24,
"x": 0,
- "y": 7
+ "y": 72
},
"id": 2,
"options": {
+ "cellHeight": "sm",
"footer": {
+ "countRows": false,
"fields": "",
"reducer": [
"sum"
@@ -157,7 +623,7 @@ data:
},
"showHeader": true
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -169,19 +635,19 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.events\nWHERE Namespace IN ($namespace) AND Reason IN ($reason) AND Kind IN ($kind) AND ClusterName IN ($clusterName)",
- "rawQuery": "SELECT * FROM default.events\nWHERE Namespace IN ('argocd','quality','default','sonarqube','observability') AND Reason IN ('OperationStarted','ResourceUpdated','OperationCompleted','FailedMount','ApplyClusterRoles','ApplyRoles','UpdateFailed','Unhealthy','InstallPackageRevision','Valid','Updated','RenderCRD','BackOff','BindClusterRole','SyncPackage') AND Kind IN ('Application','Pod','ProviderRevision','CompositeResourceDefinition','Namespace','ExternalSecret','Provider','SecretStore') AND ClusterName IN ('kubviz')",
+ "query": "SELECT * FROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Namespace IN ($namespace) AND Reason IN ($reason) AND Kind IN ($kind) AND ClusterName IN ($clusterName)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.events\nWHERE EventTime >= toDateTime(1693486140) AND EventTime <= toDateTime(1693486440) AND Namespace IN ('argocd','kubviz','default','observability','otel-collector','tek','sonarqube','tekton-pipelines','cert-manager','kube-system','quality','traefik','tracetestdemo') AND Reason IN ('OperationCompleted','Pulled','Created','Started','OperationStarted','SuccessfulCreate','Scheduled','BackOff','Pulling','Unhealthy','Killing','FinalizerUpdate','WaitForFirstConsumer','Running','ExternalProvisioning','Provisioning','Pending','ProvisioningFailed','ProvisioningSucceeded','SuccessfulAttachVolume','Succeeded','Failed','FailedGetResourceMetric','FailedComputeMetricsReplicas','NodeNotReady','NodeHasSufficientMemory','NodeHasNoDiskPressure','NodeHasSufficientPID','NodeReady') AND Kind IN ('Application','Pod','ReplicaSet','StatefulSet','PipelineRun','PersistentVolumeClaim','TaskRun','HorizontalPodAutoscaler','Node') AND ClusterName IN ('dev')\nORDER BY EventTime DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Kubernetes",
- "transparent": true,
"type": "table"
}
],
- "schemaVersion": 37,
+ "refresh": "",
+ "schemaVersion": 38,
"style": "dark",
"tags": [],
"templating": {
@@ -281,14 +747,16 @@ data:
]
},
"time": {
- "from": "now-15m",
+ "from": "now-5m",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Kubedata",
"uid": "Qq-FK1rVz",
- "version": 1,
+ "version": 3,
"weekStart": ""
}
+
+
{{- end }}
\ No newline at end of file
diff --git a/charts/client/templates/configmap-kuberhealthy-dashboard.yaml b/charts/client/templates/configmap-kuberhealthy-dashboard.yaml
new file mode 100644
index 00000000..cea36528
--- /dev/null
+++ b/charts/client/templates/configmap-kuberhealthy-dashboard.yaml
@@ -0,0 +1,1208 @@
+{{- if .Values.dashboards.enabled }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "client.fullname" . }}-kuberhealthy-dashboard
+ annotations:
+ grafana_folder: "Kubviz"
+ labels:
+ {{ .Values.dashboards.label }}: {{ .Values.dashboards.labelValue | quote }}
+data:
+ kuberhealthy.json: |-
+ {
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 41,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 18,
+ "x": 0,
+ "y": 0
+ },
+ "id": 14,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract data from your JSON as before\n const namespaces = context.panel.data.series[0].fields[0].values;\n const counts = context.panel.data.series[0].fields[1].values;\n\n // Create a hierarchical structure from the data with a default cluster node\n const hierarchy = {\n name: 'CheckName', // Default cluster node\n children: [],\n };\n\n // Create an object to store namespaces and their counts\n const namespaceCounts = {};\n\n // Populate the namespaceCounts object with namespaces and counts\n for (let i = 0; i < namespaces.length; i++) {\n const namespace = namespaces[i];\n const count = counts[i];\n\n if (!namespaceCounts[namespace]) {\n namespaceCounts[namespace] = count;\n } else {\n namespaceCounts[namespace] += count;\n }\n }\n\n // Create nodes for each namespace and add them as children of the default cluster node\n for (const namespace in namespaceCounts) {\n hierarchy.children.push({\n name: namespace,\n children: [{ name: `${namespaceCounts[namespace]}` }],\n });\n }\n\n // Create the tree chart using ECharts\n option = {\n tooltip: {\n trigger: 'item',\n triggerOn: 'mousemove',\n formatter: function (params) {\n const node = params.data;\n let tooltip = '';\n if (node.column) {\n tooltip += `${node.column}: ${node.name}`;\n } else {\n tooltip += node.name;\n }\n return tooltip;\n },\n },\n series: [\n {\n type: 'tree',\n data: [hierarchy], // Use the hierarchy object as the data\n top: '1%',\n left: '7%',\n bottom: '1%',\n right: '20%',\n symbolSize: 7,\n label: {\n position: 'left',\n verticalAlign: 'middle',\n align: 'centre',\n fontSize: 15, // Increase the text size for regular nodes\n fontWeight: 'bold', // Set the font weight to bold\n },\n leaves: {\n label: {\n position: 'right',\n verticalAlign: 'middle',\n align: 'left',\n fontSize: 15, // Increase the text size for leaves\n fontWeight: 'bold', // Set the font weight to bold\n },\n },\n emphasis: {\n focus: 'descendant',\n },\n expandAndCollapse: true,\n animationDuration: 550,\n animationDurationUpdate: 750,\n },\n ],\n };\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "console.log(context);\nreturn {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "codeHeight": 600,
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "editorMode": "builder",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT CheckName, count(OK) AS counts\nFROM default.kuberhealthy\nWHERE OK = '1'\nGROUP BY CheckName",
+ "rawQuery": "SELECT CheckName, count(OK) AS counts\nFROM default.kuberhealthy\nWHERE OK = '1'\nGROUP BY CheckName",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Running kuberhealthy checks",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 18,
+ "x": 0,
+ "y": 13
+ },
+ "id": 15,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract data from your JSON as before\n const namespaces = context.panel.data.series[0].fields[0].values;\n const counts = context.panel.data.series[0].fields[1].values;\n\n // Create a hierarchical structure from the data with a default cluster node\n const hierarchy = {\n name: 'CheckName', // Default cluster node\n children: [],\n };\n\n // Create an object to store namespaces and their counts\n const namespaceCounts = {};\n\n // Populate the namespaceCounts object with namespaces and counts\n for (let i = 0; i < namespaces.length; i++) {\n const namespace = namespaces[i];\n const count = counts[i];\n\n if (!namespaceCounts[namespace]) {\n namespaceCounts[namespace] = count;\n } else {\n namespaceCounts[namespace] += count;\n }\n }\n\n // Create nodes for each namespace and add them as children of the default cluster node\n for (const namespace in namespaceCounts) {\n hierarchy.children.push({\n name: namespace,\n children: [{ name: `${namespaceCounts[namespace]}` }],\n });\n }\n\n // Create the tree chart using ECharts\n option = {\n tooltip: {\n trigger: 'item',\n triggerOn: 'mousemove',\n formatter: function (params) {\n const node = params.data;\n let tooltip = '';\n if (node.column) {\n tooltip += `${node.column}: ${node.name}`;\n } else {\n tooltip += node.name;\n }\n return tooltip;\n },\n },\n series: [\n {\n type: 'tree',\n data: [hierarchy], // Use the hierarchy object as the data\n top: '1%',\n left: '7%',\n bottom: '1%',\n right: '20%',\n symbolSize: 7,\n label: {\n position: 'left',\n verticalAlign: 'middle',\n align: 'centre',\n fontSize: 15, // Increase the text size for regular nodes\n fontWeight: 'bold', // Set the font weight to bold\n },\n leaves: {\n label: {\n position: 'right',\n verticalAlign: 'middle',\n align: 'left',\n fontSize: 15, // Increase the text size for leaves\n fontWeight: 'bold', // Set the font weight to bold\n },\n },\n emphasis: {\n focus: 'descendant',\n },\n expandAndCollapse: true,\n animationDuration: 550,\n animationDurationUpdate: 750,\n },\n ],\n };\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "console.log(context);\nreturn {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "codeHeight": 600,
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "editorMode": "builder",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT CheckName, count(OK) AS counts\nFROM default.kuberhealthy\nWHERE OK = '0'\nGROUP BY CheckName",
+ "rawQuery": "SELECT CheckName, count(OK) AS counts\nFROM default.kuberhealthy\nWHERE OK = '0'\nGROUP BY CheckName",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Failure Kuberhealthy Checks",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 20,
+ "gradientMode": "scheme",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "smooth",
+ "lineWidth": 3,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 25
+ },
+ "id": 10,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "Select LastRun, OK,Errors\nfrom default.kuberhealthy\nwhere CheckName LIKE '%pod-restarts%' AND $__timeFilter(LastRun)",
+ "refId": "A"
+ }
+ ],
+ "title": "Kuberhealthy Pod Restarts Status Over Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "hue",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "smooth",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 2,
+ "pointSize": 10,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "OK"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 25
+ },
+ "id": 8,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "Select LastRun, OK\nfrom default.kuberhealthy\nwhere $__timeFilter(LastRun)",
+ "refId": "A"
+ }
+ ],
+ "title": "Kuberhealthy Status Over Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "count()"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 33
+ },
+ "id": 7,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "Select LastRun, count(*)\nfrom default.kuberhealthy\nwhere $__timeFilter(LastRun)\nGroup By LastRun",
+ "refId": "A"
+ }
+ ],
+ "title": "Kuberhealthy Counts Over Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 20,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "smooth",
+ "lineWidth": 3,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 33
+ },
+ "id": 12,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "Select LastRun, OK,Errors\nfrom default.kuberhealthy\nwhere CheckName LIKE '%image-pull-check%' AND $__timeFilter(LastRun)",
+ "refId": "A"
+ }
+ ],
+ "title": "Image Pull Check",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 20,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "smooth",
+ "lineWidth": 5,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 41
+ },
+ "id": 13,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "Select LastRun, OK,Errors\nfrom default.kuberhealthy\nwhere CheckName LIKE '%resource-quota%' AND $__timeFilter(LastRun)",
+ "refId": "A"
+ }
+ ],
+ "title": "Resource-quota Check",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 41
+ },
+ "id": 6,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "Select LastRun, OK,Errors\nfrom default.kuberhealthy\nwhere $__timeFilter(LastRun) AND OK='0'\nOrder BY LastRun DESC",
+ "refId": "A"
+ }
+ ],
+ "title": "Kuberhealthy Errors Over Time",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 49
+ },
+ "id": 3,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true,
+ "sortBy": []
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "SELECT * FROM \"default\".\"kuberhealthy\"\nWhere $__timeFilter(LastRun)\nOrder By LastRun DESC \n\n",
+ "refId": "A"
+ }
+ ],
+ "title": "Kuberhealthy Tables",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 57
+ },
+ "id": 16,
+ "options": {
+ "displayMode": "gradient",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "SELECT CheckName, count(OK) AS counts\nFROM default.kuberhealthy\nWHERE OK = '1'\nGROUP BY CheckName",
+ "refId": "A"
+ }
+ ],
+ "title": "Success Kuberhealthy Checks",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 11,
+ "x": 12,
+ "y": 57
+ },
+ "id": 2,
+ "options": {
+ "displayMode": "gradient",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "SELECT CheckName, count(OK) AS counts\nFROM default.kuberhealthy\nWHERE OK = '0'\nGROUP BY CheckName",
+ "refId": "A"
+ }
+ ],
+ "title": "Failure Kuberhealthy Checks",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
+ },
+ {
+ "color": "red",
+ "value": 85
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 65
+ },
+ "id": 4,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "SELECT count(OK)\nFROM default.kuberhealthy\nWHERE OK = '1'",
+ "refId": "A"
+ }
+ ],
+ "title": "Total Count Kuberhealthy Running",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
+ },
+ {
+ "color": "red",
+ "value": 85
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 65
+ },
+ "id": 5,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "SELECT count(OK)\nFROM default.kuberhealthy\nWHERE OK = '0'",
+ "refId": "A"
+ }
+ ],
+ "title": "Total Count Kuberhealthy Failures",
+ "type": "gauge"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-24h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "KuberHealth",
+ "uid": "d946c53c-8b1d-4e3c-9154-4219165342",
+ "version": 2,
+ "weekStart": ""
+ }
+
+{{- end }}
\ No newline at end of file
diff --git a/charts/client/templates/configmap-kubescore-dashboard.yaml b/charts/client/templates/configmap-kubescore-dashboard.yaml
index 2f80dd51..398a44c9 100644
--- a/charts/client/templates/configmap-kubescore-dashboard.yaml
+++ b/charts/client/templates/configmap-kubescore-dashboard.yaml
@@ -3,6 +3,8 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "client.fullname" . }}-kubescore-dashboard
+ annotations:
+ grafana_folder: "Kubviz"
labels:
{{ .Values.dashboards.label }}: {{ .Values.dashboards.labelValue | quote }}
data:
@@ -30,7 +32,7 @@ data:
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
- "id": 10,
+ "id": 5,
"links": [],
"liveNow": false,
"panels": [
@@ -47,8 +49,11 @@ data:
},
"custom": {
"align": "center",
- "displayMode": "color-text",
- "filterable": true
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
},
"mappings": [],
"thresholds": {
@@ -86,7 +91,7 @@ data:
},
"showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -98,34 +103,33 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.kubescore",
- "rawQuery": "SELECT * FROM default.kubescore",
+ "query": "SELECT * FROM default.kubescore\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.kubescore\nWHERE EventTime >= toDateTime(1694245574) AND EventTime <= toDateTime(1694267174)\nORDER BY EventTime DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "KubeScore",
- "transparent": true,
"type": "table"
}
],
"refresh": "",
- "schemaVersion": 35,
+ "schemaVersion": 38,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
- "from": "now-6h",
+ "from": "now-24h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "KubeScore",
"uid": "d8f0fceb-7621-45bc-9710-89e11fe57a79",
- "version": 2,
+ "version": 1,
"weekStart": ""
}
{{- end }}
\ No newline at end of file
diff --git a/charts/client/templates/configmap-kubviz-dashboard.yaml b/charts/client/templates/configmap-kubviz-dashboard.yaml
index 888a7ec0..206fc3d8 100644
--- a/charts/client/templates/configmap-kubviz-dashboard.yaml
+++ b/charts/client/templates/configmap-kubviz-dashboard.yaml
@@ -3,6 +3,8 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "client.fullname" . }}-kubviz-dashboard
+ annotations:
+ grafana_folder: "Kubviz"
labels:
{{ .Values.dashboards.label }}: {{ .Values.dashboards.labelValue | quote }}
data:
@@ -30,7 +32,7 @@ data:
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
- "id": 5,
+ "id": 64,
"links": [],
"liveNow": false,
"panels": [
@@ -47,8 +49,12 @@ data:
},
"custom": {
"align": "center",
- "displayMode": "lcd-gauge",
- "filterable": true
+ "cellOptions": {
+ "mode": "lcd",
+ "type": "gauge"
+ },
+ "filterable": true,
+ "inspect": false
},
"mappings": [],
"thresholds": {
@@ -86,7 +92,7 @@ data:
},
"showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -98,15 +104,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT DISTINCT ClusterName from(SELECT distinct ClusterName FROM default.DeletedAPIs\nUNION ALL\nSELECT DISTINCT ClusterName FROM default.DeprecatedAPIs\n UNION ALL\n SELECT DISTINCT ClusterName FROM default.events\n UNION ALL\n SELECT DISTINCT ClusterName FROM default.getall_resources\n UNION ALL\n SELECT DISTINCT ClusterName FROM default.outdated_images)",
- "rawQuery": "SELECT DISTINCT ClusterName from(SELECT distinct ClusterName FROM default.DeletedAPIs\nUNION ALL\nSELECT DISTINCT ClusterName FROM default.DeprecatedAPIs\n UNION ALL\n SELECT DISTINCT ClusterName FROM default.events\n UNION ALL\n SELECT DISTINCT ClusterName FROM default.getall_resources\n UNION ALL\n SELECT DISTINCT ClusterName FROM default.outdated_images)",
+ "query": "SELECT DISTINCT ClusterName FROM default.events",
+ "rawQuery": "SELECT DISTINCT ClusterName FROM default.events",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Monitoring Clusters",
- "transparent": true,
"type": "table"
},
{
@@ -120,13 +125,7 @@ data:
"color": {
"mode": "thresholds"
},
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
@@ -163,7 +162,7 @@ data:
"showThresholdLabels": false,
"showThresholdMarkers": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -175,15 +174,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT ClusterName,count(*) AS Events From default.events\nGROUP BY ClusterName\nORDER BY Events DESC\nLIMIT 1",
- "rawQuery": "SELECT ClusterName,count(*) AS Events From default.events\nGROUP BY ClusterName\nORDER BY Events DESC\nLIMIT 1",
+ "query": "SELECT ClusterName,count(*) AS Events From default.events\nWHERE $timeFilterByColumn(EventTime) \nGROUP BY ClusterName\nORDER BY Events DESC\nLIMIT 1",
+ "rawQuery": "SELECT ClusterName,count(*) AS Events From default.events\nWHERE EventTime >= toDateTime(1693460315) AND EventTime <= toDateTime(1693460615) \nGROUP BY ClusterName\nORDER BY Events DESC\nLIMIT 1",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Clusters with Most Activity",
- "transparent": true,
"type": "gauge"
},
{
@@ -191,22 +189,176 @@ data:
"type": "vertamedia-clickhouse-datasource",
"uid": "{{ .Values.datasources.uid }}"
},
- "description": "This panel displays the total count of outdated images from all cluster.",
+ "gridPos": {
+ "h": 9,
+ "w": 13,
+ "x": 0,
+ "y": 6
+ },
+ "id": 72,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "// Check if context.panel.data.series exists\nif (context.panel.data.series && context.panel.data.series.length > 0) {\n const reasons = context.panel.data.series[0].fields[0].values;\n const kinds = context.panel.data.series[0].fields[1].values;\n const eventTimes = context.panel.data.series[0].fields[2].values;\n\n // Create nodes and links\n const nodes = [];\n const links = [];\n\n reasons.forEach((reason, index) => {\n const sourceNode = {\n name: reason,\n category: 0, // Category for source nodes\n symbolSize: 60, // Size for source nodes\n };\n\n const kindNode = {\n name: kinds[index],\n category: 1, // Category for kind nodes\n symbolSize: 40, // Size for kind nodes\n };\n\n const eventTimeNode = {\n name: eventTimes[index],\n category: 2, // Category for eventTime nodes\n symbolSize: 20, // Size for eventTime nodes\n };\n\n // Ensure source, kind, and eventTime nodes are unique before adding them\n if (!nodes.some((node) => node.name === sourceNode.name)) {\n nodes.push(sourceNode);\n }\n\n if (!nodes.some((node) => node.name === kindNode.name)) {\n nodes.push(kindNode);\n }\n\n if (!nodes.some((node) => node.name === eventTimeNode.name)) {\n nodes.push(eventTimeNode);\n }\n\n // Create links between reason, kind, and eventTime nodes\n links.push({\n source: reason,\n target: kinds[index],\n });\n\n links.push({\n source: kinds[index],\n target: eventTimes[index],\n });\n });\n\n // Create categories for nodes\n const categories = [\n {\n name: 'Reasons',\n },\n {\n name: 'Nodes',\n },\n {\n name: 'Event Times',\n },\n ];\n\n // Create ECharts option\n const option = {\n tooltip: {\n trigger: 'item',\n formatter: '{b}',\n },\n legend: {\n x: 'left',\n data: ['Reasons', 'Nodes', 'Event Times'],\n },\n series: [\n {\n type: 'graph',\n layout: 'circular',\n roam: true,\n label: {\n show: true,\n textStyle: {\n color: '#000',\n },\n },\n force: {\n repulsion: 100,\n gravity: 0.1,\n edgeLength: 150,\n },\n data: nodes,\n links: links,\n draggable: true,\n categories: categories,\n edgeSymbol: [\"none\", \"arrow\"],\n edgeSymbolSize: [12, 12],\n lineStyle: {\n color: \"#000000\",\n curveness: 0,\n opacity: 0.3,\n },\n },\n ],\n };\n\n // Return the ECharts option\n return option;\n} else {\n // Display \"Data not available\" in the panel\n return {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n}\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Reason, Host, EventTime\nFROM default.events\nWHERE Kind IN ('Node') AND Reason IN ('NodeNotReady')",
+ "rawQuery": "SELECT Reason, Host, EventTime\nFROM default.events\nWHERE Kind IN ('Node') AND Reason IN ('NodeNotReady')",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "NodeNotReady Events for Nodes",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 11,
+ "x": 13,
+ "y": 6
+ },
+ "id": 70,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let kind = [];\nlet resources = [];\n\ncontext.panel.data.series.forEach((s) => {\n const kindField = s.fields.find((f) => f.name === 'Kind');\n const resourcesField = s.fields.find((f) => f.name === 'Resources');\n if (kindField && resourcesField) {\n kind = kindField.values;\n resources = resourcesField.values;\n }\n});\n\n// Create an empty array to store doughnut chart data\nconst doughnutChartData = [];\n\n// Define colors for doughnut slices\nconst doughnutSliceColors = ['#235894', '#FFFF00', '#FF0000', '#00FF00', '#FFA500'];\n\n// Map kind and resources counts to doughnut chart data\nkind.forEach((kinddata, index) => {\n doughnutChartData.push({\n value: resources[index],\n name: kinddata,\n clusterName: context.panel.data.series[0].fields[0].values[index], // Extract cluster name\n itemStyle: {\n borderRadius: [10, 10, 10, 10], // Add rounded corners\n color: doughnutSliceColors[index % doughnutSliceColors.length],\n borderWidth: 2,\n borderColor: '#fff',\n },\n });\n});\n\nreturn {\n backgroundColor: '#FFFFFF', // Set the background color to white\n tooltip: {\n trigger: 'item',\n formatter: function (params) {\n return `Resource From
${params.data.clusterName} ${params.value}`;\n },\n },\n legend: {\n top: '5%',\n left: 'center',\n },\n series: [\n {\n name: '',\n type: 'pie',\n radius: ['40%', '70%'],\n avoidLabelOverlap: false,\n label: {\n show: false,\n position: 'center',\n },\n emphasis: {\n label: {\n show: true,\n fontSize: 40,\n fontWeight: 'bold',\n },\n },\n labelLine: {\n show: false,\n },\n data: doughnutChartData, // Use the modified doughnut chart data\n },\n ],\n};\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Kind, count(Resource) AS Resources\nFROM default.getall_resources\nWHERE $timeFilterByColumn(EventTime) AND Kind In ('Deployment','Job','Secret', 'ConfigMap', 'Service')\nGROUP BY ClusterName, Kind",
+ "rawQuery": "SELECT ClusterName, Kind, count(Resource) AS Resources\nFROM default.getall_resources\nWHERE EventTime >= toDateTime(1698841487) AND EventTime <= toDateTime(1698927887) AND Kind In ('Deployment','Job','Secret', 'ConfigMap', 'Service')\nGROUP BY ClusterName, Kind",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Resource Distribution",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
"fieldConfig": {
"defaults": {
"color": {
- "mode": "thresholds"
+ "mode": "palette-classic"
},
- "links": [
- {
- "targetBlank": true,
- "title": "Outdated Images",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 30,
+ "gradientMode": "hue",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ],
+ },
"mappings": [],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
"steps": [
{
"color": "green",
@@ -221,11 +373,239 @@ data:
},
"overrides": []
},
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 15
+ },
+ "id": 69,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "time_series",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": " SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE $timeFilterByColumn(EventTime) AND Kind = 'Pod' AND Reason = 'Created'\n GROUP BY EventTime, Kind, Reason",
+ "rawQuery": "SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE EventTime >= toDateTime(1698839664) AND EventTime <= toDateTime(1698926064) AND Kind = 'Pod' AND Reason = 'Created'\n GROUP BY EventTime, Kind, Reason",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "time_series",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "hide": false,
+ "intervalFactor": 1,
+ "query": " SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE $timeFilterByColumn(EventTime) AND Kind = 'Pod' AND Reason = 'BackOff'\n GROUP BY EventTime, Kind, Reason",
+ "rawQuery": "SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE EventTime >= toDateTime(1698839664) AND EventTime <= toDateTime(1698926064) AND Kind = 'Pod' AND Reason = 'BackOff'\n GROUP BY EventTime, Kind, Reason",
+ "refId": "B",
+ "round": "0s",
+ "skip_comments": true
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "time_series",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "hide": false,
+ "intervalFactor": 1,
+ "query": "SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE $timeFilterByColumn(EventTime) AND Kind = 'Pod' AND Reason = 'NodeNotReady'\n GROUP BY EventTime, Kind, Reason",
+ "rawQuery": "SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE EventTime >= toDateTime(1698839664) AND EventTime <= toDateTime(1698926064) AND Kind = 'Pod' AND Reason = 'NodeNotReady'\n GROUP BY EventTime, Kind, Reason",
+ "refId": "C",
+ "round": "0s",
+ "skip_comments": true
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "time_series",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "hide": false,
+ "intervalFactor": 1,
+ "query": " SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE $timeFilterByColumn(EventTime) AND Kind = 'Pod' AND Reason = 'Scheduled'\n GROUP BY EventTime, Kind, Reason",
+ "rawQuery": "SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE EventTime >= toDateTime(1698839664) AND EventTime <= toDateTime(1698926064) AND Kind = 'Pod' AND Reason = 'Scheduled'\n GROUP BY EventTime, Kind, Reason",
+ "refId": "D",
+ "round": "0s",
+ "skip_comments": true
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "time_series",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "hide": false,
+ "intervalFactor": 1,
+ "query": "SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE $timeFilterByColumn(EventTime) AND Kind = 'Pod' AND Reason = 'FailedScheduling'\n GROUP BY EventTime, Kind, Reason",
+ "rawQuery": "SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE EventTime >= toDateTime(1698839664) AND EventTime <= toDateTime(1698926064) AND Kind = 'Pod' AND Reason = 'FailedScheduling'\n GROUP BY EventTime, Kind, Reason",
+ "refId": "E",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Pod Events by Reason Over Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Age"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "lcd",
+ "type": "gauge"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 15
+ },
+ "id": 71,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Resource, Age\nFROM default.getall_resources\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT ClusterName, Resource, Age\nFROM default.getall_resources\nWHERE EventTime >= toDateTime(1696336208) AND EventTime <= toDateTime(1698928208)",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Resource Details by Cluster and Age",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+      "description": "This panel displays the total count of outdated images across all clusters.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "links": [],
+ "mappings": [],
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
"gridPos": {
"h": 4,
"w": 5,
"x": 0,
- "y": 6
+ "y": 22
},
"id": 18,
"options": {
@@ -240,7 +620,7 @@ data:
"showThresholdLabels": false,
"showThresholdMarkers": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -252,15 +632,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.outdated_images\nWHERE VersionsBehind > 0",
- "rawQuery": "SELECT count(*) FROM default.outdated_images\nWHERE VersionsBehind > 0",
+ "query": "SELECT count(*)\n\nFROM default.outdated_images\n\nWHERE $timeFilterByColumn(EventTime) AND VersionsBehind > 0",
+ "rawQuery": "SELECT count(*)\n\nFROM default.outdated_images\n\nWHERE EventTime >= toDateTime(1695283225) AND EventTime <= toDateTime(1695369625) AND VersionsBehind > 0",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Number of Outdated Images Across All Clusters",
- "transparent": true,
"type": "gauge"
},
{
@@ -274,20 +653,13 @@ data:
"color": {
"mode": "thresholds"
},
- "links": [
- {
- "targetBlank": true,
- "title": "Kubedata",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1689917173495&to=1689918073495"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -302,7 +674,7 @@ data:
"h": 4,
"w": 5,
"x": 5,
- "y": 6
+ "y": 22
},
"id": 20,
"options": {
@@ -317,7 +689,7 @@ data:
"showThresholdLabels": false,
"showThresholdMarkers": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -329,15 +701,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.events",
- "rawQuery": "SELECT count(*) FROM default.events",
+ "query": "SELECT count(*) FROM default.events\nWHERE $timeFilterByColumn(EventTime) ",
+ "rawQuery": "SELECT count(*) FROM default.events\nWHERE EventTime >= toDateTime(1693460292) AND EventTime <= toDateTime(1693460592)",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Number of Events across all Clusters",
- "transparent": true,
"type": "gauge"
},
{
@@ -351,20 +722,13 @@ data:
"color": {
"mode": "thresholds"
},
- "links": [
- {
- "targetBlank": true,
- "title": "DeletedAPIs",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -379,7 +743,7 @@ data:
"h": 4,
"w": 5,
"x": 10,
- "y": 6
+ "y": 22
},
"id": 22,
"options": {
@@ -394,7 +758,7 @@ data:
"showThresholdLabels": false,
"showThresholdMarkers": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -406,15 +770,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.DeletedAPIs",
- "rawQuery": "SELECT count(*) FROM default.DeletedAPIs",
+ "query": "SELECT count(*) FROM default.DeletedAPIs\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT count(*) FROM default.DeletedAPIs\nWHERE EventTime >= toDateTime(1695367863) AND EventTime <= toDateTime(1695369663)",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Number of DeletedAPIs across all Clusters",
- "transparent": true,
"type": "gauge"
},
{
@@ -428,20 +791,13 @@ data:
"color": {
"mode": "thresholds"
},
- "links": [
- {
- "targetBlank": true,
- "title": "DeprecatedAPIs",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -456,7 +812,7 @@ data:
"h": 4,
"w": 5,
"x": 15,
- "y": 6
+ "y": 22
},
"id": 24,
"options": {
@@ -471,7 +827,7 @@ data:
"showThresholdLabels": false,
"showThresholdMarkers": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -483,15 +839,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.DeprecatedAPIs",
- "rawQuery": "SELECT count(*) FROM default.DeprecatedAPIs",
+ "query": "SELECT count(*) FROM default.DeprecatedAPIs\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT count(*) FROM default.DeprecatedAPIs\nWHERE EventTime >= toDateTime(1695367882) AND EventTime <= toDateTime(1695369682)",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Number of DeprecatedAPIs across all Clusters",
- "transparent": true,
"type": "gauge"
},
{
@@ -505,20 +860,13 @@ data:
"color": {
"mode": "thresholds"
},
- "links": [
- {
- "targetBlank": true,
- "title": "Kubernetes Resources",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -533,7 +881,7 @@ data:
"h": 4,
"w": 4,
"x": 20,
- "y": 6
+ "y": 22
},
"id": 26,
"options": {
@@ -548,7 +896,7 @@ data:
"showThresholdLabels": false,
"showThresholdMarkers": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -560,17 +908,219 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.getall_resources",
- "rawQuery": "SELECT count(*) FROM default.getall_resources",
+ "query": "SELECT count(*) FROM default.getall_resources\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT count(*) FROM default.getall_resources\nWHERE EventTime >= toDateTime(1695367909) AND EventTime <= toDateTime(1695369709)",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Number of k8s_Resources across all Clusters",
- "transparent": true,
"type": "gauge"
},
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+      "description": "This panel provides a time-based analysis of the occurrences of 'Pod' events.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisGridShow": true,
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "always",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 13,
+ "x": 0,
+ "y": 26
+ },
+ "id": 53,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "format": 0,
+ "meta": {
+ "builderOptions": {
+ "fields": [],
+ "limit": 100,
+ "mode": "list"
+ }
+ },
+ "queryType": "sql",
+ "rawSql": "SELECT EventTime, COUNT(*) AS Pods\nFROM default.events\nWHERE Kind = 'Pod'\nGROUP BY EventTime;\n",
+ "refId": "A",
+ "selectedFormat": 0
+ }
+ ],
+      "title": "Number of Pods over Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+      "description": "This panel provides a time-based analysis of the occurrences of 'Node' events.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 11,
+ "x": 13,
+ "y": 26
+ },
+ "id": 65,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "format": 0,
+ "meta": {
+ "builderOptions": {
+ "fields": [],
+ "limit": 100,
+ "mode": "list"
+ }
+ },
+ "queryType": "sql",
+ "rawSql": "SELECT EventTime, COUNT(*) AS Nodes\nFROM default.events\nWHERE Kind = 'Node'\nGROUP BY EventTime;\n",
+ "refId": "A",
+ "selectedFormat": 0
+ }
+ ],
+ "title": "Number of Nodes over Time",
+ "type": "timeseries"
+ },
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
@@ -583,20 +1133,13 @@ data:
"fixedColor": "#249b6a",
"mode": "fixed"
},
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -611,7 +1154,7 @@ data:
"h": 4,
"w": 6,
"x": 0,
- "y": 10
+ "y": 32
},
"id": 57,
"options": {
@@ -628,7 +1171,7 @@ data:
},
"textMode": "value"
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -640,8 +1183,8 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Pod'",
- "rawQuery": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Pod'",
+ "query": "SELECT count(*) FROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN 'Pod'",
+ "rawQuery": "SELECT count(*) FROM default.events\nWHERE EventTime >= toDateTime(1693438853) AND EventTime <= toDateTime(1693460453) AND Kind IN 'Pod'",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -658,20 +1201,13 @@ data:
"description": "This panel displays the total number of pods with Created state.",
"fieldConfig": {
"defaults": {
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "orange",
@@ -690,7 +1226,7 @@ data:
"h": 4,
"w": 6,
"x": 6,
- "y": 10
+ "y": 32
},
"id": 63,
"options": {
@@ -705,7 +1241,7 @@ data:
"showThresholdLabels": false,
"showThresholdMarkers": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -717,15 +1253,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Pod' AND Reason IN 'Created'",
- "rawQuery": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Pod' AND Reason IN 'Created'",
+ "query": "SELECT count(*) FROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN 'Pod' AND Reason IN 'Created'",
+ "rawQuery": "SELECT count(*) FROM default.events\nWHERE EventTime >= toDateTime(1693460172) AND EventTime <= toDateTime(1693460472) AND Kind IN 'Pod' AND Reason IN 'Created'",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Number of pods with Created state",
- "transparent": true,
"type": "gauge"
},
{
@@ -736,20 +1271,13 @@ data:
"description": "This panel displays the total number of pods with backOff state.",
"fieldConfig": {
"defaults": {
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "orange",
@@ -768,7 +1296,7 @@ data:
"h": 4,
"w": 6,
"x": 12,
- "y": 10
+ "y": 32
},
"id": 61,
"options": {
@@ -783,7 +1311,7 @@ data:
"showThresholdLabels": false,
"showThresholdMarkers": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -795,15 +1323,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Pod' AND Reason IN 'BackOff'",
- "rawQuery": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Pod' AND Reason IN 'BackOff'",
+ "query": "SELECT count(*) FROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN 'Pod' AND Reason IN 'BackOff'",
+ "rawQuery": "SELECT count(*) FROM default.events\nWHERE EventTime >= toDateTime(1693460182) AND EventTime <= toDateTime(1693460482) AND Kind IN 'Pod' AND Reason IN 'BackOff'",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Number of pods with BackOff state",
- "transparent": true,
"type": "gauge"
},
{
@@ -813,21 +1340,14 @@ data:
},
"description": "This panel displays the total number of pods with Unhealthy state.",
"fieldConfig": {
- "defaults": {
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "defaults": {
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "orange",
@@ -846,7 +1366,7 @@ data:
"h": 4,
"w": 6,
"x": 18,
- "y": 10
+ "y": 32
},
"id": 62,
"options": {
@@ -861,7 +1381,7 @@ data:
"showThresholdLabels": false,
"showThresholdMarkers": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -873,15 +1393,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Pod' AND Reason IN 'Unhealthy'",
- "rawQuery": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Pod' AND Reason IN 'Unhealthy'",
+ "query": "SELECT count(*) FROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN 'Pod' AND Reason IN 'Unhealthy'",
+ "rawQuery": "SELECT count(*) FROM default.events\nWHERE EventTime >= toDateTime(1693460193) AND EventTime <= toDateTime(1693460493) AND Kind IN 'Pod' AND Reason IN 'Unhealthy'",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Number of pods with Unhealthy state",
- "transparent": true,
"type": "gauge"
},
{
@@ -896,20 +1415,13 @@ data:
"fixedColor": "#249b6a",
"mode": "fixed"
},
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -924,7 +1436,7 @@ data:
"h": 4,
"w": 6,
"x": 0,
- "y": 14
+ "y": 36
},
"id": 56,
"options": {
@@ -941,7 +1453,7 @@ data:
},
"textMode": "value"
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -953,15 +1465,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Node'",
- "rawQuery": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Node'",
+ "query": "SELECT count(*) FROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN 'Node'",
+ "rawQuery": "SELECT count(*) FROM default.events\nWHERE EventTime >= toDateTime(1693438754) AND EventTime <= toDateTime(1693460354) AND Kind IN 'Node'",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Number of Node creation events",
- "transparent": true,
"type": "stat"
},
{
@@ -972,20 +1483,13 @@ data:
"description": "This panel displays the total number of nodes which is in not ready state",
"fieldConfig": {
"defaults": {
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "orange",
@@ -1004,7 +1508,7 @@ data:
"h": 4,
"w": 6,
"x": 6,
- "y": 14
+ "y": 36
},
"id": 58,
"options": {
@@ -1019,7 +1523,7 @@ data:
"showThresholdLabels": false,
"showThresholdMarkers": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -1031,15 +1535,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Node' AND Reason IN 'NodeNotReady'",
- "rawQuery": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Node' AND Reason IN 'NodeNotReady'",
+ "query": "SELECT count(*) FROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN 'Node' AND Reason IN 'NodeNotReady'",
+ "rawQuery": "SELECT count(*) FROM default.events\nWHERE EventTime >= toDateTime(1693438784) AND EventTime <= toDateTime(1693460384) AND Kind IN 'Node' AND Reason IN 'NodeNotReady'",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Number of Nodes with NotReady State",
- "transparent": true,
"type": "gauge"
},
{
@@ -1050,20 +1553,13 @@ data:
"description": "This panel displays the total number of nodes which is in ready state",
"fieldConfig": {
"defaults": {
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "orange",
@@ -1082,7 +1578,7 @@ data:
"h": 4,
"w": 6,
"x": 12,
- "y": 14
+ "y": 36
},
"id": 59,
"options": {
@@ -1097,7 +1593,7 @@ data:
"showThresholdLabels": false,
"showThresholdMarkers": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -1109,15 +1605,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Node' AND Reason IN 'NodeReady'",
- "rawQuery": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Node' AND Reason IN 'NodeReady'",
+ "query": "SELECT count(*) FROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN 'Node' AND Reason IN 'NodeReady'",
+ "rawQuery": "SELECT count(*) FROM default.events\nWHERE EventTime >= toDateTime(1693438825) AND EventTime <= toDateTime(1693460425) AND Kind IN 'Node' AND Reason IN 'NodeReady'",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Number of Nodes with Ready State",
- "transparent": true,
"type": "gauge"
},
{
@@ -1128,20 +1623,13 @@ data:
"description": "This panel displays the total number of nodes which is in NodeHasNoDiskPressure state",
"fieldConfig": {
"defaults": {
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "orange",
@@ -1160,7 +1648,7 @@ data:
"h": 4,
"w": 6,
"x": 18,
- "y": 14
+ "y": 36
},
"id": 60,
"options": {
@@ -1175,7 +1663,7 @@ data:
"showThresholdLabels": false,
"showThresholdMarkers": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -1187,15 +1675,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Node' AND Reason IN 'NodeHasNoDiskPressure'",
- "rawQuery": "SELECT count(*) FROM default.events\nWHERE Kind IN 'Node' AND Reason IN 'NodeHasNoDiskPressure'",
+ "query": "SELECT count(*) FROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN 'Node' AND Reason IN 'NodeHasNoDiskPressure'",
+ "rawQuery": "SELECT count(*) FROM default.events\nWHERE EventTime >= toDateTime(1693438835) AND EventTime <= toDateTime(1693460435) AND Kind IN 'Node' AND Reason IN 'NodeHasNoDiskPressure'",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Number of Nodes with NodeHasNoDiskPressure State",
- "transparent": true,
"type": "gauge"
},
{
@@ -1211,23 +1698,19 @@ data:
},
"custom": {
"align": "center",
- "displayMode": "gradient-gauge",
- "filterable": true
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
},
- "links": [
- {
- "targetBlank": true,
- "title": "Kubedata",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1689917173495&to=1689918073495"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "light-yellow",
@@ -1248,12 +1731,20 @@ data:
{
"matcher": {
"id": "byName",
- "options": "Namespace"
+ "options": "Events"
},
"properties": [
{
"id": "custom.width",
"value": 425
+ },
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "gradient",
+ "type": "gauge",
+ "valueDisplayMode": "text"
+ }
}
]
}
@@ -1263,7 +1754,7 @@ data:
"h": 8,
"w": 24,
"x": 0,
- "y": 18
+ "y": 40
},
"id": 44,
"options": {
@@ -1279,7 +1770,7 @@ data:
"showHeader": true,
"sortBy": []
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -1291,15 +1782,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT ClusterName, Namespace, count(Event) AS Events FROM default.events\nGROUP BY ClusterName,Namespace\nORDER BY Events DESC",
- "rawQuery": "SELECT ClusterName, Namespace, count(Event) AS Events FROM default.events\nGROUP BY ClusterName,Namespace\nORDER BY Events DESC",
+ "query": "SELECT ClusterName, Namespace, count(Event) AS Events FROM default.events\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY ClusterName,Namespace\nORDER BY Events DESC",
+ "rawQuery": "SELECT ClusterName, Namespace, count(Event) AS Events FROM default.events\nWHERE EventTime >= toDateTime(1693460424) AND EventTime <= toDateTime(1693460724)\nGROUP BY ClusterName,Namespace\nORDER BY Events DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Events per cluster by Namespace",
- "transparent": true,
"type": "table"
},
{
@@ -1307,24 +1797,26 @@ data:
"type": "vertamedia-clickhouse-datasource",
"uid": "{{ .Values.datasources.uid }}"
},
- "description": "This panel displays the total number of clusters containing activity uniquely.",
+ "description": "This panel displays the total number of clusters containing DeletedAPIs activity.",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
- "links": [],
"mappings": [],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
},
{
"color": "red",
- "value": 80
+ "value": 85
}
]
}
@@ -1333,11 +1825,11 @@ data:
},
"gridPos": {
"h": 5,
- "w": 24,
+ "w": 8,
"x": 0,
- "y": 26
+ "y": 48
},
- "id": 40,
+ "id": 66,
"options": {
"orientation": "auto",
"reduceOptions": {
@@ -1350,7 +1842,7 @@ data:
"showThresholdLabels": false,
"showThresholdMarkers": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -1362,29 +1854,65 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT COUNT(DISTINCT ClusterName) AS DeletedAPIs\nFROM default.DeletedAPIs",
- "rawQuery": "SELECT COUNT(DISTINCT ClusterName) AS DeletedAPIs\nFROM default.DeletedAPIs",
+ "query": "SELECT COUNT(DISTINCT ClusterName) AS DeletedAPIs\nFROM default.DeletedAPIs\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT COUNT(DISTINCT ClusterName) AS DeletedAPIs\nFROM default.DeletedAPIs\nWHERE EventTime >= toDateTime(1695369491) AND EventTime <= toDateTime(1695369791)",
"refId": "A",
"round": "0s",
"skip_comments": true
+ }
+ ],
+ "title": "Number of Clusters Containing DeletedAPIs Activity",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "description": "This panel displays the total number of clusters containing Outdated Images activity.",
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
+ },
+ {
+ "color": "red",
+ "value": 85
+ }
+ ]
+ }
},
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT COUNT(DISTINCT ClusterName) AS DeprecatedAPIs\nFROM default.DeprecatedAPIs",
- "rawQuery": "SELECT COUNT(DISTINCT ClusterName) AS DeprecatedAPIs\nFROM default.DeprecatedAPIs",
- "refId": "B",
- "round": "0s",
- "skip_comments": true
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 48
+ },
+ "id": 68,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
},
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
@@ -1394,14 +1922,66 @@ data:
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
"intervalFactor": 1,
- "query": "SELECT count(DISTINCT(ClusterName)) AS Events\nFROM default.events",
- "rawQuery": "SELECT count(DISTINCT(ClusterName)) AS Events\nFROM default.events",
- "refId": "C",
+ "query": "SELECT count(DISTINCT(ClusterName)) AS OutdatedImages\nFROM default.outdated_images\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT count(DISTINCT(ClusterName)) AS OutdatedImages\nFROM default.outdated_images\nWHERE EventTime >= toDateTime(1695369518) AND EventTime <= toDateTime(1695369818)",
+ "refId": "A",
"round": "0s",
"skip_comments": true
+ }
+ ],
+ "title": "Number of Clusters Containing Outdated Images Activity",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "description": "This panel displays the total number of clusters containing DeprecatedAPIs activity.",
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
+ },
+ {
+ "color": "red",
+ "value": 85
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 48
+ },
+ "id": 67,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
},
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
@@ -1411,17 +1991,15 @@ data:
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
"intervalFactor": 1,
- "query": "SELECT count(DISTINCT(ClusterName)) AS OutdatedImages\nFROM default.outdated_images",
- "rawQuery": "SELECT count(DISTINCT(ClusterName)) AS OutdatedImages\nFROM default.outdated_images",
- "refId": "D",
+ "query": "SELECT COUNT(DISTINCT ClusterName) AS DeprecatedAPIs\nFROM default.DeprecatedAPIs\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT COUNT(DISTINCT ClusterName) AS DeprecatedAPIs\nFROM default.DeprecatedAPIs\nWHERE EventTime >= toDateTime(1695369535) AND EventTime <= toDateTime(1695369835)",
+ "refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Number of Clusters Containing Activity",
- "transparent": true,
+ "title": "Number of Clusters Containing DeprecatedAPIs Activity",
"type": "gauge"
},
{
@@ -1435,20 +2013,21 @@ data:
"color": {
"mode": "continuous-GrYlRd"
},
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "custom": {
+ "align": "auto",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1457,29 +2036,45 @@ data:
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Pods"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "lcd",
+ "type": "gauge",
+ "valueDisplayMode": "text"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
- "y": 31
+ "y": 53
},
"id": 64,
"options": {
- "displayMode": "lcd",
- "minVizHeight": 10,
- "minVizWidth": 0,
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showUnfilled": true,
- "valueMode": "color"
+ "showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -1491,16 +2086,15 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(Kind) AS Pods,ClusterName, Reason FROM default.events\nWHERE Kind IN 'Pod' \nGROUP BY ClusterName,Reason",
- "rawQuery": "SELECT count(Kind) AS Pods,ClusterName, Reason FROM default.events\nWHERE Kind IN 'Pod' \nGROUP BY ClusterName,Reason",
+ "query": "SELECT ClusterName, Reason, count(Kind) AS Pods FROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN 'Pod' \nGROUP BY ClusterName,Reason",
+ "rawQuery": "SELECT ClusterName, Reason, count(Kind) AS Pods FROM default.events\nWHERE EventTime >= toDateTime(1695369559) AND EventTime <= toDateTime(1695369859) AND Kind IN 'Pod' \nGROUP BY ClusterName,Reason",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Count of Pods grouped by Reason",
- "transparent": true,
- "type": "bargauge"
+ "type": "table"
},
{
"datasource": {
@@ -1515,6 +2109,8 @@ data:
"mode": "fixed"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"axisSoftMin": 0,
@@ -1528,22 +2124,18 @@ data:
"lineWidth": 1,
"scaleDistribution": {
"type": "linear"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "links": [
- {
- "targetBlank": true,
- "title": "DeprecatedAPIs",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1558,7 +2150,7 @@ data:
"h": 9,
"w": 12,
"x": 0,
- "y": 39
+ "y": 61
},
"id": 34,
"options": {
@@ -1593,15 +2185,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT ClusterName, count(Deprecated) AS DeprecatedAPIs FROM default.DeprecatedAPIs\nGROUP BY ClusterName",
- "rawQuery": "SELECT ClusterName, count(Deprecated) AS DeprecatedAPIs FROM default.DeprecatedAPIs\nGROUP BY ClusterName",
+ "query": "SELECT ClusterName, count(Deprecated) AS DeprecatedAPIs FROM default.DeprecatedAPIs\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY ClusterName",
+ "rawQuery": "SELECT ClusterName, count(Deprecated) AS DeprecatedAPIs FROM default.DeprecatedAPIs\nWHERE EventTime >= toDateTime(1695369580) AND EventTime <= toDateTime(1695369880)\nGROUP BY ClusterName",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Count of DeprecatedAPIs per cluster",
- "transparent": true,
"type": "barchart"
},
{
@@ -1617,6 +2208,8 @@ data:
"mode": "fixed"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"axisSoftMin": 0,
@@ -1630,22 +2223,18 @@ data:
"lineWidth": 1,
"scaleDistribution": {
"type": "linear"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "links": [
- {
- "targetBlank": true,
- "title": "DeletedAPIs",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1660,7 +2249,7 @@ data:
"h": 9,
"w": 12,
"x": 12,
- "y": 39
+ "y": 61
},
"id": 36,
"options": {
@@ -1695,15 +2284,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT ClusterName, count(Deleted) AS DeletedAPIs FROM default.DeletedAPIs\nGROUP BY ClusterName",
- "rawQuery": "SELECT ClusterName, count(Deleted) AS DeletedAPIs FROM default.DeletedAPIs\nGROUP BY ClusterName",
+ "query": "SELECT ClusterName, count(Deleted) AS DeletedAPIs FROM default.DeletedAPIs\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY ClusterName",
+ "rawQuery": "SELECT ClusterName, count(Deleted) AS DeletedAPIs FROM default.DeletedAPIs\nWHERE EventTime >= toDateTime(1695369607) AND EventTime <= toDateTime(1695369907)\nGROUP BY ClusterName",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Count of DeletedAPIs per cluster",
- "transparent": true,
"type": "barchart"
},
{
@@ -1719,6 +2307,8 @@ data:
"mode": "fixed"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"axisSoftMin": 0,
@@ -1732,22 +2322,18 @@ data:
"lineWidth": 1,
"scaleDistribution": {
"type": "linear"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "links": [
- {
- "targetBlank": true,
- "title": "Outdated Images",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1762,7 +2348,7 @@ data:
"h": 8,
"w": 8,
"x": 0,
- "y": 48
+ "y": 70
},
"id": 28,
"options": {
@@ -1797,15 +2383,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT ClusterName, count(CurrentImage) AS Outdated_Images FROM default.outdated_images\nGROUP BY ClusterName",
- "rawQuery": "SELECT ClusterName, count(CurrentImage) AS Outdated_Images FROM default.outdated_images\nGROUP BY ClusterName",
+ "query": "SELECT ClusterName, count(CurrentImage) AS Outdated_Images FROM default.outdated_images\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY ClusterName",
+ "rawQuery": "SELECT ClusterName, count(CurrentImage) AS Outdated_Images FROM default.outdated_images\nWHERE EventTime >= toDateTime(1695369627) AND EventTime <= toDateTime(1695369927)\nGROUP BY ClusterName",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Count of Outdated Images per cluster",
- "transparent": true,
"type": "barchart"
},
{
@@ -1820,6 +2405,8 @@ data:
"mode": "palette-classic"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"axisSoftMin": 0,
@@ -1833,22 +2420,18 @@ data:
"lineWidth": 1,
"scaleDistribution": {
"type": "linear"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "links": [
- {
- "targetBlank": true,
- "title": "Kubedata",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1689917173495&to=1689918073495"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1863,7 +2446,7 @@ data:
"h": 8,
"w": 8,
"x": 8,
- "y": 48
+ "y": 70
},
"id": 32,
"options": {
@@ -1898,15 +2481,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "select ClusterName,count(Id) AS Events from default.events\ngroup by ClusterName",
- "rawQuery": "select ClusterName,count(Id) AS Events from default.events\ngroup by ClusterName",
+ "query": "select ClusterName,count(Id) AS Events from default.events\nWHERE $timeFilterByColumn(EventTime) \ngroup by ClusterName",
+ "rawQuery": "select ClusterName,count(Id) AS Events from default.events\nWHERE EventTime >= toDateTime(1693460253) AND EventTime <= toDateTime(1693460553) \ngroup by ClusterName",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Count of Events per cluster",
- "transparent": true,
"type": "barchart"
},
{
@@ -1922,6 +2504,8 @@ data:
"mode": "fixed"
},
"custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"axisSoftMin": 0,
@@ -1935,22 +2519,18 @@ data:
"lineWidth": 1,
"scaleDistribution": {
"type": "linear"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
},
- "links": [
- {
- "targetBlank": true,
- "title": "Kubernetes Resources",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1965,7 +2545,7 @@ data:
"h": 8,
"w": 8,
"x": 16,
- "y": 48
+ "y": 70
},
"id": 30,
"options": {
@@ -2000,15 +2580,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT ClusterName, count(Resource) AS Resources FROM default.getall_resources\nGROUP BY ClusterName",
- "rawQuery": "SELECT ClusterName, count(Resource) AS Resources FROM default.getall_resources\nGROUP BY ClusterName",
+ "query": "SELECT ClusterName, count(Resource) AS Resources FROM default.getall_resources\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY ClusterName",
+ "rawQuery": "SELECT ClusterName, count(Resource) AS Resources FROM default.getall_resources\nWHERE EventTime >= toDateTime(1695369653) AND EventTime <= toDateTime(1695369953)\nGROUP BY ClusterName",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "Count of Resources per cluster",
- "transparent": true,
"type": "barchart"
},
{
@@ -2024,23 +2603,19 @@ data:
},
"custom": {
"align": "center",
- "displayMode": "gradient-gauge",
- "filterable": true
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
},
- "links": [
- {
- "targetBlank": true,
- "title": "Kubernetes Resources",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "light-yellow",
@@ -2053,13 +2628,30 @@ data:
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Resources"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "gradient",
+ "type": "gauge",
+ "valueDisplayMode": "text"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 7,
"w": 24,
"x": 0,
- "y": 56
+ "y": 78
},
"id": 42,
"options": {
@@ -2074,7 +2666,7 @@ data:
},
"showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -2086,15 +2678,14 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT ClusterName, Kind, count(Resource) AS Resources FROM default.getall_resources\nGROUP BY ClusterName,Kind\nORDER BY Resources DESC",
- "rawQuery": "SELECT ClusterName, Kind, count(Resource) AS Resources FROM default.getall_resources\nGROUP BY ClusterName,Kind\nORDER BY Resources DESC",
+ "query": "SELECT ClusterName, Kind, count(Resource) AS Resources FROM default.getall_resources\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY ClusterName,Kind\nORDER BY Resources DESC",
+ "rawQuery": "SELECT ClusterName, Kind, count(Resource) AS Resources FROM default.getall_resources\nWHERE EventTime >= toDateTime(1695369674) AND EventTime <= toDateTime(1695369974)\nGROUP BY ClusterName,Kind\nORDER BY Resources DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": " Resources per Cluster by Kind",
- "transparent": true,
"type": "table"
},
{
@@ -2107,7 +2698,7 @@ data:
"h": 1,
"w": 24,
"x": 0,
- "y": 63
+ "y": 85
},
"id": 16,
"panels": [
@@ -2150,11 +2741,13 @@ data:
"h": 16,
"w": 24,
"x": 0,
- "y": 217
+ "y": 110
},
"id": 14,
"options": {
+ "cellHeight": "sm",
"footer": {
+ "countRows": false,
"fields": "",
"reducer": [
"sum"
@@ -2163,7 +2756,7 @@ data:
},
"showHeader": true
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -2175,8 +2768,8 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.getall_resources",
- "rawQuery": "SELECT * FROM default.getall_resources",
+ "query": "SELECT * FROM default.getall_resources\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT * FROM default.getall_resources\nWHERE EventTime >= toDateTime(1695369699) AND EventTime <= toDateTime(1695369999)",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -2209,7 +2802,7 @@ data:
"h": 1,
"w": 24,
"x": 0,
- "y": 64
+ "y": 86
},
"id": 12,
"panels": [
@@ -2251,11 +2844,13 @@ data:
"h": 16,
"w": 24,
"x": 0,
- "y": 218
+ "y": 127
},
"id": 10,
"options": {
+ "cellHeight": "sm",
"footer": {
+ "countRows": false,
"fields": "",
"reducer": [
"sum"
@@ -2264,7 +2859,7 @@ data:
},
"showHeader": true
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -2276,8 +2871,8 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.outdated_images\nWHERE VersionsBehind > 0",
- "rawQuery": "SELECT * FROM default.outdated_images\nWHERE VersionsBehind > 0",
+ "query": "SELECT * FROM default.outdated_images\nWHERE $timeFilterByColumn(EventTime) AND VersionsBehind > 0",
+ "rawQuery": "SELECT * FROM default.outdated_images\nWHERE EventTime >= toDateTime(1695369722) AND EventTime <= toDateTime(1695370022) AND VersionsBehind > 0",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -2310,7 +2905,7 @@ data:
"h": 1,
"w": 24,
"x": 0,
- "y": 65
+ "y": 87
},
"id": 8,
"panels": [
@@ -2353,11 +2948,13 @@ data:
"h": 11,
"w": 24,
"x": 0,
- "y": 219
+ "y": 112
},
"id": 6,
"options": {
+ "cellHeight": "sm",
"footer": {
+ "countRows": false,
"fields": "",
"reducer": [
"sum"
@@ -2366,7 +2963,7 @@ data:
},
"showHeader": true
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -2378,8 +2975,8 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.DeletedAPIs",
- "rawQuery": "SELECT * FROM default.DeletedAPIs",
+ "query": "SELECT * FROM default.DeletedAPIs\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT * FROM default.DeletedAPIs\nWHERE EventTime >= toDateTime(1695369749) AND EventTime <= toDateTime(1695370049)",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -2412,7 +3009,7 @@ data:
"h": 1,
"w": 24,
"x": 0,
- "y": 66
+ "y": 88
},
"id": 4,
"panels": [
@@ -2455,11 +3052,13 @@ data:
"h": 8,
"w": 24,
"x": 0,
- "y": 220
+ "y": 113
},
"id": 2,
"options": {
+ "cellHeight": "sm",
"footer": {
+ "countRows": false,
"fields": "",
"reducer": [
"sum"
@@ -2468,7 +3067,7 @@ data:
},
"showHeader": true
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -2480,8 +3079,8 @@ data:
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.DeprecatedAPIs",
- "rawQuery": "SELECT * FROM default.DeprecatedAPIs",
+ "query": "SELECT * FROM default.DeprecatedAPIs\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT * FROM default.DeprecatedAPIs\nWHERE EventTime >= toDateTime(1695369773) AND EventTime <= toDateTime(1695370073)",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -2506,21 +3105,22 @@ data:
}
],
"refresh": "",
- "schemaVersion": 35,
+ "schemaVersion": 38,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
- "from": "now-6h",
+ "from": "now-24h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Kubviz Dashboard",
"uid": "eT4fox94z",
- "version": 8,
+ "version": 4,
"weekStart": ""
}
+
{{- end }}
\ No newline at end of file
diff --git a/charts/client/templates/configmap-trivy-dashboard.yaml b/charts/client/templates/configmap-trivy-dashboard.yaml
index 9e3c1921..fee0c1f4 100644
--- a/charts/client/templates/configmap-trivy-dashboard.yaml
+++ b/charts/client/templates/configmap-trivy-dashboard.yaml
@@ -3,6 +3,8 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "client.fullname" . }}-trivy-dashboard
+ annotations:
+ grafana_folder: "Kubviz"
labels:
{{ .Values.dashboards.label }}: {{ .Values.dashboards.labelValue | quote }}
data:
@@ -30,20 +32,37 @@ data:
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
- "id": 6,
+ "id": 71,
"links": [],
"liveNow": false,
"panels": [
+ {
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 49,
+ "title": "Image Vulnerability and SBOM",
+ "type": "row"
+ },
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "{{ .Values.datasources.uid }}"
},
- "description": "This panel illustrates the distribution of vulnerability severities across different clusters. It provides an overview of the count of vulnerabilities categorized by severity levels within each cluster.",
"fieldConfig": {
"defaults": {
"color": {
- "mode": "continuous-GrYlRd"
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "inspect": false
},
"mappings": [],
"thresholds": {
@@ -53,33 +72,67 @@ data:
"color": "green",
"value": null
},
+ {
+ "color": "semi-dark-yellow",
+ "value": 10
+ },
+ {
+ "color": "orange",
+ "value": 25
+ },
{
"color": "red",
- "value": 80
+ "value": 50
+ },
+ {
+ "color": "semi-dark-red",
+ "value": 100
+ },
+ {
+ "color": "dark-red",
+ "value": 1000
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Counts"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "lcd",
+ "type": "gauge"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
- "y": 0
+ "y": 1
},
- "id": 20,
+ "id": 47,
"options": {
- "displayMode": "gradient",
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showUnfilled": true
+ "showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -87,31 +140,37 @@ data:
"uid": "{{ .Values.datasources.uid }}"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT cluster_name, vul_severity, count(*) \nFROM default.trivy_vul\nGROUP BY cluster_name, vul_severity",
- "rawQuery": "SELECT cluster_name, vul_severity, count(*) \nFROM default.trivy_vul\nGROUP BY cluster_name, vul_severity",
+ "query": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'LOW'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
+ "rawQuery": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162544) AND EventTime <= toDateTime(1713248944) AND vul_severity = 'LOW'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Vulnerability Severity counts grouped by Cluster",
- "transparent": true,
- "type": "bargauge"
+      "title": "Images with the Most Low-Severity Vulnerabilities",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "{{ .Values.datasources.uid }}"
},
- "description": "This panel illustrates the distribution of misconfiguration severities across different clusters. It provides an overview of the count of misconfigurations categorized by severity levels within each cluster. ",
"fieldConfig": {
"defaults": {
"color": {
- "mode": "continuous-GrYlRd"
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "inspect": false
},
"mappings": [],
"thresholds": {
@@ -121,33 +180,67 @@ data:
"color": "green",
"value": null
},
+ {
+ "color": "semi-dark-yellow",
+ "value": 10
+ },
+ {
+ "color": "orange",
+ "value": 25
+ },
{
"color": "red",
- "value": 80
+ "value": 50
+ },
+ {
+ "color": "semi-dark-red",
+ "value": 100
+ },
+ {
+ "color": "dark-red",
+ "value": 1000
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Counts"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "lcd",
+ "type": "gauge"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 0
+ "y": 1
},
- "id": 22,
+ "id": 48,
"options": {
- "displayMode": "gradient",
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showUnfilled": true
+ "showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -155,68 +248,107 @@ data:
"uid": "{{ .Values.datasources.uid }}"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT cluster_name, misconfig_severity, count(*)\nFROM default.trivy_misconfig\nGROUP BY cluster_name, misconfig_severity",
- "rawQuery": "SELECT cluster_name, misconfig_severity, count(*)\nFROM default.trivy_misconfig\nGROUP BY cluster_name, misconfig_severity",
+ "query": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'HIGH'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
+ "rawQuery": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162563) AND EventTime <= toDateTime(1713248963) AND vul_severity = 'HIGH'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Misconfiguration Severity counts grouped by Cluster",
- "transparent": true,
- "type": "bargauge"
+      "title": "Images with the Most High-Severity Vulnerabilities",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "{{ .Values.datasources.uid }}"
},
- "description": "This panel displays the total of misconfigurations from each clusters.",
"fieldConfig": {
"defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "inspect": false
+ },
"mappings": [],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
+ {
+ "color": "semi-dark-yellow",
+ "value": 10
+ },
{
"color": "orange",
- "value": 70
+ "value": 25
},
{
"color": "red",
- "value": 85
+ "value": 50
+ },
+ {
+ "color": "semi-dark-red",
+ "value": 100
+ },
+ {
+ "color": "dark-red",
+ "value": 1000
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Counts"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "lcd",
+ "type": "gauge"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
- "h": 5,
+ "h": 8,
"w": 12,
"x": 0,
- "y": 8
+ "y": 9
},
- "id": 16,
+ "id": 45,
"options": {
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showThresholdLabels": false,
- "showThresholdMarkers": true
+ "showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -224,68 +356,107 @@ data:
"uid": "{{ .Values.datasources.uid }}"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT cluster_name, count(*) FROM default.trivy_misconfig\nGROUP BY cluster_name",
- "rawQuery": "SELECT cluster_name, count(*) FROM default.trivy_misconfig\nGROUP BY cluster_name",
+ "query": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'MEDIUM'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
+ "rawQuery": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162505) AND EventTime <= toDateTime(1713248905) AND vul_severity = 'MEDIUM'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Misconfiguration Count by Cluster",
- "transparent": true,
- "type": "gauge"
+      "title": "Images with the Most Medium-Severity Vulnerabilities",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "{{ .Values.datasources.uid }}"
},
- "description": "This panel displays the total number Vulnerabilities under each namespace",
"fieldConfig": {
"defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "inspect": false
+ },
"mappings": [],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
+ {
+ "color": "semi-dark-yellow",
+ "value": 10
+ },
{
"color": "orange",
- "value": 70
+ "value": 25
},
{
"color": "red",
- "value": 85
+ "value": 50
+ },
+ {
+ "color": "semi-dark-red",
+ "value": 100
+ },
+ {
+ "color": "dark-red",
+ "value": 1000
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Counts"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "lcd",
+ "type": "gauge"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
- "h": 5,
+ "h": 8,
"w": 12,
"x": 12,
- "y": 8
+ "y": 9
},
- "id": 18,
+ "id": 46,
"options": {
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showThresholdLabels": false,
- "showThresholdMarkers": true
+ "showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -293,67 +464,88 @@ data:
"uid": "{{ .Values.datasources.uid }}"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT cluster_name, count(*) FROM default.trivy_vul\nGROUP BY cluster_name",
- "rawQuery": "SELECT cluster_name, count(*) FROM default.trivy_vul\nGROUP BY cluster_name",
+ "query": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'CRITICAL'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
+ "rawQuery": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162524) AND EventTime <= toDateTime(1713248924) AND vul_severity = 'CRITICAL'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Vulnerability Count by Cluster",
- "transparent": true,
- "type": "gauge"
+      "title": "Images with the Most Critical-Severity Vulnerabilities",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "{{ .Values.datasources.uid }}"
},
- "description": "This panel provides a count of critical vulnerabilities categorized by namespace. It helps to monitor and prioritize critical security issues across different namespaces.",
"fieldConfig": {
"defaults": {
"color": {
- "mode": "continuous-GrYlRd"
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "filterable": true,
+ "inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
+ "color": "light-blue",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "vul_severity"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "basic",
+ "type": "color-background"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
- "h": 7,
+ "h": 8,
"w": 12,
"x": 0,
- "y": 13
+ "y": 17
},
- "id": 12,
+ "id": 43,
"options": {
- "displayMode": "basic",
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showUnfilled": true
+ "showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -361,31 +553,38 @@ data:
"uid": "{{ .Values.datasources.uid }}"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT cluster_name, namespace, count(vul_severity) AS Critical_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace\n",
- "rawQuery": "SELECT cluster_name, namespace, count(vul_severity) AS Critical_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace",
+ "query": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'LOW'",
+ "rawQuery": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162449) AND EventTime <= toDateTime(1713248849) AND vul_severity = 'LOW'",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Critical Vulnerability Count by Namespace and ClusterName",
- "transparent": true,
- "type": "bargauge"
+      "title": "Images with Low-Severity Vulnerabilities",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "{{ .Values.datasources.uid }}"
},
- "description": "This panel provides a count of critical misconfigurations categorized by namespace. It helps to monitor and prioritize critical security issues across different namespaces.",
"fieldConfig": {
"defaults": {
"color": {
- "mode": "continuous-GrYlRd"
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "filterable": true,
+ "inspect": false
},
"mappings": [],
"thresholds": {
@@ -394,34 +593,48 @@ data:
{
"color": "green",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "vul_severity"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "basic",
+ "type": "color-background"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
- "h": 7,
+ "h": 8,
"w": 12,
"x": 12,
- "y": 13
+ "y": 17
},
- "id": 14,
+ "id": 44,
"options": {
- "displayMode": "basic",
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showUnfilled": true
+ "showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -429,71 +642,88 @@ data:
"uid": "{{ .Values.datasources.uid }}"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "interval": "",
"intervalFactor": 1,
- "query": "SELECT cluster_name, namespace, count(misconfig_severity) AS Critical_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace\n",
- "rawQuery": "SELECT cluster_name, namespace, count(misconfig_severity) AS Critical_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace",
+ "query": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'MEDIUM'",
+ "rawQuery": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162478) AND EventTime <= toDateTime(1713248878) AND vul_severity = 'MEDIUM'",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Critical Misconfiguration Count by Namespace and ClusterName",
- "transparent": true,
- "type": "bargauge"
+      "title": "Images with Medium-Severity Vulnerabilities",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "{{ .Values.datasources.uid }}"
},
- "description": "This panel displays the total count of Misconfiguration severity for each level",
"fieldConfig": {
"defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "filterable": true,
+ "inspect": false
+ },
"mappings": [],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
"steps": [
{
- "color": "green",
+ "color": "super-light-orange",
"value": null
- },
- {
- "color": "orange",
- "value": 70
- },
- {
- "color": "red",
- "value": 85
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "vul_severity"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "basic",
+ "type": "color-background"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
- "h": 6,
+ "h": 8,
"w": 12,
"x": 0,
- "y": 20
+ "y": 25
},
- "id": 8,
+ "id": 41,
"options": {
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": false
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showThresholdLabels": false,
- "showThresholdMarkers": true
+ "showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -501,121 +731,88 @@ data:
"uid": "{{ .Values.datasources.uid }}"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) AS High_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'HIGH'",
- "rawQuery": "SELECT count(*) AS High_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'HIGH'",
+ "query": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'HIGH'",
+ "rawQuery": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162400) AND EventTime <= toDateTime(1713248800) AND vul_severity = 'HIGH'",
"refId": "A",
"round": "0s",
"skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Medium_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'MEDIUM'",
- "rawQuery": "SELECT count(*) AS Medium_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'MEDIUM'",
- "refId": "B",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Low_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'LOW'",
- "rawQuery": "SELECT count(*) AS Low_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'LOW'",
- "refId": "C",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Critical_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'CRITICAL'",
- "rawQuery": "SELECT count(*) AS Critical_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'CRITICAL'",
- "refId": "D",
- "round": "0s",
- "skip_comments": true
}
],
- "title": "Count of Misconfiguration Severity Level",
- "transparent": true,
- "type": "gauge"
+      "title": "Images with High-Severity Vulnerabilities",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "{{ .Values.datasources.uid }}"
},
- "description": "This panel displays the total count of Vulnerability severity for each level",
"fieldConfig": {
"defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "filterable": true,
+ "inspect": false
+ },
"mappings": [],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
"steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "orange",
- "value": 70
- },
{
"color": "red",
- "value": 85
+ "value": null
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "vul_severity"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "basic",
+ "type": "color-background"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
- "h": 6,
+ "h": 8,
"w": 12,
"x": 12,
- "y": 20
+ "y": 25
},
- "id": 10,
+ "id": 42,
"options": {
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showThresholdLabels": false,
- "showThresholdMarkers": true
+ "showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -623,82 +820,38 @@ data:
"uid": "{{ .Values.datasources.uid }}"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) AS High_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'HIGH'",
- "rawQuery": "SELECT count(*) AS High_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'HIGH'",
+ "query": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'CRITICAL'",
+ "rawQuery": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162429) AND EventTime <= toDateTime(1713248829) AND vul_severity = 'CRITICAL'",
"refId": "A",
"round": "0s",
"skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Medium_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'MEDIUM'",
- "rawQuery": "SELECT count(*) AS Medium_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'MEDIUM'",
- "refId": "B",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Low_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'LOW'",
- "rawQuery": "SELECT count(*) AS Low_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'LOW'",
- "refId": "C",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "{{ .Values.datasources.uid }}"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Critical_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'CRITICAL'",
- "rawQuery": "SELECT count(*) AS Critical_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'CRITICAL'",
- "refId": "D",
- "round": "0s",
- "skip_comments": true
}
],
- "title": "Count of Vulnereability Severity level",
- "transparent": true,
- "type": "gauge"
+      "title": "Images with Critical-Severity Vulnerabilities",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "{{ .Values.datasources.uid }}"
},
- "description": "This panel displays the count of Misconfigurations in different clusters and namespaces.",
"fieldConfig": {
"defaults": {
"color": {
- "mode": "continuous-GrYlRd"
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
},
"mappings": [],
"thresholds": {
@@ -721,20 +874,22 @@ data:
"h": 8,
"w": 12,
"x": 0,
- "y": 26
+ "y": 33
},
- "id": 4,
+ "id": 39,
"options": {
- "displayMode": "lcd",
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showUnfilled": true
+ "showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -742,31 +897,35 @@ data:
"uid": "{{ .Values.datasources.uid }}"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT cluster_name, namespace, count(*) AS Misconfigurations\nFROM default.trivy_misconfig\nGROUP BY cluster_name, namespace",
- "rawQuery": "SELECT cluster_name, namespace, count(*) AS Misconfigurations\nFROM default.trivy_misconfig\nGROUP BY cluster_name, namespace",
+ "query": "SELECT image_name, package_url, count(*) AS duplicates\nFROM default.trivysbom\nWHERE $timeFilterByColumn(event_time)\nGROUP BY image_name,package_url\nHAVING count(*) > 1\n",
+ "rawQuery": "SELECT image_name, package_url, count(*) AS duplicates\nFROM default.trivysbom\nWHERE event_time >= toDateTime(1713162318) AND event_time <= toDateTime(1713248718)\nGROUP BY image_name,package_url\nHAVING count(*) > 1",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Misconfiguration Count by Cluster and Namespace",
- "transparent": true,
- "type": "bargauge"
+      "title": "Duplicate Packages in SBOM Images",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "{{ .Values.datasources.uid }}"
},
- "description": "This panel displays the count of vulnerabilities in different clusters and namespaces.",
"fieldConfig": {
"defaults": {
- "color": {
- "mode": "continuous-GrYlRd"
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
},
"mappings": [],
"thresholds": {
@@ -783,26 +942,44 @@ data:
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "images"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "lcd",
+ "type": "gauge"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 26
+ "y": 33
},
- "id": 6,
+ "id": 40,
"options": {
- "displayMode": "lcd",
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showUnfilled": true
+ "showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -810,25 +987,25 @@ data:
"uid": "{{ .Values.datasources.uid }}"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT cluster_name, namespace, count(*) AS Vulnerabilities\nFROM default.trivy_vul\nGROUP BY cluster_name, namespace",
- "rawQuery": "SELECT cluster_name, namespace, count(*) AS Vulnerabilities\nFROM default.trivy_vul\nGROUP BY cluster_name, namespace",
+ "query": "SELECT vul_id, count(artifact_name) AS images\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY vul_id",
+ "rawQuery": "SELECT vul_id, count(artifact_name) AS images\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162358) AND EventTime <= toDateTime(1713248758)\nGROUP BY vul_id",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Vulnerability Count by Cluster and Namespace",
- "transparent": true,
- "type": "bargauge"
+      "title": "Image Count by Vulnerability ID",
+ "type": "table"
},
{
"datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
},
"fieldConfig": {
"defaults": {
@@ -837,20 +1014,20 @@ data:
},
"custom": {
"align": "center",
- "displayMode": "color-text",
- "filterable": true
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
},
"mappings": [],
+ "noValue": "Trivy Image not available",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
}
@@ -861,9 +1038,9 @@ data:
"h": 8,
"w": 24,
"x": 0,
- "y": 34
+ "y": 41
},
- "id": 2,
+ "id": 34,
"options": {
"cellHeight": "sm",
"footer": {
@@ -876,55 +1053,32 @@ data:
},
"showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
- "builderOptions": {
- "database": "default",
- "fields": [
- "cluster_name",
- "namespace",
- "kind",
- "name",
- "vul_id",
- "vul_vendor_ids",
- "vul_pkg_id",
- "vul_pkg_name",
- "vul_pkg_path",
- "vul_installed_version",
- "vul_fixed_version",
- "vul_title",
- "vul_severity",
- "vul_published_date",
- "vul_last_modified_date"
- ],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [],
- "table": "trivy_vul"
- },
"datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
},
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT cluster_name, namespace, kind, name, vul_id, vul_vendor_ids, vul_pkg_id, vul_pkg_name, vul_pkg_path, vul_installed_version, vul_fixed_version, vul_title, vul_severity, vul_published_date, vul_last_modified_date FROM default.\"trivy_vul\" LIMIT 100",
- "refId": "A",
- "selectedFormat": 1
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT \"cluster_name\", \"EventTime\", \"artifact_name\", \"vul_id\", \"vul_pkg_id\", \"vul_pkg_name\", \"vul_installed_version\", \"vul_fixed_version\", \"vul_title\", \"vul_severity\", \"vul_published_date\", \"vul_last_modified_date\" \nFROM \"default\".\"trivyimage\"\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT \"cluster_name\", \"EventTime\", \"artifact_name\", \"vul_id\", \"vul_pkg_id\", \"vul_pkg_name\", \"vul_installed_version\", \"vul_fixed_version\", \"vul_title\", \"vul_severity\", \"vul_published_date\", \"vul_last_modified_date\" \nFROM \"default\".\"trivyimage\"\nWHERE EventTime >= toDateTime(1693581675) AND EventTime <= toDateTime(1694186475)\nORDER BY EventTime DESC",
+ "round": "0s",
+ "skip_comments": true
}
],
- "title": "Trivy Vulnerabilities",
- "transparent": true,
+ "title": "Trivy Image",
"type": "table"
},
{
"datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
},
- "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -932,20 +1086,20 @@ data:
},
"custom": {
"align": "center",
- "displayMode": "color-text",
- "filterable": true
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
},
"mappings": [],
+ "noValue": "Trivy SBOM not available",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
}
@@ -953,12 +1107,12 @@ data:
"overrides": []
},
"gridPos": {
- "h": 10,
+ "h": 8,
"w": 24,
"x": 0,
- "y": 42
+ "y": 49
},
- "id": 1,
+ "id": 35,
"options": {
"cellHeight": "sm",
"footer": {
@@ -971,196 +1125,1300 @@ data:
},
"showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
- "builderOptions": {
- "database": "default",
- "fields": [
- "cluster_name",
- "namespace",
- "kind",
- "name",
- "misconfig_id",
- "misconfig_avdid",
- "misconfig_type",
- "misconfig_title",
- "misconfig_desc",
- "misconfig_msg",
- "misconfig_query",
- "misconfig_resolution",
- "misconfig_severity",
- "misconfig_status"
- ],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [],
- "table": "trivy_misconfig"
- },
"datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
},
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT cluster_name, namespace, kind, name, misconfig_id, misconfig_avdid, misconfig_type, misconfig_title, misconfig_desc, misconfig_msg, misconfig_query, misconfig_resolution, misconfig_severity, misconfig_status FROM default.\"trivy_misconfig\" LIMIT 100",
+ "dateTimeType": "DATETIME",
+ "editorMode": "builder",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.trivysbom\nWHERE $timeFilterByColumn(event_time)",
+ "rawQuery": "SELECT * FROM default.trivysbom\nWHERE event_time >= toDateTime(1713162248) AND event_time <= toDateTime(1713248648)",
"refId": "A",
- "selectedFormat": 1
+ "round": "0s",
+ "skip_comments": true
}
],
- "title": "Trivy Misconfiguration",
- "transparent": true,
+      "title": "Trivy SBOM",
"type": "table"
},
{
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 57
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
+ "id": 38,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
},
- "custom": {
- "align": "center",
- "displayMode": "color-text",
- "filterable": true
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 97
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
+ "id": 36,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let severity = [];\nlet counts = [];\n\ndata.series.map((s) => {\n severity = s.fields.find((f) => f.name === 'vul_severity').values;\n counts = s.fields.find((f) => f.name === 'total_count').values;\n});\n\n// Create an empty array to store pie chart data\nconst pieChartData = [];\n\n// Define colors for pie slices\nconst pieSliceColors = ['#235894', '#FF0000', '#00FF00', '#FFFF00', '#FFA500'];\n\n// Map severity and counts to pie chart data\nseverity.forEach((sev, index) => {\n pieChartData.push({\n value: counts[index],\n name: sev,\n itemStyle: {\n opacity: 0.7,\n color: pieSliceColors[index % pieSliceColors.length],\n borderWidth: 3,\n borderColor: '#FFFFFF', // Set the border color to white\n },\n });\n});\n\nreturn {\n backgroundColor: '#FFFFFF', // Set the background color to white\n tooltip: {},\n series: [\n {\n name: 'pie',\n type: 'pie',\n selectedMode: 'single',\n selectedOffset: 30,\n clockwise: true,\n label: {\n fontSize: 18,\n color: '#235894',\n },\n labelLine: {\n lineStyle: {\n color: '#235894',\n },\n },\n data: pieChartData, // Use the modified pie chart data\n itemStyle: {\n opacity: 0.7,\n borderWidth: 3,\n borderColor: '#FFFFFF', // Set the border color to white\n },\n },\n ],\n};",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
},
- {
- "color": "red",
- "value": 80
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT vul_severity, count(*) AS total_count\nFROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date)\nGROUP BY vul_severity",
+ "rawQuery": "SELECT vul_severity, count(*) AS total_count\nFROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694438766) AND vul_last_modified_date <= toDateTime(1694611566)\nGROUP BY vul_severity",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Vulnerability Severity Distribution",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 97
+ },
+ "id": 37,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let severity = [];\nlet counts = [];\n\ndata.series.map((s) => {\n severity = s.fields.find((f) => f.name === 'misconfig_severity').values;\n counts = s.fields.find((f) => f.name === 'total_count').values;\n});\n\n// Create an empty array to store pie chart data\nconst pieChartData = [];\n\n// Define colors for pie slices\nconst pieSliceColors = ['#235894', '#FF0000', '#00FF00', '#FFFF00', '#FFA500'];\n\n// Map severity and counts to pie chart data\nseverity.forEach((sev, index) => {\n pieChartData.push({\n value: counts[index],\n name: sev,\n itemStyle: {\n opacity: 0.7,\n color: pieSliceColors[index % pieSliceColors.length],\n borderWidth: 3,\n borderColor: '#FFFFFF', // Set the border color to white\n },\n });\n});\n\nreturn {\n backgroundColor: '#FFFFFF', // Set the background color to white\n tooltip: {},\n series: [\n {\n name: 'pie',\n type: 'pie',\n selectedMode: 'single',\n selectedOffset: 30,\n clockwise: true,\n label: {\n fontSize: 18,\n color: '#235894',\n },\n labelLine: {\n lineStyle: {\n color: '#235894',\n },\n },\n data: pieChartData, // Use the modified pie chart data\n itemStyle: {\n opacity: 0.7,\n borderWidth: 3,\n borderColor: '#FFFFFF', // Set the border color to white\n },\n },\n ],\n};",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT misconfig_severity, count(*) AS total_count\nFROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY misconfig_severity",
+ "rawQuery": "SELECT misconfig_severity, count(*) AS total_count\nFROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694438912) AND EventTime <= toDateTime(1694611712)\nGROUP BY misconfig_severity",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Misconfiguration Severity Distribution",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "description": "This panel illustrates the distribution of vulnerability severities across different clusters. It provides an overview of the count of vulnerabilities categorized by severity levels within each cluster.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
}
- ]
- }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 105
+ },
+ "id": 20,
+ "options": {
+ "displayMode": "gradient",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, vul_severity, count(*) \nFROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date)\nGROUP BY cluster_name, vul_severity",
+ "rawQuery": "SELECT cluster_name, vul_severity, count(*) \nFROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694155614) AND vul_last_modified_date <= toDateTime(1694242014)\nGROUP BY cluster_name, vul_severity",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Vulnerability Severity counts grouped by Cluster",
+ "type": "bargauge"
},
- "overrides": []
- },
- "gridPos": {
- "h": 10,
- "w": 24,
- "x": 0,
- "y": 52
- },
- "id": 24,
- "options": {
- "footer": {
- "fields": "",
- "reducer": [
- "sum"
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "description": "This panel illustrates the distribution of misconfiguration severities across different clusters. It provides an overview of the count of misconfigurations categorized by severity levels within each cluster. ",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 105
+ },
+ "id": 22,
+ "options": {
+ "displayMode": "gradient",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, misconfig_severity, count(*)\nFROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY cluster_name, misconfig_severity",
+ "rawQuery": "SELECT cluster_name, misconfig_severity, count(*)\nFROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694156239) AND EventTime <= toDateTime(1694242639)\nGROUP BY cluster_name, misconfig_severity",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
],
- "show": false
+ "title": "Misconfiguration Severity counts grouped by Cluster",
+ "type": "bargauge"
},
- "showHeader": true
- },
- "pluginVersion": "8.4.6",
- "targets": [
{
- "builderOptions": {
- "0": "T",
- "1": "h",
- "2": "e",
- "3": " ",
- "4": "q",
- "5": "u",
- "6": "e",
- "7": "r",
- "8": "y",
- "9": " ",
- "10": "i",
- "11": "s",
- "12": " ",
- "13": "n",
- "14": "o",
- "15": "t",
- "16": " ",
- "17": "a",
- "18": " ",
- "19": "s",
- "20": "e",
- "21": "l",
- "22": "e",
- "23": "c",
- "24": "t",
- "25": " ",
- "26": "s",
- "27": "t",
- "28": "a",
- "29": "t",
- "30": "e",
- "31": "m",
- "32": "e",
- "33": "n",
- "34": "t",
- "35": ".",
- "database": "default",
- "fields": [
- "cluster_name",
- "artifact_name",
- "vul_id",
- "vul_pkg_id",
- "vul_pkg_name",
- "vul_installed_version",
- "vul_fixed_version",
- "vul_title",
- "vul_severity",
- "vul_published_date",
- "vul_last_modified_date"
- ],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [],
- "table": "trivyimage"
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+        "description": "This panel displays the total number of vulnerabilities in each cluster.",
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
+ },
+ {
+ "color": "red",
+ "value": 85
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 113
},
+ "id": 18,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, count(*) FROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date)\nGROUP BY cluster_name",
+ "rawQuery": "SELECT cluster_name, count(*) FROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694155665) AND vul_last_modified_date <= toDateTime(1694242065)\nGROUP BY cluster_name",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Vulnerability Count by Cluster",
+ "type": "gauge"
+ },
+ {
"datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+        "description": "This panel displays the total number of misconfigurations in each cluster.",
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
+ },
+ {
+ "color": "red",
+ "value": 85
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 113
+ },
+ "id": 16,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, count(*) FROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY cluster_name",
+ "rawQuery": "SELECT cluster_name, count(*) FROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694156266) AND EventTime <= toDateTime(1694242666)\nGROUP BY cluster_name",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
}
+ ],
+ "title": "Misconfiguration Count by Cluster",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
},
- "queryType": "builder",
- "rawSql": "SELECT cluster_name, artifact_name, vul_id, vul_pkg_id, vul_pkg_name, vul_installed_version, vul_fixed_version, vul_title, vul_severity, vul_published_date, vul_last_modified_date FROM default.\"trivyimage\" LIMIT 100",
- "refId": "A",
- "selectedFormat": 1
+        "description": "This panel provides a count of high-severity vulnerabilities categorized by namespace. It helps to monitor and prioritize high-severity issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 118
+ },
+ "id": 29,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(vul_severity) AS High_Severity\nFROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date) AND vul_severity = 'HIGH'\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(vul_severity) AS High_Severity\nFROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694155706) AND vul_last_modified_date <= toDateTime(1694242106) AND vul_severity = 'HIGH'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "High Vulnerability Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+        "description": "This panel provides a count of high-severity misconfigurations categorized by namespace. It helps to monitor and prioritize high-severity issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 118
+ },
+ "id": 30,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(misconfig_severity) AS High_Severity\nFROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime) AND misconfig_severity = 'HIGH'\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(misconfig_severity) AS High_Severity\nFROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694156303) AND EventTime <= toDateTime(1694242703) AND misconfig_severity = 'HIGH'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "High Misconfiguration Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+        "description": "This panel provides a count of low-severity vulnerabilities categorized by namespace. It helps to monitor and prioritize low-severity issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 125
+ },
+ "id": 27,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(vul_severity) AS Low_Severity\nFROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date) AND vul_severity = 'LOW'\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(vul_severity) AS Low_Severity\nFROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694155745) AND vul_last_modified_date <= toDateTime(1694242145) AND vul_severity = 'LOW'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Low Vulnerability Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+        "description": "This panel provides a count of low-severity misconfigurations categorized by namespace. It helps to monitor and prioritize low-severity issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 125
+ },
+ "id": 28,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(misconfig_severity) AS Low_Severity\nFROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime) AND misconfig_severity = 'LOW'\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(misconfig_severity) AS Low_Severity\nFROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694156331) AND EventTime <= toDateTime(1694242731) AND misconfig_severity = 'LOW'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Low Misconfiguration Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+        "description": "This panel provides a count of medium-severity vulnerabilities categorized by namespace. It helps to monitor and prioritize medium-severity issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 132
+ },
+ "id": 25,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(vul_severity) AS Medium_Severity\nFROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date) AND vul_severity = 'MEDIUM'\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(vul_severity) AS Medium_Severity\nFROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694155776) AND vul_last_modified_date <= toDateTime(1694242176) AND vul_severity = 'MEDIUM'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Medium Vulnerability Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+        "description": "This panel provides a count of medium-severity misconfigurations categorized by namespace. It helps to monitor and prioritize medium-severity issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 132
+ },
+ "id": 26,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(misconfig_severity) AS Medium_Severity\nFROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime) AND misconfig_severity = 'MEDIUM'\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(misconfig_severity) AS Medium_Severity\nFROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694156357) AND EventTime <= toDateTime(1694242757) AND misconfig_severity = 'MEDIUM'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Medium Misconfiguration Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "description": "This panel provides a count of critical vulnerabilities categorized by namespace. It helps to monitor and prioritize critical security issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 139
+ },
+ "id": 12,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(vul_severity) AS Critical_Severity\nFROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date) AND vul_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace\n",
+ "rawQuery": "SELECT cluster_name, namespace, count(vul_severity) AS Critical_Severity\nFROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694156016) AND vul_last_modified_date <= toDateTime(1694242416) AND vul_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Critical Vulnerability Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "description": "This panel provides a count of critical misconfigurations categorized by namespace. It helps to monitor and prioritize critical security issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 139
+ },
+ "id": 14,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "interval": "",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(misconfig_severity) AS Critical_Severity\nFROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime) AND misconfig_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace\n",
+ "rawQuery": "SELECT cluster_name, namespace, count(misconfig_severity) AS Critical_Severity\nFROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694156383) AND EventTime <= toDateTime(1694242783) AND misconfig_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Critical Misconfiguration Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "description": "This panel displays the count of vulnerabilities in different clusters and namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 146
+ },
+ "id": 6,
+ "options": {
+ "displayMode": "lcd",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(*) AS Vulnerabilities\nFROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date)\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(*) AS Vulnerabilities\nFROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694156175) AND vul_last_modified_date <= toDateTime(1694242575)\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Vulnerability Count by Cluster and Namespace",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "description": "This panel displays the count of Misconfigurations in different clusters and namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 146
+ },
+ "id": 4,
+ "options": {
+ "displayMode": "lcd",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(*) AS Misconfigurations\nFROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(*) AS Misconfigurations\nFROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694156545) AND EventTime <= toDateTime(1694242945)\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Misconfiguration Count by Cluster and Namespace",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 154
+ },
+ "id": 32,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT \"cluster_name\", \"namespace\", \"kind\", \"name\", \"vul_id\", \"vul_vendor_ids\", \"vul_pkg_id\", \"vul_pkg_name\", \"vul_pkg_path\", \"vul_installed_version\", \"vul_fixed_version\", \"vul_title\", \"vul_severity\", \"vul_published_date\", \"vul_last_modified_date\" \nFROM \"default\".\"trivy_vul\"\nWHERE $timeFilterByColumn(vul_last_modified_date)\nORDER BY vul_last_modified_date DESC",
+ "rawQuery": "SELECT \"cluster_name\", \"namespace\", \"kind\", \"name\", \"vul_id\", \"vul_vendor_ids\", \"vul_pkg_id\", \"vul_pkg_name\", \"vul_pkg_path\", \"vul_installed_version\", \"vul_fixed_version\", \"vul_title\", \"vul_severity\", \"vul_published_date\", \"vul_last_modified_date\" \nFROM \"default\".\"trivy_vul\"\nWHERE vul_last_modified_date >= toDateTime(1694099993) AND vul_last_modified_date <= toDateTime(1694186393)\nORDER BY vul_last_modified_date DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Trivy Vulnerabilities",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 162
+ },
+ "id": 33,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "{{ .Values.datasources.uid }}"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT \"cluster_name\", \"EventTime\", \"namespace\", \"kind\", \"name\", \"misconfig_id\", \"misconfig_avdid\", \"misconfig_type\", \"misconfig_title\", \"misconfig_desc\", \"misconfig_msg\", \"misconfig_query\", \"misconfig_resolution\", \"misconfig_severity\", \"misconfig_status\" \nFROM \"default\".\"trivy_misconfig\"\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT \"cluster_name\", \"EventTime\", \"namespace\", \"kind\", \"name\", \"misconfig_id\", \"misconfig_avdid\", \"misconfig_type\", \"misconfig_title\", \"misconfig_desc\", \"misconfig_msg\", \"misconfig_query\", \"misconfig_resolution\", \"misconfig_severity\", \"misconfig_status\" \nFROM \"default\".\"trivy_misconfig\"\nWHERE EventTime >= toDateTime(1694966455) AND EventTime <= toDateTime(1695052855)\nORDER BY EventTime DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Trivy Misconfiguration",
+ "type": "table"
}
],
- "title": "Trivy Image",
- "transparent": true,
- "type": "table"
+ "title": "Trivy Vulnerability and Misconfiguration",
+ "type": "row"
}
],
"refresh": "",
- "schemaVersion": 35,
+ "schemaVersion": 38,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
- "from": "now-6h",
+ "from": "now-24h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Trivy",
- "uid": "f9b0a865-f419-410a-b7d9-9a3f79a70d47",
- "version": 15,
+ "uid": "f9b0a865-f419-410a-b7d9-9a3f79a70d48",
+ "version": 2,
"weekStart": ""
}
+
{{- end }}
\ No newline at end of file
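For context, every panel in the dashboard above targets ClickHouse through the templated `{{ .Values.datasources.uid }}` placeholder. A minimal sketch of the values block it resolves against, matching the defaults that appear in `charts/client/values.yaml` later in this change:

```yaml
# charts/client/values.yaml (excerpt) - the datasource UID the dashboard panels bind to
datasources:
  label: grafana_datasource
  labelValue: "1"
  uid: vertamedia-clickhouse-datasource
```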
diff --git a/charts/client/templates/configmap-vertamedia-datasource.yaml b/charts/client/templates/configmap-vertamedia-datasource.yaml
index b2ea28fe..e50b83e3 100644
--- a/charts/client/templates/configmap-vertamedia-datasource.yaml
+++ b/charts/client/templates/configmap-vertamedia-datasource.yaml
@@ -11,6 +11,27 @@ data:
datasources:
- name: vertamedia-clickhouse-datasource
type: vertamedia-clickhouse-datasource
- url: http://kubviz-client-clickhouse:8123
+ {{- if .Values.clickhouse.enabled }}
+ url: {{ include "client.fullname" . }}-clickhouse:8123
access: proxy
+ basicAuth: true
+ basicAuthUser: {{ .Values.clickhouse.user }}
+ secureJsonData:
+ basicAuthPassword: {{ .Values.clickhouse.password }}
+ {{- else }}
+ url: {{ .Values.existingClickhouse.host }}:8123
+ access: proxy
+ basicAuth: true
+ {{- if not .Values.existingClickhouse.secret }}
+ basicAuthUser: {{ .Values.existingClickhouse.username }}
+ {{- else }}
+ basicAuthUser: $CLICKHOUSE_USERNAME
+ {{- end }}
+ secureJsonData:
+ {{- if not .Values.existingClickhouse.secret }}
+ basicAuthPassword: {{ .Values.existingClickhouse.password }}
+ {{- else }}
+ basicAuthPassword: $CLICKHOUSE_PASSWORD
+ {{- end }}
+ {{- end }}
{{- end }}
\ No newline at end of file
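To illustrate the branch added here: when `clickhouse.enabled` is false and `existingClickhouse.secret` is set, the datasource falls back to environment-variable placeholders for the credentials, relying on Grafana's variable expansion in provisioning files. A hypothetical rendered result under those assumptions (the host below is illustrative, not a value from this chart):

```yaml
# Hypothetical render with clickhouse.enabled=false and existingClickhouse.secret set
datasources:
  - name: vertamedia-clickhouse-datasource
    type: vertamedia-clickhouse-datasource
    url: my-clickhouse.example.svc:8123
    access: proxy
    basicAuth: true
    basicAuthUser: $CLICKHOUSE_USERNAME
    secureJsonData:
      basicAuthPassword: $CLICKHOUSE_PASSWORD
```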
diff --git a/charts/client/templates/deployment.yaml b/charts/client/templates/deployment.yaml
index 7dd1c34b..2706a7fa 100644
--- a/charts/client/templates/deployment.yaml
+++ b/charts/client/templates/deployment.yaml
@@ -27,6 +27,53 @@ spec:
serviceAccountName: {{ include "client.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
+ initContainers:
+ - name: migration-init
+ image: "{{ .Values.migration.image.repository }}:{{ .Values.migration.image.tag }}"
+ imagePullPolicy: {{ .Values.migration.image.pullPolicy }}
+ command:
+ - /bin/sh
+ - -c
+ args:
+ - "/script/wait-for-clickhouse.sh && /migration sql -e --yes"
+ env:
+ - name: SCHEMA_PATH
+ value : {{ .Values.migration.schema.path }}
+ {{- if .Values.clickhouse.enabled }}
+ - name: DB_ADDRESS
+ value: {{ include "client.fullname" . }}-clickhouse
+ - name: CLICKHOUSE_USERNAME
+ value: {{ .Values.clickhouse.user }}
+ - name: CLICKHOUSE_PASSWORD
+ value: {{ .Values.clickhouse.password }}
+ {{- else }}
+ - name: DB_ADDRESS
+ value: {{ .Values.existingClickhouse.host }}
+ - name: CLICKHOUSE_USERNAME
+ {{- if not .Values.existingClickhouse.secret }}
+ value: {{ .Values.existingClickhouse.username }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.existingClickhouse.secret.name }}
+ key: {{ .Values.existingClickhouse.secret.usernamekey }}
+ {{- end }}
+ - name: CLICKHOUSE_PASSWORD
+ {{- if not .Values.existingClickhouse.secret }}
+ value: {{ .Values.existingClickhouse.password }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.existingClickhouse.secret.name }}
+ key: {{ .Values.existingClickhouse.secret.passwordkey }}
+ {{- end }}
+ {{- end }}
+ - name: DB_PORT
+ value: "9000"
+ - name: TTL_INTERVAL
+ value: "{{ .Values.ttl.ttlInterval }}"
+ - name: TTL_UNIT
+ value: {{ .Values.ttl.ttlUnit }}
containers:
- name: {{ .Chart.Name }}
securityContext:
@@ -47,13 +94,79 @@ spec:
# port: http
env:
- name: NATS_TOKEN
+ {{- if and .Values.nats.enabled .Values.nats.auth.enabled .Values.nats.auth.token }}
value: {{ .Values.nats.auth.token }}
+ {{- else if and .Values.nats.enabled .Values.nats.auth.enabled .Values.nats.auth.secret }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.nats.auth.secret.name }}
+ key: {{ .Values.nats.auth.secret.key }}
+ {{- end }}
- name: NATS_ADDRESS
value: {{ include "client.fullname" . }}-nats
+ {{- if .Values.clickhouse.enabled }}
- name: DB_ADDRESS
value: {{ include "client.fullname" . }}-clickhouse
+ - name: CLICKHOUSE_USERNAME
+ value: {{ .Values.clickhouse.user }}
+ - name: CLICKHOUSE_PASSWORD
+ value: {{ .Values.clickhouse.password }}
+ {{- else }}
+ - name: DB_ADDRESS
+ value: {{ .Values.existingClickhouse.host }}
+ - name: CLICKHOUSE_USERNAME
+ {{- if not .Values.existingClickhouse.secret }}
+ value: {{ .Values.existingClickhouse.username }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.existingClickhouse.secret.name }}
+ key: {{ .Values.existingClickhouse.secret.usernamekey }}
+ {{- end }}
+ - name: CLICKHOUSE_PASSWORD
+ {{- if not .Values.existingClickhouse.secret }}
+ value: {{ .Values.existingClickhouse.password }}
+ {{- else }}
+ valueFrom:
+ secretKeyRef:
+ name: {{ .Values.existingClickhouse.secret.name }}
+ key: {{ .Values.existingClickhouse.secret.passwordkey }}
+ {{- end }}
+ {{- end }}
- name: DB_PORT
value: "9000"
+ - name: TTL_INTERVAL
+ value: "{{ .Values.ttl.ttlInterval }}"
+ - name: TTL_UNIT
+ value: {{ .Values.ttl.ttlUnit }}
+ - name: IS_OPTEL_ENABLED
+ value: "{{ .Values.opentelemetry.isEnabled }}"
+ - name : OPTEL_URL
+ value: {{ .Values.opentelemetry.url }}
+ - name : APPLICATION_NAME
+ value : {{ .Values.opentelemetry.appName }}
+ - name : KETALL_EVENTS_CONSUMER
+ value : {{ .Values.consumer.ketallconsumer }}
+ - name : RAKEES_METRICS_CONSUMER
+ value : {{ .Values.consumer.rakeesconsumer }}
+ - name : OUTDATED_EVENTS_CONSUMER
+ value : {{ .Values.consumer.outdatedconsumer }}
+ - name : DEPRECATED_API_CONSUMER
+ value : {{ .Values.consumer.deprecatedconsumer }}
+ - name : DELETED_API_CONSUMER
+ value : {{ .Values.consumer.deletedconsumer }}
+ - name : KUBVIZ_EVENTS_CONSUMER
+ value : {{ .Values.consumer.kubvizconsumer }}
+ - name : KUBSCORE_CONSUMER
+ value : {{ .Values.consumer.kubscoreconsumer }}
+ - name : TRIVY_CONSUMER
+ value : {{ .Values.consumer.trivyconsumer }}
+ - name : TRIVY_IMAGE_CONSUMER
+ value : {{ .Values.consumer.trivyimageconsumer }}
+ - name : TRIVY_SBOM_CONSUMER
+ value : {{ .Values.consumer.trivysbomconsumer }}
+ - name : KUBERHEALTHY_CONSUMER
+ value : {{ .Values.consumer.kuberhealthyconsumer }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
@@ -67,4 +180,4 @@ spec:
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
- {{- end }}
+ {{- end }}
\ No newline at end of file
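Both the `migration-init` init container and the client container resolve ClickHouse credentials the same way: from chart values when the bundled ClickHouse is enabled, otherwise from `existingClickhouse`, optionally via a `secretKeyRef`. A sketch of override values that exercise the secret-backed branch; the secret name and key names are placeholders, and the referenced Secret must already exist:

```yaml
# Illustrative values for pointing the client at an external ClickHouse
clickhouse:
  enabled: false
existingClickhouse:
  host: clickhouse.databases.svc
  secret:
    name: clickhouse-credentials
    usernamekey: username
    passwordkey: password
```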
diff --git a/charts/client/templates/external_secret.yaml b/charts/client/templates/external_secret.yaml
new file mode 100644
index 00000000..a856e725
--- /dev/null
+++ b/charts/client/templates/external_secret.yaml
@@ -0,0 +1,18 @@
+{{- if .Values.externalSecrets.create }}
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+ name: nats-external
+spec:
+ refreshInterval: "10s"
+ secretStoreRef:
+ name: vault-store
+ kind: ClusterSecretStore
+ target:
+ name: nats-secret
+ data:
+ - secretKey: nats-token
+ remoteRef:
+ key: secret/generic/nats/auth-token
+ property: nats
+{{- end }}
\ No newline at end of file
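The ExternalSecret above materializes the NATS token from Vault into a Kubernetes Secret named `nats-secret` under the key `nats-token`. To have the client consume it, the deployment's `NATS_TOKEN` secretKeyRef branch can be pointed at that Secret. A sketch of the corresponding values, assuming the External Secrets operator and a ClusterSecretStore named `vault-store` are already installed:

```yaml
# Illustrative values wiring the generated Secret into the client deployment
externalSecrets:
  create: true
nats:
  auth:
    enabled: true
    token: ""          # leave empty so the secret reference is used
    secret:
      name: nats-secret
      key: nats-token
```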
diff --git a/charts/client/values.yaml b/charts/client/values.yaml
index d0231221..4fe08c21 100644
--- a/charts/client/values.yaml
+++ b/charts/client/values.yaml
@@ -1,4 +1,4 @@
-# Default values for client.
+# Default values for client.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
@@ -8,7 +8,7 @@ image:
repository: ghcr.io/intelops/kubviz/client
pullPolicy: Always
# Overrides the image tag whose default is the chart appVersion.
- tag: "v1.0.0"
+ tag: "v1.1.7"
imagePullSecrets: []
nameOverride: ""
@@ -53,17 +53,19 @@ ingress:
# hosts:
# - chart-example.local
-resources: {}
+resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- # limits:
- # cpu: 100m
- # memory: 128Mi
- # requests:
- # cpu: 100m
- # memory: 128Mi
+ limits:
+ cpu: 200m
+ memory: 256Mi
+ ephemeral-storage: 50Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ ephemeral-storage: 50Mi
autoscaling:
enabled: false
@@ -78,23 +80,73 @@ tolerations: []
affinity: {}
+externalSecrets:
+ create: false
+
nats:
enabled: true
#Authentication setup
auth:
enabled: true
- token: "UfmrJOYwYCCsgQvxvcfJ3BdI6c8WBbnD"
+ # Use token if you want to provide the token via Helm Values
+ token: ""
+ # Use a secret reference if you want to get a token from a secret
+ # secret:
+ # name: ""
+ # key: ""
nats:
jetstream:
enabled: true
clickhouse:
enabled: true
- clickhouse:
- replicas: "1"
+ user: admin
+ password: admin
+ replicasCount: 1
+
+existingClickhouse:
+ host: clickhouse
+ # Use username and password if you want to provide the credentials via Helm Values
+ username: ""
+ password: ""
+ # Use a secret reference if you want to get a username and password from a secret
+ secret: {}
+ # name: ""
+ # usernamekey: ""
+ # passwordkey: ""
grafana:
enabled: false
+ plugins:
+ - vertamedia-clickhouse-datasource
+ - grafana-clickhouse-datasource
+ - volkovlabs-echarts-panel
+ sidecar:
+ dashboards:
+ provider:
+ allowUiUpdates: true
+ postgresql:
+ enabled: false
+ database:
+ type: postgres
+ host: kubviz-client-postgresql:5432
+ name: postgres
+ ssl_mode: disable
+ user: postgres
+ password: $__file{/etc/secrets/postgresql/postgres-password}
+ secretMount:
+ name: postgresql-mount
+ mountPath: /etc/secrets/postgresql
+ secretName: kubviz-client-postgresql
+ readOnly: true
+ clickhouse:
+ enabled: false
+ username: ""
+ password: ""
+ existingSecret: {}
+ # name: ""
+ # usernamekey: ""
+ # passwordkey: ""
dashboards:
enabled: true
@@ -106,3 +158,34 @@ datasources:
label: grafana_datasource
labelValue: "1"
uid: vertamedia-clickhouse-datasource
+
+migration:
+ enabled: true
+ image:
+ repository: ghcr.io/intelops/kubviz/migration
+ pullPolicy: Always
+ tag: "v1.1.7"
+ schema:
+ path: "/sql"
+
+ttl:
+ ttlInterval: "1"
+ ttlUnit: MONTH
+
+opentelemetry:
+ isEnabled: false
+ url: "otelcollector.local"
+ appName: "kubviz"
+
+consumer:
+ ketallconsumer: "KETALL_EVENTS_CONSUMER"
+ rakeesconsumer: "RAKEES_METRICS_CONSUMER"
+ outdatedconsumer: "OUTDATED_EVENTS_CONSUMER"
+ deprecatedconsumer: "DEPRECATED_API_CONSUMER"
+ deletedconsumer: "DELETED_API_CONSUMER"
+ kubvizconsumer: "KUBVIZ_EVENTS_CONSUMER"
+ kubscoreconsumer: "KUBSCORE_CONSUMER"
+ trivyconsumer: "TRIVY_CONSUMER"
+ trivyimageconsumer: "TRIVY_IMAGE_CONSUMER"
+ trivysbomconsumer: "TRIVY_SBOM_CONSUMER"
+ kuberhealthyconsumer: "KUBERHEALTHY_CONSUMER"
\ No newline at end of file
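The new `ttl` values are handed to the migration job and the client as `TTL_INTERVAL` and `TTL_UNIT`, which presumably drive the retention (TTL) on the ClickHouse tables. A sketch of an override that shortens retention, assuming the unit is a ClickHouse interval unit such as DAY, WEEK, or MONTH:

```yaml
# Illustrative retention override passed via TTL_INTERVAL / TTL_UNIT
ttl:
  ttlInterval: "15"
  ttlUnit: DAY
```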
diff --git a/charts/nats/.helmignore b/charts/nats/.helmignore
deleted file mode 100644
index 50af0317..00000000
--- a/charts/nats/.helmignore
+++ /dev/null
@@ -1,22 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
-.vscode/
diff --git a/charts/nats/Chart.yaml b/charts/nats/Chart.yaml
deleted file mode 100644
index 2ceaa087..00000000
--- a/charts/nats/Chart.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-apiVersion: v2
-appVersion: 2.7.2
-description: A Helm chart for the NATS.io High Speed Cloud Native Distributed Communications
- Technology.
-home: http://github.com/nats-io/k8s
-icon: https://nats.io/img/nats-icon-color.png
-keywords:
-- nats
-- messaging
-- cncf
-maintainers:
-- email: wally@nats.io
- name: Waldemar Quevedo
- url: https://github.com/wallyqs
-- email: colin@nats.io
- name: Colin Sullivan
- url: https://github.com/ColinSullivan1
-- email: jaime@nats.io
- name: Jaime Piña
- url: https://github.com/variadico
-name: nats
-version: 0.13.4
diff --git a/charts/nats/README.md b/charts/nats/README.md
deleted file mode 100644
index 4b4bd917..00000000
--- a/charts/nats/README.md
+++ /dev/null
@@ -1,799 +0,0 @@
-# NATS Server
-
-[NATS](https://nats.io) is a simple, secure and performant communications system for digital systems, services and devices. NATS is part of the Cloud Native Computing Foundation ([CNCF](https://cncf.io)). NATS has over [30 client language implementations](https://nats.io/download/), and its server can run on-premise, in the cloud, at the edge, and even on a Raspberry Pi. NATS can secure and simplify design and operation of modern distributed systems.
-
-## TL;DR;
-
-```console
-helm repo add nats https://nats-io.github.io/k8s/helm/charts/
-helm install my-nats nats/nats
-```
-
-## Configuration
-
-### Server Image
-
-```yaml
-nats:
- image: nats:2.6.5-alpine
- pullPolicy: IfNotPresent
-```
-
-### Limits
-
-```yaml
-nats:
- # The number of connect attempts against discovered routes.
- connectRetries: 30
-
- # How many seconds should pass before sending a PING
- # to a client that has no activity.
- pingInterval:
-
- # Server settings.
- limits:
- maxConnections:
- maxSubscriptions:
- maxControlLine:
- maxPayload:
-
- writeDeadline:
- maxPending:
- maxPings:
- lameDuckDuration:
-
- # Number of seconds to wait for client connections to end after the pod termination is requested
- terminationGracePeriodSeconds: 60
-```
-
-### Logging
-
-*Note*: It is not recommended to enable trace or debug in production since enabling it will significantly degrade performance.
-
-```yaml
-nats:
- logging:
- debug:
- trace:
- logtime:
- connectErrorReports:
- reconnectErrorReports:
-```
-
-### TLS setup for client connections
-
-You can find more on how to set up and troubleshoot TLS connections at:
-https://docs.nats.io/nats-server/configuration/securing_nats/tls
-
-```yaml
-nats:
- tls:
- secret:
- name: nats-client-tls
- ca: "ca.crt"
- cert: "tls.crt"
- key: "tls.key"
-```
-
-## Clustering
-
-If clustering is enabled, a 3-node cluster will be set up. More info at:
-https://docs.nats.io/nats-server/configuration/clustering#nats-server-clustering
-
-```yaml
-cluster:
- enabled: true
- replicas: 3
-
- tls:
- secret:
- name: nats-server-tls
- ca: "ca.crt"
- cert: "tls.crt"
- key: "tls.key"
-```
-
-Example:
-
-```sh
-$ helm install nats nats/nats --set cluster.enabled=true
-```
-
-## Leafnodes
-
-Leafnode connections can be used to extend a cluster. More info at:
-https://docs.nats.io/nats-server/configuration/leafnodes
-
-```yaml
-leafnodes:
- enabled: true
- remotes:
- - url: "tls://connect.ngs.global:7422"
- # credentials:
- # secret:
- # name: leafnode-creds
- # key: TA.creds
- # tls:
- # secret:
- # name: nats-leafnode-tls
- # ca: "ca.crt"
- # cert: "tls.crt"
- # key: "tls.key"
-
- #######################
- # #
- # TLS Configuration #
- # #
- #######################
- #
- # # You can find more on how to set up and troubleshoot TLS connections at:
- #
- # # https://docs.nats.io/nats-server/configuration/securing_nats/tls
- #
- tls:
- secret:
- name: nats-client-tls
- ca: "ca.crt"
- cert: "tls.crt"
- key: "tls.key"
-```
-
-## Setting up External Access
-
-### Using HostPorts
-
-When both external access and advertisements are enabled, an
-initializer container is used to gather the public IPs. This
-container must be granted enough RBAC permissions to look up the
-public IP of the node where it is running.
-
-For example, to set up external access for a cluster and advertise the public IP to clients:
-
-```yaml
-nats:
- # Toggle whether to enable external access.
- # This binds a host port for clients, gateways and leafnodes.
- externalAccess: true
-
- # Toggle to disable client advertisements (connect_urls),
- # in case of running behind a load balancer (which is not recommended)
- # it might be required to disable advertisements.
- advertise: true
-
- # In case both external access and advertise are enabled
- # then a service account would be required to be able to
- # gather the public ip from a node.
- serviceAccount: "nats-server"
-```
-
-Where the service account named `nats-server` has the following RBAC policy for example:
-
-```yaml
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: nats-server
- namespace: default
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: nats-server
-rules:
-- apiGroups: [""]
- resources:
- - nodes
- verbs: ["get"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: nats-server-binding
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: nats-server
-subjects:
-- kind: ServiceAccount
- name: nats-server
- namespace: default
-```
-
-The container image of the initializer can be customized via:
-
-```yaml
-bootconfig:
- image: natsio/nats-boot-config:latest
- pullPolicy: IfNotPresent
-```
-
-### Using LoadBalancers
-
-When using a load balancer for external access, it is recommended to enable `noAdvertise`
-so that the internal IPs of the NATS servers are not advertised to clients connecting through
-the load balancer.
-
-```yaml
-nats:
- image: nats:alpine
-
-cluster:
- enabled: true
- noAdvertise: true
-
-leafnodes:
- enabled: true
- noAdvertise: true
-
-natsbox:
- enabled: true
-```
-
-You can then use an L4-enabled load balancer to connect to NATS, for example:
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
- name: nats-lb
-spec:
- type: LoadBalancer
- selector:
- app.kubernetes.io/name: nats
- ports:
- - protocol: TCP
- port: 4222
- targetPort: 4222
- name: nats
- - protocol: TCP
- port: 7422
- targetPort: 7422
- name: leafnodes
- - protocol: TCP
- port: 7522
- targetPort: 7522
- name: gateways
-```
-
-## Gateways
-
-A super cluster can be formed by pointing to remote gateways.
-You can find more about gateways in the NATS documentation:
-https://docs.nats.io/nats-server/configuration/gateways
-
-```yaml
-gateway:
- enabled: false
- name: 'default'
-
- #############################
- # #
- # List of remote gateways #
- # #
- #############################
- # gateways:
- # - name: other
- # url: nats://my-gateway-url:7522
-
- #######################
- # #
- # TLS Configuration #
- # #
- #######################
- #
- # # You can find more on how to set up and troubleshoot TLS connections at:
- #
- # # https://docs.nats.io/nats-server/configuration/securing_nats/tls
- #
- # tls:
- # secret:
- # name: nats-client-tls
- # ca: "ca.crt"
- # cert: "tls.crt"
- # key: "tls.key"
-```
-
-## Auth setup
-
-### Auth with a Memory Resolver
-
-```yaml
-auth:
- enabled: true
-
- # Reference to the Operator JWT.
- operatorjwt:
- configMap:
- name: operator-jwt
- key: KO.jwt
-
- # Public key of the System Account
- systemAccount:
-
- resolver:
- ############################
- # #
- # Memory resolver settings #
- # #
- ##############################
- type: memory
-
- #
- # Use a configmap reference which will be mounted
- # into the container.
- #
- configMap:
- name: nats-accounts
- key: resolver.conf
-```
-
-### Auth using an Account Server Resolver
-
-```yaml
-auth:
- enabled: true
-
- # Reference to the Operator JWT.
- operatorjwt:
- configMap:
- name: operator-jwt
- key: KO.jwt
-
- # Public key of the System Account
- systemAccount:
-
- resolver:
- ##########################
- # #
- # URL resolver settings #
- # #
- ##########################
- type: URL
- url: "http://nats-account-server:9090/jwt/v1/accounts/"
-```
-
-## JetStream
-
-### Setting up Memory and File Storage
-
-```yaml
-nats:
- image: nats:alpine
-
- jetstream:
- enabled: true
-
- memStorage:
- enabled: true
- size: 2Gi
-
- fileStorage:
- enabled: true
- size: 1Gi
- storageDirectory: /data/
- storageClassName: default
-```
-
-### Using with an existing PersistentVolumeClaim
-
-For example, given the following `PersistentVolumeClaim`:
-
-```yaml
----
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
- name: nats-js-disk
- annotations:
- volume.beta.kubernetes.io/storage-class: "default"
-spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 3Gi
-```
-
-You can start JetStream so that one pod is bounded to it:
-
-```yaml
-nats:
- image: nats:alpine
-
- jetstream:
- enabled: true
-
- fileStorage:
- enabled: true
- storageDirectory: /data/
- existingClaim: nats-js-disk
- claimStorageSize: 3Gi
-```
-
-### Clustering example
-
-```yaml
-
-nats:
- image: nats:alpine
-
- jetstream:
- enabled: true
-
- memStorage:
- enabled: true
- size: "2Gi"
-
- fileStorage:
- enabled: true
- size: "1Gi"
- storageDirectory: /data/
- storageClassName: default
-
-cluster:
- enabled: true
- # Cluster name is required, by default will be release name.
- # name: "nats"
- replicas: 3
-```
-
-### Basic Authentication and JetStream
-
-```yaml
-nats:
- image: nats:alpine
-
- jetstream:
- enabled: true
-
- memStorage:
- enabled: true
- size: "2Gi"
-
- fileStorage:
- enabled: true
- size: "8Gi"
- storageDirectory: /data/
- storageClassName: gp2
-
-cluster:
- enabled: true
- # Can set a custom cluster name
- # name: "nats"
- replicas: 3
-
-auth:
- enabled: true
-
- systemAccount: sys
-
- basic:
- accounts:
- sys:
- users:
- - user: sys
- pass: sys
- js:
- jetstream: true
- users:
- - user: foo
-```
-
-### NATS Resolver setup example
-
-As of NATS v2.2, the server has a built-in NATS account resolver.
-The following is an example of how to configure it.
-
-```sh
-# Create a working directory to keep the creds.
-mkdir nats-creds
-cd nats-creds
-
-# This just creates some accounts for you to get started.
-curl -fSl https://nats-io.github.io/k8s/setup/nsc-setup.sh | sh
-source .nsc.env
-
-# You should have some accounts now, at least the following.
-nsc list accounts
-+-------------------------------------------------------------------+
-| Accounts |
-+--------+----------------------------------------------------------+
-| Name | Public Key |
-+--------+----------------------------------------------------------+
-| A | ABJ4OIKBBFCNXZDP25C7EWXCXOVCYYAGBEHFAG7F5XYCOYPHZLNSJYDF |
-| B | ACVRK7GFBRQUCB3NEABGQ7XPNED2BSPT27GOX5QBDYW2NOFMQKK755DJ |
-| SYS | ADGFH4NYV5V75SVM5DYSW5AWOD7H2NRUWAMO6XLZKIDGUWYEXCZG5D6N |
-+--------+----------------------------------------------------------+
-
-# Now create an account with JetStream support
-export account=JS1
-nsc add account --name $account
-nsc edit account --name $account --js-disk-storage -1 --js-consumer -1 --js-streams -1
-nsc add user -a $account js-user
-```
-
-Next, generate the NATS resolver config. This will be used to fill in the values of the YAML in the Helm template.
-For example the result of generating this:
-
-```sh
-nsc generate config --sys-account SYS --nats-resolver
-
-# Operator named KO
-operator: eyJ0eXAiOiJKV1QiLCJhbGciOiJlZDI1NTE5LW5rZXkifQ.eyJqdGkiOiJDRlozRlE0WURNTUc1Q1UzU0FUWVlHWUdQUDJaQU1QUzVNRUdNWFdWTUJFWUdIVzc2WEdBIiwiaWF0IjoxNjMyNzgzMDk2LCJpc3MiOiJPQ0lWMlFGSldJTlpVQVQ1VDJZSkJJUkMzQjZKS01TWktRTkY1S0dQNE4zS1o0RkZEVkFXWVhDTCIsIm5hbWUiOiJLTyIsInN1YiI6Ik9DSVYyUUZKV0lOWlVBVDVUMllKQklSQzNCNkpLTVNaS1FORjVLR1A0TjNLWjRGRkRWQVdZWENMIiwibmF0cyI6eyJ0eXBlIjoib3BlcmF0b3IiLCJ2ZXJzaW9uIjoyfX0.e3gvJ-C1IBznmbUljeT_wbLRl1akv5IGBS3rbxs6mzzTvf3zlqQI4wDKVE8Gvb8qfTX6TIwocClfOqNaN3k3CQ
-
-# System Account named SYS
-system_account: ADGFH4NYV5V75SVM5DYSW5AWOD7H2NRUWAMO6XLZKIDGUWYEXCZG5D6N
-
-resolver_preload: {
- ADGFH4NYV5V75SVM5DYSW5AWOD7H2NRUWAMO6XLZKIDGUWYEXCZG5D6N: eyJ0eXAiOiJKV1QiLCJhbGciOiJlZDI1NTE5LW5rZXkifQ.eyJqdGkiOiJDR0tWVzJGQUszUE5XQTRBWkhHT083UTdZWUVPQkJYNDZaTU1VSFc1TU5QSUFVSFE0RVRRIiwiaWF0IjoxNjMyNzgzMDk2LCJpc3MiOiJPQ0lWMlFGSldJTlpVQVQ1VDJZSkJJUkMzQjZKS01TWktRTkY1S0dQNE4zS1o0RkZEVkFXWVhDTCIsIm5hbWUiOiJTWVMiLCJzdWIiOiJBREdGSDROWVY1Vjc1U1ZNNURZU1c1QVdPRDdIMk5SVVdBTU82WExaS0lER1VXWUVYQ1pHNUQ2TiIsIm5hdHMiOnsibGltaXRzIjp7InN1YnMiOi0xLCJkYXRhIjotMSwicGF5bG9hZCI6LTEsImltcG9ydHMiOi0xLCJleHBvcnRzIjotMSwid2lsZGNhcmRzIjp0cnVlLCJjb25uIjotMSwibGVhZiI6LTF9LCJkZWZhdWx0X3Blcm1pc3Npb25zIjp7InB1YiI6e30sInN1YiI6e319LCJ0eXBlIjoiYWNjb3VudCIsInZlcnNpb24iOjJ9fQ.J7g73TEn-ZT13owq4cVWl4l0hZnGK4DJtH2WWOZmGbefcCQ1xsx4cIagKc1cZTCwUpELVAYnSkmPp4LsQOspBg,
-}
-```
-
-In the YAML, this would be configured as follows:
-
-```
-auth:
- enabled: true
-
- timeout: "5s"
-
- resolver:
- type: full
-
- operator: eyJ0eXAiOiJKV1QiLCJhbGciOiJlZDI1NTE5LW5rZXkifQ.eyJqdGkiOiJDRlozRlE0WURNTUc1Q1UzU0FUWVlHWUdQUDJaQU1QUzVNRUdNWFdWTUJFWUdIVzc2WEdBIiwiaWF0IjoxNjMyNzgzMDk2LCJpc3MiOiJPQ0lWMlFGSldJTlpVQVQ1VDJZSkJJUkMzQjZKS01TWktRTkY1S0dQNE4zS1o0RkZEVkFXWVhDTCIsIm5hbWUiOiJLTyIsInN1YiI6Ik9DSVYyUUZKV0lOWlVBVDVUMllKQklSQzNCNkpLTVNaS1FORjVLR1A0TjNLWjRGRkRWQVdZWENMIiwibmF0cyI6eyJ0eXBlIjoib3BlcmF0b3IiLCJ2ZXJzaW9uIjoyfX0.e3gvJ-C1IBznmbUljeT_wbLRl1akv5IGBS3rbxs6mzzTvf3zlqQI4wDKVE8Gvb8qfTX6TIwocClfOqNaN3k3CQ
-
- systemAccount: ADGFH4NYV5V75SVM5DYSW5AWOD7H2NRUWAMO6XLZKIDGUWYEXCZG5D6N
-
- store:
- dir: "/etc/nats-config/accounts/jwt"
- size: "1Gi"
-
- resolverPreload:
- ADGFH4NYV5V75SVM5DYSW5AWOD7H2NRUWAMO6XLZKIDGUWYEXCZG5D6N: eyJ0eXAiOiJKV1QiLCJhbGciOiJlZDI1NTE5LW5rZXkifQ.eyJqdGkiOiJDR0tWVzJGQUszUE5XQTRBWkhHT083UTdZWUVPQkJYNDZaTU1VSFc1TU5QSUFVSFE0RVRRIiwiaWF0IjoxNjMyNzgzMDk2LCJpc3MiOiJPQ0lWMlFGSldJTlpVQVQ1VDJZSkJJUkMzQjZKS01TWktRTkY1S0dQNE4zS1o0RkZEVkFXWVhDTCIsIm5hbWUiOiJTWVMiLCJzdWIiOiJBREdGSDROWVY1Vjc1U1ZNNURZU1c1QVdPRDdIMk5SVVdBTU82WExaS0lER1VXWUVYQ1pHNUQ2TiIsIm5hdHMiOnsibGltaXRzIjp7InN1YnMiOi0xLCJkYXRhIjotMSwicGF5bG9hZCI6LTEsImltcG9ydHMiOi0xLCJleHBvcnRzIjotMSwid2lsZGNhcmRzIjp0cnVlLCJjb25uIjotMSwibGVhZiI6LTF9LCJkZWZhdWx0X3Blcm1pc3Npb25zIjp7InB1YiI6e30sInN1YiI6e319LCJ0eXBlIjoiYWNjb3VudCIsInZlcnNpb24iOjJ9fQ.J7g73TEn-ZT13owq4cVWl4l0hZnGK4DJtH2WWOZmGbefcCQ1xsx4cIagKc1cZTCwUpELVAYnSkmPp4LsQOspBg
-```
-
-Now we start the server with the NATS Account Resolver (`auth.resolver.type=full`):
-
-```yaml
-nats:
- image: nats:2.6.1-alpine
-
- logging:
- debug: false
- trace: false
-
- jetstream:
- enabled: true
-
- memStorage:
- enabled: true
- size: "2Gi"
-
- fileStorage:
- enabled: true
- size: "4Gi"
- storageDirectory: /data/
- storageClassName: gp2 # NOTE: AWS setup but customize as needed for your infra.
-
-cluster:
- enabled: true
- # Can set a custom cluster name
- name: "nats"
- replicas: 3
-
-auth:
- enabled: true
-
- timeout: "5s"
-
- resolver:
- type: full
-
- operator: eyJ0eXAiOiJKV1QiLCJhbGciOiJlZDI1NTE5LW5rZXkifQ.eyJqdGkiOiJDRlozRlE0WURNTUc1Q1UzU0FUWVlHWUdQUDJaQU1QUzVNRUdNWFdWTUJFWUdIVzc2WEdBIiwiaWF0IjoxNjMyNzgzMDk2LCJpc3MiOiJPQ0lWMlFGSldJTlpVQVQ1VDJZSkJJUkMzQjZKS01TWktRTkY1S0dQNE4zS1o0RkZEVkFXWVhDTCIsIm5hbWUiOiJLTyIsInN1YiI6Ik9DSVYyUUZKV0lOWlVBVDVUMllKQklSQzNCNkpLTVNaS1FORjVLR1A0TjNLWjRGRkRWQVdZWENMIiwibmF0cyI6eyJ0eXBlIjoib3BlcmF0b3IiLCJ2ZXJzaW9uIjoyfX0.e3gvJ-C1IBznmbUljeT_wbLRl1akv5IGBS3rbxs6mzzTvf3zlqQI4wDKVE8Gvb8qfTX6TIwocClfOqNaN3k3CQ
-
- systemAccount: ADGFH4NYV5V75SVM5DYSW5AWOD7H2NRUWAMO6XLZKIDGUWYEXCZG5D6N
-
- store:
- dir: "/etc/nats-config/accounts/jwt"
- size: "1Gi"
-
- resolverPreload:
- ADGFH4NYV5V75SVM5DYSW5AWOD7H2NRUWAMO6XLZKIDGUWYEXCZG5D6N: eyJ0eXAiOiJKV1QiLCJhbGciOiJlZDI1NTE5LW5rZXkifQ.eyJqdGkiOiJDR0tWVzJGQUszUE5XQTRBWkhHT083UTdZWUVPQkJYNDZaTU1VSFc1TU5QSUFVSFE0RVRRIiwiaWF0IjoxNjMyNzgzMDk2LCJpc3MiOiJPQ0lWMlFGSldJTlpVQVQ1VDJZSkJJUkMzQjZKS01TWktRTkY1S0dQNE4zS1o0RkZEVkFXWVhDTCIsIm5hbWUiOiJTWVMiLCJzdWIiOiJBREdGSDROWVY1Vjc1U1ZNNURZU1c1QVdPRDdIMk5SVVdBTU82WExaS0lER1VXWUVYQ1pHNUQ2TiIsIm5hdHMiOnsibGltaXRzIjp7InN1YnMiOi0xLCJkYXRhIjotMSwicGF5bG9hZCI6LTEsImltcG9ydHMiOi0xLCJleHBvcnRzIjotMSwid2lsZGNhcmRzIjp0cnVlLCJjb25uIjotMSwibGVhZiI6LTF9LCJkZWZhdWx0X3Blcm1pc3Npb25zIjp7InB1YiI6e30sInN1YiI6e319LCJ0eXBlIjoiYWNjb3VudCIsInZlcnNpb24iOjJ9fQ.J7g73TEn-ZT13owq4cVWl4l0hZnGK4DJtH2WWOZmGbefcCQ1xsx4cIagKc1cZTCwUpELVAYnSkmPp4LsQOspBg
-```
-
-Finally, use a local port-forward to establish a connection to one of the servers and upload the accounts.
-
-```sh
-nsc push --system-account SYS -u nats://localhost:4222 -A
-[ OK ] push to nats-server "nats://localhost:4222" using system account "SYS":
- [ OK ] push JS1 to nats-server with nats account resolver:
- [ OK ] pushed "JS1" to nats-server nats-0: jwt updated
- [ OK ] pushed "JS1" to nats-server nats-1: jwt updated
- [ OK ] pushed "JS1" to nats-server nats-2: jwt updated
- [ OK ] pushed to a total of 3 nats-server
-```
-
-Now you should be able to use JetStream and the NATS based account resolver:
-
-```sh
-nats stream ls -s localhost --creds ./nsc/nkeys/creds/KO/JS1/js-user.creds
-No Streams defined
-```
-
-## Misc
-
-### NATS Box
-
-A lightweight container with NATS and NATS Streaming utilities that is deployed alongside the cluster to confirm the setup.
-You can find the image at: https://github.com/nats-io/nats-box
-
-```yaml
-natsbox:
- enabled: true
- image: natsio/nats-box:latest
- pullPolicy: IfNotPresent
-
- # credentials:
- # secret:
- # name: nats-sys-creds
- # key: sys.creds
-```
-
-### Configuration Reload sidecar
-
-The NATS config reloader image to use:
-
-```yaml
-reloader:
- enabled: true
- image: natsio/nats-server-config-reloader:latest
- pullPolicy: IfNotPresent
-```
-
-### Prometheus Exporter sidecar
-
-You can toggle whether to start the sidecar that can be used to feed metrics to Prometheus:
-
-```yaml
-exporter:
- enabled: true
- image: natsio/prometheus-nats-exporter:latest
- pullPolicy: IfNotPresent
-```
-
-### Prometheus operator ServiceMonitor support
-
-You can enable prometheus operator ServiceMonitor:
-
-```yaml
-exporter:
- # You have to enable exporter first
- enabled: true
- serviceMonitor:
- enabled: true
- ## Specify the namespace where Prometheus Operator is running
- # namespace: monitoring
- # ...
-```
-
-### Pod Customizations
-
-#### Security Context
-
-```yaml
- # Toggle whether to set up a Pod Security Context
- # ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-securityContext:
- fsGroup: 1000
- runAsUser: 1000
- runAsNonRoot: true
-```
-
-#### Affinity
-
-
-
-`matchExpressions` must be configured according to your setup
-
-```yaml
-affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: node.kubernetes.io/purpose
- operator: In
- values:
- - nats
- podAntiAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- - labelSelector:
- matchExpressions:
- - key: app
- operator: In
- values:
- - nats
- - stan
- topologyKey: "kubernetes.io/hostname"
-```
-
-#### Service topology
-
-[Service topology](https://kubernetes.io/docs/concepts/services-networking/service-topology/) is disabled by default, but can be enabled by setting `topologyKeys`. For example:
-
-```yaml
-topologyKeys:
- - "kubernetes.io/hostname"
- - "topology.kubernetes.io/zone"
- - "topology.kubernetes.io/region"
-```
-
-#### CPU/Memory Resource Requests/Limits
-Sets the pod's CPU/memory requests and limits.
-
-```yaml
-nats:
- resources:
- requests:
- cpu: 2
- memory: 4Gi
- limits:
- cpu: 4
- memory: 6Gi
-```
-
-No resources are set by default.
-
-#### Annotations
-
-
-
-```yaml
-podAnnotations:
- key1 : "value1",
- key2 : "value2"
-```
-
-### Name Overrides
-
-Can change the name of the resources as needed with:
-
-```yaml
-nameOverride: "my-nats"
-```
-
-### Image Pull Secrets
-
-```yaml
-imagePullSecrets:
-- name: myRegistry
-```
-
-Adds this to the StatefulSet:
-
-```yaml
-spec:
- imagePullSecrets:
- - name: myRegistry
-```
-
-### Mixed TLS and non-TLS mode
-
-You can use the `nats.tls.allowNonTLS` option to allow a cluster to use TLS connections
-and plain connections:
-
-```yaml
-nats:
- client:
- port: 4222
-
- tls:
- allowNonTLS: true
- secret:
- name: nats-server-tls
- ca: "ca.crt"
- cert: "tls.crt"
- key: "tls.key"
- timeout: "5s"
-```
diff --git a/charts/nats/templates/NOTES.txt b/charts/nats/templates/NOTES.txt
deleted file mode 100644
index 86661eda..00000000
--- a/charts/nats/templates/NOTES.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-
-{{- if or .Values.nats.logging.debug .Values.nats.logging.trace }}
-*WARNING*: Keep in mind that running the server with
-debug and/or trace enabled significantly affects the
-performance of the server!
-{{- end }}
-
-You can find more information about running NATS on Kubernetes
-in the NATS documentation website:
-
- https://docs.nats.io/nats-on-kubernetes/nats-kubernetes
-
-{{- if .Values.natsbox.enabled }}
-
-NATS Box has been deployed into your cluster, you can
-now use the NATS tools within the container as follows:
-
- kubectl exec -n {{ template "nats.namespace" . }} -it deployment/{{ template "nats.fullname" . }}-box -- /bin/sh -l
-
- nats-box:~# nats-sub test &
- nats-box:~# nats-pub test hi
- nats-box:~# nc {{ template "nats.fullname" . }} {{ .Values.nats.client.port }}
-
-{{- end }}
-
-Thanks for using NATS!
diff --git a/charts/nats/templates/_helpers.tpl b/charts/nats/templates/_helpers.tpl
deleted file mode 100644
index 5499b6a5..00000000
--- a/charts/nats/templates/_helpers.tpl
+++ /dev/null
@@ -1,147 +0,0 @@
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "nats.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{- define "nats.namespace" -}}
-{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-
-{{- define "nats.fullname" -}}
-{{- if .Values.fullnameOverride -}}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- if contains $name .Release.Name -}}
-{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "nats.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Common labels
-*/}}
-{{- define "nats.labels" -}}
-helm.sh/chart: {{ include "nats.chart" . }}
-{{- range $name, $value := .Values.commonLabels }}
-{{ $name }}: {{ tpl $value $ }}
-{{- end }}
-{{ include "nats.selectorLabels" . }}
-{{- if .Chart.AppVersion }}
-app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
-{{- end }}
-app.kubernetes.io/managed-by: {{ .Release.Service }}
-{{- end }}
-
-{{/*
-Selector labels
-*/}}
-{{- define "nats.selectorLabels" -}}
-{{- if .Values.nats.selectorLabels }}
-{{ tpl (toYaml .Values.nats.selectorLabels) . }}
-{{- else }}
-app.kubernetes.io/name: {{ include "nats.name" . }}
-app.kubernetes.io/instance: {{ .Release.Name }}
-{{- end }}
-{{- end }}
-
-
-{{/*
-Return the proper NATS image name
-*/}}
-{{- define "nats.clusterAdvertise" -}}
-{{- if $.Values.useFQDN }}
-{{- printf "$(POD_NAME).%s.$(POD_NAMESPACE).svc.%s" (include "nats.fullname" . ) $.Values.k8sClusterDomain }}
-{{- else }}
-{{- printf "$(POD_NAME).%s.$(POD_NAMESPACE)" (include "nats.fullname" . ) }}
-{{- end }}
-{{- end }}
-
-{{/*
-Return the NATS cluster routes.
-*/}}
-{{- define "nats.clusterRoutes" -}}
-{{- $name := (include "nats.fullname" . ) -}}
-{{- $namespace := (include "nats.namespace" . ) -}}
-{{- range $i, $e := until (.Values.cluster.replicas | int) -}}
-{{- if $.Values.useFQDN }}
-{{- printf "nats://%s-%d.%s.%s.svc.%s:6222," $name $i $name $namespace $.Values.k8sClusterDomain -}}
-{{- else }}
-{{- printf "nats://%s-%d.%s.%s:6222," $name $i $name $namespace -}}
-{{- end }}
-{{- end -}}
-{{- end }}
-
-{{- define "nats.extraRoutes" -}}
-{{- range $i, $url := .Values.cluster.extraRoutes -}}
-{{- printf "%s," $url -}}
-{{- end -}}
-{{- end }}
-
-{{- define "nats.tlsConfig" -}}
-tls {
-{{- if .cert }}
- cert_file: {{ .secretPath }}/{{ .secret.name }}/{{ .cert }}
-{{- end }}
-{{- if .key }}
- key_file: {{ .secretPath }}/{{ .secret.name }}/{{ .key }}
-{{- end }}
-{{- if .ca }}
- ca_file: {{ .secretPath }}/{{ .secret.name }}/{{ .ca }}
-{{- end }}
-{{- if .insecure }}
- insecure: {{ .insecure }}
-{{- end }}
-{{- if .verify }}
- verify: {{ .verify }}
-{{- end }}
-{{- if .verifyAndMap }}
- verify_and_map: {{ .verifyAndMap }}
-{{- end }}
-{{- if .curvePreferences }}
- curve_preferences: {{ .curvePreferences }}
-{{- end }}
-{{- if .timeout }}
- timeout: {{ .timeout }}
-{{- end }}
-{{- if .cipherSuites }}
- cipher_suites: {{ toRawJson .cipherSuites }}
-{{- end }}
-}
-{{- end }}
-
-{{/*
-Return the appropriate apiVersion for networkpolicy.
-*/}}
-{{- define "networkPolicy.apiVersion" -}}
-{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}}
-{{- print "extensions/v1beta1" -}}
-{{- else -}}
-{{- print "networking.k8s.io/v1" -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Renders a value that contains template.
-Usage:
-{{ include "tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }}
-*/}}
-{{- define "tplvalues.render" -}}
- {{- if typeIs "string" .value }}
- {{- tpl .value .context }}
- {{- else }}
- {{- tpl (.value | toYaml) .context }}
- {{- end }}
-{{- end -}}
diff --git a/charts/nats/templates/configmap.yaml b/charts/nats/templates/configmap.yaml
deleted file mode 100644
index ceb891ae..00000000
--- a/charts/nats/templates/configmap.yaml
+++ /dev/null
@@ -1,544 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ include "nats.fullname" . }}-config
- namespace: {{ include "nats.namespace" . }}
- labels:
- {{- include "nats.labels" . | nindent 4 }}
-data:
- nats.conf: |
- # NATS Clients Port
- port: {{ .Values.nats.client.port }}
-
- # PID file shared with configuration reloader.
- pid_file: "/var/run/nats/nats.pid"
-
- {{- if .Values.nats.config }}
- ###########
- # #
- # Imports #
- # #
- ###########
- {{- range .Values.nats.config }}
- include ./{{ .name }}/{{ .name }}.conf
- {{- end}}
- {{- end }}
-
- ###############
- # #
- # Monitoring #
- # #
- ###############
- http: 8222
- server_name: {{- if .Values.nats.serverNamePrefix }}$SERVER_NAME{{- else }}$POD_NAME{{- end }}
-
- {{- if .Values.nats.tls }}
- #####################
- # #
- # TLS Configuration #
- # #
- #####################
- {{- with .Values.nats.tls }}
- {{- $nats_tls := merge (dict) . }}
- {{- $_ := set $nats_tls "secretPath" "/etc/nats-certs/clients" }}
- {{- tpl (include "nats.tlsConfig" $nats_tls) $ | nindent 4}}
- {{- end }}
-
- {{- if .Values.nats.tls.allowNonTLS }}
- allow_non_tls: {{ .Values.nats.tls.allowNonTLS }}
- {{- end }}
-
- {{- end }}
-
- {{- if .Values.nats.jetstream.enabled }}
- ###################################
- # #
- # NATS JetStream #
- # #
- ###################################
- jetstream {
- {{- if .Values.nats.jetstream.encryption }}
- {{- if .Values.nats.jetstream.encryption.key }}
- key: {{ .Values.nats.jetstream.encryption.key | quote }}
- {{- else if .Values.nats.jetstream.encryption.secret }}
- key: $JS_KEY
- {{- end}}
- {{- end}}
-
- {{- if .Values.nats.jetstream.memStorage.enabled }}
- max_mem: {{ .Values.nats.jetstream.memStorage.size }}
- {{- end }}
-
- {{- if .Values.nats.jetstream.domain }}
- domain: {{ .Values.nats.jetstream.domain }}
- {{- end }}
-
- {{- if .Values.nats.jetstream.fileStorage.enabled }}
- store_dir: {{ .Values.nats.jetstream.fileStorage.storageDirectory }}
-
- max_file:
- {{- if .Values.nats.jetstream.fileStorage.existingClaim }}
- {{- .Values.nats.jetstream.fileStorage.claimStorageSize }}
- {{- else }}
- {{- .Values.nats.jetstream.fileStorage.size }}
- {{- end }}
- {{- end }}
- }
- {{- end }}
- {{- if .Values.mqtt.enabled }}
- ###################################
- # #
- # NATS MQTT #
- # #
- ###################################
- mqtt {
- port: 1883
-
- {{- with .Values.mqtt.tls }}
- {{- $mqtt_tls := merge (dict) . }}
- {{- $_ := set $mqtt_tls "secretPath" "/etc/nats-certs/mqtt" }}
- {{- tpl (include "nats.tlsConfig" $mqtt_tls) $ | nindent 6}}
- {{- end }}
-
- {{- if .Values.mqtt.noAuthUser }}
- no_auth_user: {{ .Values.mqtt.noAuthUser | quote }}
- {{- end }}
-
- ack_wait: {{ .Values.mqtt.ackWait | quote }}
- max_ack_pending: {{ .Values.mqtt.maxAckPending }}
- }
- {{- end }}
-
- {{- if .Values.cluster.enabled }}
- ###################################
- # #
- # NATS Full Mesh Clustering Setup #
- # #
- ###################################
- cluster {
- port: 6222
-
- {{- if .Values.nats.jetstream.enabled }}
- {{- if .Values.cluster.name }}
- name: {{ .Values.cluster.name }}
- {{- else }}
- name: {{ template "nats.name" . }}
- {{- end }}
- {{- else }}
- {{- with .Values.cluster.name }}
- name: {{ . }}
- {{- end }}
- {{- end }}
-
- {{- with .Values.cluster.tls }}
- {{- $cluster_tls := merge (dict) . }}
- {{- $_ := set $cluster_tls "secretPath" "/etc/nats-certs/cluster" }}
- {{- tpl (include "nats.tlsConfig" $cluster_tls) $ | nindent 6}}
- {{- end }}
-
- {{- if .Values.cluster.authorization }}
- authorization {
- {{- with .Values.cluster.authorization.user }}
- user: {{ . }}
- {{- end }}
- {{- with .Values.cluster.authorization.password }}
- password: {{ . }}
- {{- end }}
- {{- with .Values.cluster.authorization.timeout }}
- timeout: {{ . }}
- {{- end }}
- }
- {{- end }}
-
- routes = [
- {{ include "nats.clusterRoutes" . }}
- {{ include "nats.extraRoutes" . }}
- ]
- cluster_advertise: $CLUSTER_ADVERTISE
-
- {{- with .Values.cluster.noAdvertise }}
- no_advertise: {{ . }}
- {{- end }}
-
- connect_retries: {{ .Values.nats.connectRetries }}
- }
- {{- end }}
-
- {{- if and .Values.nats.advertise .Values.nats.externalAccess }}
- include "advertise/client_advertise.conf"
- {{- end }}
-
- {{- if or .Values.leafnodes.enabled .Values.leafnodes.remotes }}
- #################
- # #
- # NATS Leafnode #
- # #
- #################
- leafnodes {
- {{- if .Values.leafnodes.enabled }}
- listen: "0.0.0.0:7422"
- {{- end }}
-
- {{- if and .Values.nats.advertise .Values.nats.externalAccess }}
- include "advertise/gateway_advertise.conf"
- {{- end }}
-
- {{- with .Values.leafnodes.noAdvertise }}
- no_advertise: {{ . }}
- {{- end }}
-
- {{- with .Values.leafnodes.authorization }}
- authorization: {
- {{- with .user }}
- user: {{ . }}
- {{- end }}
- {{- with .password }}
- password: {{ . }}
- {{- end }}
- {{- with .account }}
- account: {{ . | quote }}
- {{- end }}
- {{- with .timeout }}
- timeout: {{ . }}
- {{- end }}
- {{- with .users }}
- users: [
- {{- range . }}
- {{- toRawJson . | nindent 10 }},
- {{- end }}
- ]
- {{- end }}
- }
- {{- end }}
-
- {{- with .Values.leafnodes.tls }}
- {{- if .custom }}
- tls {
- {{- .custom | nindent 8 }}
- }
- {{- else }}
- {{- $leafnode_tls := merge (dict) . }}
- {{- $_ := set $leafnode_tls "secretPath" "/etc/nats-certs/leafnodes" }}
- {{- tpl (include "nats.tlsConfig" $leafnode_tls) $ | nindent 6}}
- {{- end }}
- {{- end }}
-
- remotes: [
- {{- range .Values.leafnodes.remotes }}
- {
- {{- with .url }}
- url: {{ . | quote }}
- {{- end }}
-
- {{- with .urls }}
- urls: {{ toRawJson . }}
- {{- end }}
-
- {{- with .account }}
- account: {{ . | quote }}
- {{- end }}
-
- {{- with .credentials }}
- credentials: "/etc/nats-creds/{{ .secret.name }}/{{ .secret.key }}"
- {{- end }}
-
- {{- with .tls }}
- tls: {
- {{- if .custom }}
- {{- .custom | nindent 10 }}
- {{- else }}
- {{ $secretName := tpl .secret.name $ }}
- {{- with .cert }}
- cert_file: /etc/nats-certs/leafnodes/{{ $secretName }}/{{ . }}
- {{- end }}
-
- {{- with .key }}
- key_file: /etc/nats-certs/leafnodes/{{ $secretName }}/{{ . }}
- {{- end }}
-
- {{- with .ca }}
- ca_file: /etc/nats-certs/leafnodes/{{ $secretName }}/{{ . }}
- {{- end }}
- {{- end }}
- }
- {{- end }}
- }
- {{- end }}
- ]
- }
- {{- end }}
-
- {{- if .Values.gateway.enabled }}
- #################
- # #
- # NATS Gateways #
- # #
- #################
- gateway {
- name: {{ .Values.gateway.name }}
- port: 7522
-
- {{- if .Values.gateway.advertise }}
- advertise: {{ .Values.gateway.advertise }}
- {{- end }}
-
- {{- if .Values.gateway.rejectUnknownCluster }}
- reject_unknown_cluster: {{ .Values.gateway.rejectUnknownCluster }}
- {{- end }}
-
- {{- if .Values.gateway.authorization }}
- authorization {
- {{- with .Values.gateway.authorization.user }}
- user: {{ . }}
- {{- end }}
- {{- with .Values.gateway.authorization.password }}
- password: {{ . }}
- {{- end }}
- {{- with .Values.gateway.authorization.timeout }}
- timeout: {{ . }}
- {{- end }}
- }
- {{- end }}
-
- {{- if and .Values.nats.advertise .Values.nats.externalAccess }}
- include "advertise/gateway_advertise.conf"
- {{- end }}
-
- {{- with .Values.gateway.tls }}
- {{- $gateway_tls := merge (dict) . }}
- {{- $_ := set $gateway_tls "secretPath" "/etc/nats-certs/gateways" }}
- {{- tpl (include "nats.tlsConfig" $gateway_tls) $ | nindent 6}}
- {{- end }}
-
- # Gateways array here
- gateways: [
- {{- range .Values.gateway.gateways }}
- {
- {{- with .name }}
- name: {{ . }}
- {{- end }}
-
- {{- with .url }}
- url: {{ . | quote }}
- {{- end }}
-
- {{- with .urls }}
- urls: [{{ join "," . }}]
- {{- end }}
- },
- {{- end }}
- ]
- }
- {{- end }}
-
- {{- with .Values.nats.logging.debug }}
- debug: {{ . }}
- {{- end }}
-
- {{- with .Values.nats.logging.trace }}
- trace: {{ . }}
- {{- end }}
-
- {{- with .Values.nats.logging.logtime }}
- logtime: {{ . }}
- {{- end }}
-
- {{- with .Values.nats.logging.connectErrorReports }}
- connect_error_reports: {{ . }}
- {{- end }}
-
- {{- with .Values.nats.logging.reconnectErrorReports }}
- reconnect_error_reports: {{ . }}
- {{- end }}
-
- {{- with .Values.nats.limits.maxConnections }}
- max_connections: {{ . }}
- {{- end }}
-
- {{- with .Values.nats.limits.maxSubscriptions }}
- max_subscriptions: {{ . }}
- {{- end }}
-
- {{- with .Values.nats.limits.maxPending }}
- max_pending: {{ . }}
- {{- end }}
-
- {{- with .Values.nats.limits.maxControlLine }}
- max_control_line: {{ . }}
- {{- end }}
-
- {{- with .Values.nats.limits.maxPayload }}
- max_payload: {{ . }}
- {{- end }}
-
- {{- with .Values.nats.limits.pingInterval }}
- ping_interval: {{ . }}
- {{- end }}
-
- {{- with .Values.nats.limits.maxPings }}
- ping_max: {{ . }}
- {{- end }}
-
- {{- with .Values.nats.limits.writeDeadline }}
- write_deadline: {{ . }}
- {{- end }}
-
- {{- with .Values.nats.limits.lameDuckDuration }}
- lame_duck_duration: {{ . }}
- {{- end }}
-
- {{- if .Values.websocket.enabled }}
- ##################
- # #
- # Websocket #
- # #
- ##################
- websocket {
- port: {{ .Values.websocket.port }}
- {{- with .Values.websocket.tls }}
- {{ $secretName := tpl .secret.name $ }}
- tls {
- {{- with .cert }}
- cert_file: /etc/nats-certs/ws/{{ $secretName }}/{{ . }}
- {{- end }}
-
- {{- with .key }}
- key_file: /etc/nats-certs/ws/{{ $secretName }}/{{ . }}
- {{- end }}
-
- {{- with .ca }}
- ca_file: /etc/nats-certs/ws/{{ $secretName }}/{{ . }}
- {{- end }}
- }
- {{- else }}
- no_tls: {{ .Values.websocket.noTLS }}
- {{- end }}
- same_origin: {{ .Values.websocket.sameOrigin }}
- {{- with .Values.websocket.allowedOrigins }}
- allowed_origins: {{ toRawJson . }}
- {{- end }}
- }
- {{- end }}
-
- {{- if .Values.auth.enabled }}
- ##################
- # #
- # Authorization #
- # #
- ##################
- {{- if .Values.auth.resolver }}
- {{- if eq .Values.auth.resolver.type "memory" }}
- resolver: MEMORY
- include "accounts/{{ .Values.auth.resolver.configMap.key }}"
- {{- end }}
-
- {{- if eq .Values.auth.resolver.type "full" }}
- {{- if .Values.auth.resolver.configMap }}
- include "accounts/{{ .Values.auth.resolver.configMap.key }}"
- {{- else }}
- {{- with .Values.auth.resolver }}
- {{- if $.Values.auth.timeout }}
- authorization {
- timeout: {{ $.Values.auth.timeout }}
- }
- {{- end }}
-
- {{- if .operator }}
- operator: {{ .operator }}
- {{- end }}
-
- {{- if .systemAccount }}
- system_account: {{ .systemAccount }}
- {{- end }}
- {{- end }}
-
- resolver: {
- type: full
- {{- with .Values.auth.resolver }}
- dir: {{ .store.dir | quote }}
-
- allow_delete: {{ .allowDelete }}
-
- interval: {{ .interval | quote }}
- {{- end }}
- }
- {{- end }}
- {{- end }}
-
- {{- if .Values.auth.resolver.resolverPreload }}
- resolver_preload: {{ toRawJson .Values.auth.resolver.resolverPreload }}
- {{- end }}
-
- {{- if eq .Values.auth.resolver.type "URL" }}
- {{- with .Values.auth.resolver.url }}
- resolver: URL({{ . }})
- {{- end }}
- operator: /etc/nats-config/operator/{{ .Values.auth.operatorjwt.configMap.key }}
- {{- end }}
- {{- end }}
-
- {{- with .Values.auth.systemAccount }}
- system_account: {{ . }}
- {{- end }}
-
- {{- with .Values.auth.token }}
- authorization {
- token: "{{ . }}"
-
-
- {{- if $.Values.auth.timeout }}
- timeout: {{ $.Values.auth.timeout }}
- {{- end }}
- }
- {{- end }}
-
- {{- with .Values.auth.nkeys }}
- {{- with .users }}
- authorization {
- {{- if $.Values.auth.timeout }}
- timeout: {{ $.Values.auth.timeout }}
- {{- end }}
-
- users: [
- {{- range . }}
- {{- toRawJson . | nindent 4 }},
- {{- end }}
- ]
- }
- {{- end }}
- {{- end }}
-
- {{- with .Values.auth.basic }}
-
- {{- with .noAuthUser }}
- no_auth_user: {{ . }}
- {{- end }}
-
- {{- with .users }}
- authorization {
- {{- if $.Values.auth.timeout }}
- timeout: {{ $.Values.auth.timeout }}
- {{- end }}
-
- users: [
- {{- range . }}
- {{- toRawJson . | nindent 4 }},
- {{- end }}
- ]
- }
- {{- end }}
-
- {{- with .accounts }}
- authorization {
- {{- if $.Values.auth.timeout }}
- timeout: {{ $.Values.auth.timeout }}
- {{- end }}
- }
-
- accounts: {{- toRawJson . }}
- {{- end }}
-
- {{- end }}
-
- {{- end }}
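The deleted ConfigMap template above renders, among other things, the server's `authorization` blocks for token, NKey, and basic user/password auth. For reference only (not part of the chart or this repository), a minimal Go sketch of a client exercising such a rendered configuration with the nats.go library; the URL and token value below are placeholders:

```go
package main

import (
	"log"
	"time"

	"github.com/nats-io/nats.go"
)

func main() {
	// Token auth, matching an `auth.token` authorization block rendered by the
	// template above. The URL and token are placeholder values.
	nc, err := nats.Connect("nats://my-nats.default.svc:4222",
		nats.Token("s3cr3t"),
		nats.Timeout(5*time.Second),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	// A basic user/password setup (auth.basic.users) would instead use:
	//   nats.UserInfo("user", "password")
	log.Println("connected to", nc.ConnectedUrl())
}
```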
diff --git a/charts/nats/templates/nats-box.yaml b/charts/nats/templates/nats-box.yaml
deleted file mode 100644
index 0c9f3f67..00000000
--- a/charts/nats/templates/nats-box.yaml
+++ /dev/null
@@ -1,117 +0,0 @@
-{{- if .Values.natsbox.enabled }}
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ include "nats.fullname" . }}-box
- namespace: {{ include "nats.namespace" . }}
- labels:
- app: {{ include "nats.fullname" . }}-box
- chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
- {{- if .Values.natsbox.additionalLabels }}
- {{- tpl (toYaml .Values.natsbox.additionalLabels) $ | nindent 4 }}
- {{- end }}
-spec:
- replicas: 1
- selector:
- matchLabels:
- app: {{ include "nats.fullname" . }}-box
- template:
- metadata:
- labels:
- app: {{ include "nats.fullname" . }}-box
- {{- if .Values.natsbox.podLabels }}
- {{- tpl (toYaml .Values.natsbox.podLabels) $ | nindent 8 }}
- {{- end }}
- {{- if .Values.natsbox.podAnnotations }}
- annotations:
- {{- range $key, $value := .Values.natsbox.podAnnotations }}
- {{ $key }}: {{ $value | quote }}
- {{- end }}
- {{- end }}
- spec:
- {{- with .Values.natsbox.affinity }}
- affinity:
- {{- tpl (toYaml .) $ | nindent 8 }}
- {{- end }}
- {{- with .Values.natsbox.nodeSelector }}
- nodeSelector: {{ toYaml . | nindent 8 }}
- {{- end }}
- {{- with .Values.natsbox.tolerations }}
- tolerations: {{ toYaml . | nindent 8 }}
- {{- end }}
- volumes:
- {{- if .Values.natsbox.credentials }}
- - name: nats-sys-creds
- secret:
- secretName: {{ .Values.natsbox.credentials.secret.name }}
- {{- end }}
- {{- if .Values.natsbox.extraVolumes }}
- {{- toYaml .Values.natsbox.extraVolumes | nindent 6}}
- {{- end }}
- {{- with .Values.nats.tls }}
- {{ $secretName := tpl .secret.name $ }}
- - name: {{ $secretName }}-clients-volume
- secret:
- secretName: {{ $secretName }}
- {{- end }}
-{{- with .Values.securityContext }}
- securityContext:
-{{ toYaml . | indent 8 }}
-{{- end }}
- {{- with .Values.imagePullSecrets }}
- imagePullSecrets:
- {{- toYaml . | nindent 8 }}
- {{- end }}
- containers:
- - name: nats-box
- image: {{ .Values.natsbox.image }}
- imagePullPolicy: {{ .Values.natsbox.pullPolicy }}
- {{- if .Values.natsbox.securityContext }}
- securityContext:
- {{- .Values.natsbox.securityContext | toYaml | nindent 10 }}
- {{- end }}
- resources:
- {{- toYaml .Values.natsbox.resources | nindent 10 }}
- env:
- - name: NATS_URL
- value: {{ template "nats.fullname" . }}
- {{- if .Values.natsbox.credentials }}
- - name: USER_CREDS
- value: /etc/nats-config/creds/{{ .Values.natsbox.credentials.secret.key }}
- - name: USER2_CREDS
- value: /etc/nats-config/creds/{{ .Values.natsbox.credentials.secret.key }}
- {{- end }}
- {{- with .Values.nats.tls }}
- {{ $secretName := tpl .secret.name $ }}
- lifecycle:
- postStart:
- exec:
- command:
- - /bin/sh
- - -c
- - cp /etc/nats-certs/clients/{{ $secretName }}/* /usr/local/share/ca-certificates && update-ca-certificates
- {{- end }}
- command:
- - "tail"
- - "-f"
- - "/dev/null"
- volumeMounts:
- {{- if .Values.natsbox.credentials }}
- - name: nats-sys-creds
- mountPath: /etc/nats-config/creds
- {{- end }}
- {{- if .Values.natsbox.extraVolumeMounts }}
- {{- toYaml .Values.natsbox.extraVolumeMounts | nindent 8 }}
- {{- end }}
- {{- with .Values.nats.tls }}
- #######################
- # #
- # TLS Volumes Mounts #
- # #
- #######################
- {{ $secretName := tpl .secret.name $ }}
- - name: {{ $secretName }}-clients-volume
- mountPath: /etc/nats-certs/clients/{{ $secretName }}
- {{- end }}
-{{- end }}
diff --git a/charts/nats/templates/networkpolicy.yaml b/charts/nats/templates/networkpolicy.yaml
deleted file mode 100644
index c815af32..00000000
--- a/charts/nats/templates/networkpolicy.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-{{- if .Values.networkPolicy.enabled }}
-kind: NetworkPolicy
-apiVersion: {{ template "networkPolicy.apiVersion" . }}
-metadata:
- name: {{ include "nats.fullname" . }}
- namespace: {{ include "nats.namespace" . }}
- labels:
- {{- include "nats.labels" . | nindent 4 }}
-spec:
- podSelector:
- matchLabels: {{- include "nats.selectorLabels" . | nindent 6 }}
- policyTypes:
- - Ingress
- - Egress
- egress:
- # Allow dns resolution
- - ports:
- - port: 53
- protocol: UDP
- # Allow outbound connections to other cluster pods
- - ports:
- - port: 4222
- protocol: TCP
- - port: 6222
- protocol: TCP
- - port: 8222
- protocol: TCP
- - port: 7777
- protocol: TCP
- - port: 7422
- protocol: TCP
- - port: 7522
- protocol: TCP
- to:
- - podSelector:
- matchLabels: {{- include "nats.selectorLabels" . | nindent 14 }}
- {{- if .Values.networkPolicy.extraEgress }}
- {{- include "tplvalues.render" ( dict "value" .Values.networkPolicy.extraEgress "context" $ ) | nindent 4 }}
- {{- end }}
- ingress:
- # Allow inbound connections
- - ports:
- - port: 4222
- protocol: TCP
- - port: 6222
- protocol: TCP
- - port: 8222
- protocol: TCP
- - port: 7777
- protocol: TCP
- - port: 7422
- protocol: TCP
- - port: 7522
- protocol: TCP
- {{- if not .Values.networkPolicy.allowExternal }}
- from:
- - podSelector:
- matchLabels:
- {{ include "nats.fullname" . }}-client: "true"
- - podSelector:
- matchLabels: {{- include "nats.selectorLabels" . | nindent 14 }}
- {{- if .Values.networkPolicy.ingressNSMatchLabels }}
- - namespaceSelector:
- matchLabels:
- {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }}
- {{ $key | quote }}: {{ $value | quote }}
- {{- end }}
- {{- if .Values.networkPolicy.ingressNSPodMatchLabels }}
- podSelector:
- matchLabels:
- {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }}
- {{ $key | quote }}: {{ $value | quote }}
- {{- end }}
- {{- end }}
- {{- end }}
- {{- end }}
- {{- if .Values.networkPolicy.extraIngress }}
- {{- include "tplvalues.render" ( dict "value" .Values.networkPolicy.extraIngress "context" $ ) | nindent 4 }}
- {{- end }}
-{{- end }}
diff --git a/charts/nats/templates/pdb.yaml b/charts/nats/templates/pdb.yaml
deleted file mode 100644
index b1140eb6..00000000
--- a/charts/nats/templates/pdb.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-{{- if .Values.podDisruptionBudget }}
----
-apiVersion: policy/v1beta1
-kind: PodDisruptionBudget
-metadata:
- name: {{ include "nats.fullname" . }}
- namespace: {{ include "nats.namespace" . }}
- labels:
- {{- include "nats.labels" . | nindent 4 }}
-spec:
- {{- if .Values.podDisruptionBudget.minAvailable }}
- minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
- {{- end }}
- {{- if .Values.podDisruptionBudget.maxUnavailable }}
- maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
- {{- end }}
- selector:
- matchLabels:
- {{- include "nats.selectorLabels" . | nindent 6 }}
-{{- end }}
-
diff --git a/charts/nats/templates/rbac.yaml b/charts/nats/templates/rbac.yaml
deleted file mode 100644
index f1a1c4d7..00000000
--- a/charts/nats/templates/rbac.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-{{ if and .Values.nats.externalAccess .Values.nats.advertise }}
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ .Values.nats.serviceAccount }}
- namespace: {{ include "nats.namespace" . }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: {{ .Values.nats.serviceAccount }}
-rules:
-- apiGroups: [""]
- resources:
- - nodes
- verbs: ["get"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: {{ .Values.nats.serviceAccount }}-binding
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: {{ .Values.nats.serviceAccount }}
-subjects:
-- kind: ServiceAccount
- name: {{ .Values.nats.serviceAccount }}
- namespace: {{ include "nats.namespace" . }}
-{{ end }}
diff --git a/charts/nats/templates/service.yaml b/charts/nats/templates/service.yaml
deleted file mode 100644
index acde1e64..00000000
--- a/charts/nats/templates/service.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ include "nats.fullname" . }}
- namespace: {{ include "nats.namespace" . }}
- labels:
- {{- include "nats.labels" . | nindent 4 }}
- {{- if .Values.serviceAnnotations}}
- annotations:
- {{- range $key, $value := .Values.serviceAnnotations }}
- {{ $key }}: {{ $value | quote }}
- {{- end }}
- {{- end }}
-spec:
- selector:
- {{- include "nats.selectorLabels" . | nindent 4 }}
- clusterIP: None
- {{- if .Values.topologyKeys }}
- topologyKeys:
- {{- .Values.topologyKeys | toYaml | nindent 4 }}
- {{- end }}
- ports:
- {{- if .Values.websocket.enabled }}
- - name: websocket
- port: {{ .Values.websocket.port }}
- {{- if .Values.appProtocol.enabled }}
- appProtocol: tcp
- {{- end }}
- {{- end }}
- {{- if .Values.nats.profiling.enabled }}
- - name: profiling
- port: {{ .Values.nats.profiling.port }}
- {{- if .Values.appProtocol.enabled }}
- appProtocol: http
- {{- end }}
- {{- end }}
- - name: {{ .Values.nats.client.portName }}
- port: {{ .Values.nats.client.port }}
- {{- if .Values.appProtocol.enabled }}
- appProtocol: tcp
- {{- end }}
- - name: cluster
- port: 6222
- {{- if .Values.appProtocol.enabled }}
- appProtocol: tcp
- {{- end }}
- - name: monitor
- port: 8222
- {{- if .Values.appProtocol.enabled }}
- appProtocol: http
- {{- end }}
- - name: metrics
- port: 7777
- {{- if .Values.appProtocol.enabled }}
- appProtocol: http
- {{- end }}
- - name: leafnodes
- port: 7422
- {{- if .Values.appProtocol.enabled }}
- appProtocol: tcp
- {{- end }}
- - name: gateways
- port: 7522
- {{- if .Values.appProtocol.enabled }}
- appProtocol: tcp
- {{- end }}
- {{- if .Values.mqtt.enabled }}
- - name: mqtt
- port: 1883
- {{- if .Values.appProtocol.enabled }}
- appProtocol: tcp
- {{- end }}
- {{- end }}
diff --git a/charts/nats/templates/serviceExternal.yaml b/charts/nats/templates/serviceExternal.yaml
deleted file mode 100644
index 3c238ab7..00000000
--- a/charts/nats/templates/serviceExternal.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ include "nats.fullname" . }}-external
- namespace: {{ include "nats.namespace" . }}
- labels:
- {{- include "nats.labels" . | nindent 4 }}
- {{- if .Values.serviceAnnotations}}
- annotations:
- {{- range $key, $value := .Values.serviceAnnotations }}
- {{ $key }}: {{ $value | quote }}
- {{- end }}
- {{- end }}
-spec:
- type: {{ .Values.service.type }}
- selector:
- {{- include "nats.selectorLabels" . | nindent 4 }}
- {{- if .Values.topologyKeys }}
- topologyKeys:
- {{- .Values.topologyKeys | toYaml | nindent 4 }}
- {{- end }}
- ports:
- {{- if .Values.websocket.enabled }}
- - name: websocket
- port: {{ .Values.websocket.port }}
- {{- if .Values.appProtocol.enabled }}
- appProtocol: tcp
- {{- end }}
- {{- end }}
- {{- if .Values.nats.profiling.enabled }}
- - name: profiling
- port: {{ .Values.nats.profiling.port }}
- {{- if .Values.appProtocol.enabled }}
- appProtocol: http
- {{- end }}
- {{- end }}
- - name: {{ .Values.nats.client.portName }}
- port: {{ .Values.nats.client.port }}
- {{- if .Values.appProtocol.enabled }}
- appProtocol: tcp
- {{- end }}
- - name: cluster
- port: 6222
- {{- if .Values.appProtocol.enabled }}
- appProtocol: tcp
- {{- end }}
- - name: monitor
- port: 8222
- {{- if .Values.appProtocol.enabled }}
- appProtocol: http
- {{- end }}
- - name: metrics
- port: 7777
- {{- if .Values.appProtocol.enabled }}
- appProtocol: http
- {{- end }}
- - name: leafnodes
- port: 7422
- {{- if .Values.appProtocol.enabled }}
- appProtocol: tcp
- {{- end }}
- - name: gateways
- port: 7522
- {{- if .Values.appProtocol.enabled }}
- appProtocol: tcp
- {{- end }}
- {{- if .Values.mqtt.enabled }}
- - name: mqtt
- port: 1883
- {{- if .Values.appProtocol.enabled }}
- appProtocol: tcp
- {{- end }}
- {{- end }}
diff --git a/charts/nats/templates/serviceMonitor.yaml b/charts/nats/templates/serviceMonitor.yaml
deleted file mode 100644
index 374cbae0..00000000
--- a/charts/nats/templates/serviceMonitor.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
-{{ if and .Values.exporter.enabled .Values.exporter.serviceMonitor.enabled }}
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
- name: {{ template "nats.fullname" . }}
- {{- if .Values.exporter.serviceMonitor.namespace }}
- namespace: {{ .Values.exporter.serviceMonitor.namespace }}
- {{- else }}
- namespace: {{ include "nats.namespace" . }}
- {{- end }}
- {{- if .Values.exporter.serviceMonitor.labels }}
- labels:
- {{- range $key, $value := .Values.exporter.serviceMonitor.labels }}
- {{ $key }}: {{ $value | quote }}
- {{- end }}
- {{- end }}
- {{- if .Values.exporter.serviceMonitor.annotations }}
- annotations:
- {{- range $key, $value := .Values.exporter.serviceMonitor.annotations }}
- {{ $key }}: {{ $value | quote }}
- {{- end }}
- {{- end }}
-spec:
- endpoints:
- - port: metrics
- {{- if .Values.exporter.serviceMonitor.path }}
- path: {{ .Values.exporter.serviceMonitor.path }}
- {{- end }}
- {{- if .Values.exporter.serviceMonitor.interval }}
- interval: {{ .Values.exporter.serviceMonitor.interval }}
- {{- end }}
- {{- if .Values.exporter.serviceMonitor.scrapeTimeout }}
- scrapeTimeout: {{ .Values.exporter.serviceMonitor.scrapeTimeout }}
- {{- end }}
- namespaceSelector:
- any: true
- selector:
- matchLabels:
- {{- include "nats.selectorLabels" . | nindent 6 }}
-{{- end }}
diff --git a/charts/nats/templates/statefulset.yaml b/charts/nats/templates/statefulset.yaml
deleted file mode 100644
index 9533e499..00000000
--- a/charts/nats/templates/statefulset.yaml
+++ /dev/null
@@ -1,636 +0,0 @@
----
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
- name: {{ include "nats.fullname" . }}
- namespace: {{ include "nats.namespace" . }}
- labels:
- {{- include "nats.labels" . | nindent 4 }}
- {{- if .Values.statefulSetAnnotations}}
- annotations:
- {{- range $key, $value := .Values.statefulSetAnnotations }}
- {{ $key }}: {{ $value | quote }}
- {{- end }}
- {{- end }}
-spec:
- selector:
- matchLabels:
- {{- include "nats.selectorLabels" . | nindent 6 }}
- {{- if .Values.cluster.enabled }}
- replicas: {{ .Values.cluster.replicas }}
- {{- else }}
- replicas: 1
- {{- end }}
- serviceName: {{ include "nats.fullname" . }}
-
- podManagementPolicy: {{ .Values.podManagementPolicy }}
-
- template:
- metadata:
- {{- if or .Values.podAnnotations .Values.exporter.enabled }}
- annotations:
- {{- if .Values.exporter.enabled }}
- prometheus.io/path: /metrics
- prometheus.io/port: "7777"
- prometheus.io/scrape: "true"
- {{- end }}
- {{- range $key, $value := .Values.podAnnotations }}
- {{ $key }}: {{ $value | quote }}
- {{- end }}
- {{- end }}
- labels:
- {{- include "nats.selectorLabels" . | nindent 8 }}
- {{- if .Values.statefulSetPodLabels }}
- {{- tpl (toYaml .Values.statefulSetPodLabels) . | nindent 8 }}
- {{- end }}
- spec:
-{{- with .Values.imagePullSecrets }}
- imagePullSecrets:
-{{- toYaml . | nindent 8 }}
-{{- end }}
-{{- with .Values.securityContext }}
- securityContext:
-{{- toYaml . | nindent 8 }}
-{{- end }}
-{{- with .Values.affinity }}
- affinity:
-{{- tpl (toYaml .) $ | nindent 8 }}
-{{- end }}
-{{- with .Values.nodeSelector }}
- nodeSelector: {{ toYaml . | nindent 8 }}
-{{- end }}
-{{- with .Values.tolerations }}
- tolerations: {{ toYaml . | nindent 8 }}
-{{- end }}
-{{- if .Values.topologySpreadConstraints }}
- topologySpreadConstraints:
- {{- range .Values.topologySpreadConstraints }}
- {{- if and .maxSkew .topologyKey }}
- - maxSkew: {{ .maxSkew }}
- topologyKey: {{ .topologyKey }}
- {{- if .whenUnsatisfiable }}
- whenUnsatisfiable: {{ .whenUnsatisfiable }}
- {{- end }}
- labelSelector:
- matchLabels:
- {{- include "nats.selectorLabels" $ | nindent 12 }}
- {{- end }}
- {{- end }}
-{{- end }}
-{{- if .Values.priorityClassName }}
- priorityClassName: {{ .Values.priorityClassName | quote }}
-{{- end }}
- # Common volumes for the containers.
- volumes:
- - name: config-volume
- {{ if .Values.nats.customConfigSecret }}
- secret:
- secretName: {{ .Values.nats.customConfigSecret.name }}
- {{ else }}
- configMap:
- name: {{ include "nats.fullname" . }}-config
- {{ end }}
-
- {{/* User extended config volumes*/}}
- {{- if .Values.nats.config }}
- # User extended config volumes
- {{- with .Values.nats.config }}
- {{- . | toYaml | nindent 6 }}
- {{- end }}
- {{- end }}
-
- # Local volume shared with the reloader.
- - name: pid
- emptyDir: {}
-
- {{- if and .Values.auth.enabled .Values.auth.resolver }}
- {{- if .Values.auth.resolver.configMap }}
- - name: resolver-volume
- configMap:
- name: {{ .Values.auth.resolver.configMap.name }}
- {{- end }}
-
- {{- if eq .Values.auth.resolver.type "URL" }}
- - name: operator-jwt-volume
- configMap:
- name: {{ .Values.auth.operatorjwt.configMap.name }}
- {{- end }}
- {{- end }}
-
- {{- if and .Values.nats.externalAccess .Values.nats.advertise }}
- # Local volume shared with the advertise config initializer.
- - name: advertiseconfig
- emptyDir: {}
- {{- end }}
-
- {{- if and .Values.nats.jetstream.fileStorage.enabled .Values.nats.jetstream.fileStorage.existingClaim }}
- # Persistent volume for jetstream running with file storage option
- - name: {{ include "nats.fullname" . }}-js-pvc
- persistentVolumeClaim:
- claimName: {{ .Values.nats.jetstream.fileStorage.existingClaim | quote }}
- {{- end }}
-
- #################
- # #
- # TLS Volumes #
- # #
- #################
- {{- with .Values.nats.tls }}
- {{ $secretName := tpl .secret.name $ }}
- - name: {{ $secretName }}-clients-volume
- secret:
- secretName: {{ $secretName }}
- {{- end }}
- {{- with .Values.mqtt.tls }}
- {{ $secretName := tpl .secret.name $ }}
- - name: {{ $secretName }}-mqtt-volume
- secret:
- secretName: {{ $secretName }}
- {{- end }}
- {{- with .Values.cluster.tls }}
- {{ $secretName := tpl .secret.name $ }}
- - name: {{ $secretName }}-cluster-volume
- secret:
- secretName: {{ $secretName }}
- {{- end }}
- {{- with .Values.leafnodes.tls }}
- {{- if not .custom }}
- {{ $secretName := tpl .secret.name $ }}
- - name: {{ $secretName }}-leafnodes-volume
- secret:
- secretName: {{ $secretName }}
- {{- end }}
- {{- end }}
- {{- with .Values.gateway.tls }}
- {{ $secretName := tpl .secret.name $ }}
- - name: {{ $secretName }}-gateways-volume
- secret:
- secretName: {{ $secretName }}
- {{- end }}
- {{- with .Values.websocket.tls }}
- {{ $secretName := tpl .secret.name $ }}
- - name: {{ $secretName }}-ws-volume
- secret:
- secretName: {{ $secretName }}
- {{- end }}
- {{- if .Values.leafnodes.enabled }}
- #
- # Leafnode credential volumes
- #
- {{- range .Values.leafnodes.remotes }}
- {{- with .credentials }}
- - name: {{ .secret.name }}-volume
- secret:
- secretName: {{ .secret.name }}
- {{- end }}
- {{- with .tls }}
- - name: {{ .secret.name }}-volume
- secret:
- secretName: {{ .secret.name }}
- {{- end }}
- {{- end }}
- {{- end }}
-
- {{- if .Values.additionalVolumes }}
- {{- toYaml .Values.additionalVolumes | nindent 6 }}
- {{- end }}
-
- {{ if and .Values.nats.externalAccess .Values.nats.advertise }}
- # The service account is only used when we need to figure out
- # the current external public IP of the server
- # in order to be able to advertise correctly.
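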
- serviceAccountName: {{ .Values.nats.serviceAccount }}
- {{ end }}
-
- # Required to be able to send a HUP signal and apply a config
- # reload to the server without restarting the pod.
- shareProcessNamespace: true
-
- {{- if and .Values.nats.externalAccess .Values.nats.advertise }}
- # Initializer container required to be able to look up
- # the external IP on which this node is running.
- initContainers:
- - name: bootconfig
- command:
- - nats-pod-bootconfig
- - -f
- - /etc/nats-config/advertise/client_advertise.conf
- - -gf
- - /etc/nats-config/advertise/gateway_advertise.conf
- env:
- - name: KUBERNETES_NODE_NAME
- valueFrom:
- fieldRef:
- apiVersion: v1
- fieldPath: spec.nodeName
- image: {{ .Values.bootconfig.image }}
- imagePullPolicy: {{ .Values.bootconfig.pullPolicy }}
- {{- if .Values.bootconfig.securityContext }}
- securityContext:
- {{- .Values.bootconfig.securityContext | toYaml | nindent 8 }}
- {{- end }}
- resources:
- {{- toYaml .Values.bootconfig.resources | nindent 10 }}
- volumeMounts:
- - mountPath: /etc/nats-config/advertise
- name: advertiseconfig
- subPath: advertise
- {{- end }}
-
- #################
- # #
- # NATS Server #
- # #
- #################
- terminationGracePeriodSeconds: {{ .Values.nats.terminationGracePeriodSeconds }}
- containers:
- - name: nats
- image: {{ .Values.nats.image }}
- imagePullPolicy: {{ .Values.nats.pullPolicy }}
- {{- if .Values.nats.securityContext }}
- securityContext:
- {{- .Values.nats.securityContext | toYaml | nindent 10 }}
- {{- end }}
- resources:
- {{- toYaml .Values.nats.resources | nindent 10 }}
- ports:
- - containerPort: {{ .Values.nats.client.port }}
- name: {{ .Values.nats.client.portName }}
- {{- if .Values.nats.externalAccess }}
- hostPort: {{ .Values.nats.client.port }}
- {{- end }}
- - containerPort: 7422
- name: leafnodes
- {{- if .Values.nats.externalAccess }}
- hostPort: 7422
- {{- end }}
- - containerPort: 7522
- name: gateways
- {{- if .Values.nats.externalAccess }}
- hostPort: 7522
- {{- end }}
- - containerPort: 6222
- name: cluster
- - containerPort: 8222
- name: monitor
- - containerPort: 7777
- name: metrics
- {{- if .Values.mqtt.enabled }}
- - containerPort: 1883
- name: mqtt
- {{- if .Values.nats.externalAccess }}
- hostPort: 1883
- {{- end }}
- {{- end }}
- {{- if .Values.websocket.enabled }}
- - containerPort: {{ .Values.websocket.port }}
- name: websocket
- {{- if .Values.nats.externalAccess }}
- hostPort: {{ .Values.websocket.port }}
- {{- end }}
- {{- end }}
- {{- if .Values.nats.profiling.enabled }}
- - containerPort: {{ .Values.nats.profiling.port }}
- name: profiling
- {{- end }}
-
- command:
- - "nats-server"
- - "--config"
- - "/etc/nats-config/nats.conf"
- {{- if .Values.nats.profiling.enabled }}
- - "--profile={{ .Values.nats.profiling.port }}"
- {{- end }}
-
- # Required to be able to define an environment variable
- # that refers to other environment variables. This env var
- # is later used as part of the configuration file.
- env:
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- - name: SERVER_NAME
- value: {{ .Values.nats.serverNamePrefix }}$(POD_NAME)
- - name: POD_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- - name: CLUSTER_ADVERTISE
- value: {{ include "nats.clusterAdvertise" . }}
-
- {{- if .Values.nats.jetstream.enabled }}
- {{- with .Values.nats.jetstream.encryption }}
- {{- with .secret }}
- - name: JS_KEY
- valueFrom:
- secretKeyRef:
- name: {{ .name }}
- key: {{ .key }}
- {{- end }}
- {{- end }}
- {{- end }}
- volumeMounts:
- - name: config-volume
- mountPath: /etc/nats-config
- - name: pid
- mountPath: /var/run/nats
- {{- if and .Values.nats.externalAccess .Values.nats.advertise }}
- - mountPath: /etc/nats-config/advertise
- name: advertiseconfig
- subPath: advertise
- {{- end }}
-
- {{/* User extended config volumes*/}}
- {{- range .Values.nats.config }}
- # User extended config volumes
- - name: {{ .name }}
- mountPath: /etc/nats-config/{{ .name }}
- {{- end }}
-
-
- {{- if and .Values.auth.enabled .Values.auth.resolver }}
- {{- if eq .Values.auth.resolver.type "memory" }}
- - name: resolver-volume
- mountPath: /etc/nats-config/accounts
- {{- end }}
-
- {{- if eq .Values.auth.resolver.type "full" }}
- {{- if .Values.auth.resolver.configMap }}
- - name: resolver-volume
- mountPath: /etc/nats-config/accounts
- {{- end }}
- {{- if and .Values.auth.resolver .Values.auth.resolver.store }}
- - name: nats-jwt-pvc
- mountPath: {{ .Values.auth.resolver.store.dir }}
- {{- end }}
- {{- end }}
-
- {{- if eq .Values.auth.resolver.type "URL" }}
- - name: operator-jwt-volume
- mountPath: /etc/nats-config/operator
- {{- end }}
- {{- end }}
-
- {{- if .Values.nats.jetstream.fileStorage.enabled }}
- - name: {{ include "nats.fullname" . }}-js-pvc
- mountPath: {{ .Values.nats.jetstream.fileStorage.storageDirectory }}
- {{- end }}
-
- {{- with .Values.nats.tls }}
- #######################
- # #
- # TLS Volumes Mounts #
- # #
- #######################
- {{ $secretName := tpl .secret.name $ }}
- - name: {{ $secretName }}-clients-volume
- mountPath: /etc/nats-certs/clients/{{ $secretName }}
- {{- end }}
- {{- with .Values.mqtt.tls }}
- {{ $secretName := tpl .secret.name $ }}
- - name: {{ $secretName }}-mqtt-volume
- mountPath: /etc/nats-certs/mqtt/{{ $secretName }}
- {{- end }}
- {{- with .Values.cluster.tls }}
- {{- if not .custom }}
- {{ $secretName := tpl .secret.name $ }}
- - name: {{ $secretName }}-cluster-volume
- mountPath: /etc/nats-certs/cluster/{{ $secretName }}
- {{- end }}
- {{- end }}
- {{- with .Values.leafnodes.tls }}
- {{- if not .custom }}
- {{ $secretName := tpl .secret.name $ }}
- - name: {{ $secretName }}-leafnodes-volume
- mountPath: /etc/nats-certs/leafnodes/{{ $secretName }}
- {{- end }}
- {{- end }}
- {{- with .Values.gateway.tls }}
- {{ $secretName := tpl .secret.name $ }}
- - name: {{ $secretName }}-gateways-volume
- mountPath: /etc/nats-certs/gateways/{{ $secretName }}
- {{- end }}
-
- {{- with .Values.websocket.tls }}
- {{ $secretName := tpl .secret.name $ }}
- - name: {{ $secretName }}-ws-volume
- mountPath: /etc/nats-certs/ws/{{ $secretName }}
- {{- end }}
-
- {{- if .Values.leafnodes.enabled }}
- #
- # Leafnode credential volumes
- #
- {{- range .Values.leafnodes.remotes }}
- {{- with .credentials }}
- - name: {{ .secret.name }}-volume
- mountPath: /etc/nats-creds/{{ .secret.name }}
- {{- end }}
- {{- with .tls }}
- - name: {{ .secret.name }}-volume
- mountPath: /etc/nats-certs/leafnodes/{{ .secret.name }}
- {{- end }}
- {{- end }}
- {{- end }}
-
- {{- if .Values.additionalVolumeMounts }}
- {{- toYaml .Values.additionalVolumeMounts | nindent 10 }}
- {{- end }}
-
- #######################
- # #
- # Healthcheck Probes #
- # #
- #######################
- {{- if .Values.nats.healthcheck }}
-
- {{- with .Values.nats.healthcheck.liveness }}
- {{- if .enabled }}
- livenessProbe:
- httpGet:
- path: /
- port: 8222
- initialDelaySeconds: {{ .initialDelaySeconds }}
- timeoutSeconds: {{ .timeoutSeconds }}
- periodSeconds: {{ .periodSeconds }}
- successThreshold: {{ .successThreshold }}
- failureThreshold: {{ .failureThreshold }}
- {{- if .terminationGracePeriodSeconds }}
- terminationGracePeriodSeconds: {{ .terminationGracePeriodSeconds }}
- {{- end }}
- {{- end }}
- {{- end }}
-
- {{- with .Values.nats.healthcheck.readiness }}
- {{- if .enabled }}
- readinessProbe:
- httpGet:
- path: /
- port: 8222
- initialDelaySeconds: {{ .initialDelaySeconds }}
- timeoutSeconds: {{ .timeoutSeconds }}
- periodSeconds: {{ .periodSeconds }}
- successThreshold: {{ .successThreshold }}
- failureThreshold: {{ .failureThreshold }}
- {{- end }}
- {{- end }}
-
- {{- if .Values.nats.healthcheck.startup.enabled }}
- startupProbe:
- httpGet:
- {{- $parts := split ":" .Values.nats.image }}
- {{- $tag := $parts._1 }}
- {{- $version := semver $tag }}
- {{- $simpleVersion := printf "%d.%d.%d" $version.Major $version.Minor $version.Patch }}
- {{- if and (and (or .Release.IsUpgrade .Values.upgrade) (semverCompare "~2.7.1" $simpleVersion) .Values.nats.healthcheck.enableHealthz ) }}
- # During upgrades, healthz will be enabled instead to allow a grace period
- # for JetStream-enabled deployments to form quorum and for streams to catch up.
- path: /healthz
- {{- else }}
- path: /
- {{- end }}
- port: 8222
- {{- with .Values.nats.healthcheck.startup }}
- initialDelaySeconds: {{ .initialDelaySeconds }}
- timeoutSeconds: {{ .timeoutSeconds }}
- periodSeconds: {{ .periodSeconds }}
- successThreshold: {{ .successThreshold }}
- failureThreshold: {{ .failureThreshold }}
- {{- end }}
- {{- end }}
-
- {{- end }}
-
- # Gracefully stop NATS Server on pod deletion or image upgrade.
- #
- lifecycle:
- preStop:
- exec:
- # Send the lame duck mode signal to the NATS Server so that it can
- # gracefully terminate the client connections within the
- # terminationGracePeriodSeconds window.
- #
- command:
- - "/bin/sh"
- - "-c"
- - "nats-server -sl=ldm=/var/run/nats/nats.pid"
-
- #################################
- # #
- # NATS Configuration Reloader #
- # #
- #################################
- {{ if .Values.reloader.enabled }}
- - name: reloader
- image: {{ .Values.reloader.image }}
- imagePullPolicy: {{ .Values.reloader.pullPolicy }}
- {{- if .Values.reloader.securityContext }}
- securityContext:
- {{- .Values.reloader.securityContext | toYaml | nindent 10 }}
- {{- end }}
- resources:
- {{- toYaml .Values.reloader.resources | nindent 10 }}
- command:
- - "nats-server-config-reloader"
- - "-pid"
- - "/var/run/nats/nats.pid"
- - "-config"
- - "/etc/nats-config/nats.conf"
- {{- range .Values.reloader.extraConfigs }}
- - "-config"
- - {{ . | quote }}
- {{- end }}
- volumeMounts:
- - name: config-volume
- mountPath: /etc/nats-config
- - name: pid
- mountPath: /var/run/nats
- {{- if .Values.additionalVolumeMounts }}
- {{- toYaml .Values.additionalVolumeMounts | nindent 10 }}
- {{- end }}
- {{ end }}
-
- ##############################
- # #
- # NATS Prometheus Exporter #
- # #
- ##############################
- {{ if .Values.exporter.enabled }}
- - name: metrics
- image: {{ .Values.exporter.image }}
- imagePullPolicy: {{ .Values.exporter.pullPolicy }}
- {{- if .Values.exporter.securityContext }}
- securityContext:
- {{- .Values.exporter.securityContext | toYaml | nindent 10 }}
- {{- end }}
- resources:
- {{- toYaml .Values.exporter.resources | nindent 10 }}
- args:
- - -connz
- - -routez
- - -subz
- - -varz
- - -prefix=nats
- - -use_internal_server_id
- {{- if .Values.nats.jetstream.enabled }}
- - -jsz=all
- {{- end }}
- {{- if .Values.leafnodes.enabled }}
- - -leafz
- {{- end }}
- - http://localhost:8222/
- ports:
- - containerPort: 7777
- name: metrics
- {{ end }}
-
- {{- if .Values.additionalContainers }}
- {{- toYaml .Values.additionalContainers | nindent 6 }}
- {{- end }}
-
- volumeClaimTemplates:
- {{- if eq .Values.auth.resolver.type "full" }}
- {{- if and .Values.auth.resolver .Values.auth.resolver.store }}
- #####################################
- # #
- # Account Server Embedded JWT #
- # #
- #####################################
- - metadata:
- name: nats-jwt-pvc
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: {{ .Values.auth.resolver.store.size }}
- {{- end }}
- {{- end }}
-
- {{- if and .Values.nats.jetstream.fileStorage.enabled (not .Values.nats.jetstream.fileStorage.existingClaim) }}
- #####################################
- # #
- # Jetstream New Persistent Volume #
- # #
- #####################################
- - metadata:
- name: {{ include "nats.fullname" . }}-js-pvc
- {{- if .Values.nats.jetstream.fileStorage.annotations }}
- annotations:
- {{- range $key, $value := .Values.nats.jetstream.fileStorage.annotations }}
- {{ $key }}: {{ $value | quote }}
- {{- end }}
- {{- end }}
- spec:
- accessModes:
- {{- range .Values.nats.jetstream.fileStorage.accessModes }}
- - {{ . | quote }}
- {{- end }}
- resources:
- requests:
- storage: {{ .Values.nats.jetstream.fileStorage.size }}
- {{- if .Values.nats.jetstream.fileStorage.storageClassName }}
- storageClassName: {{ .Values.nats.jetstream.fileStorage.storageClassName | quote }}
- {{- end }}
- {{- end }}
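The startup probe in the deleted StatefulSet above switches to `/healthz` only when the image tag satisfies `semverCompare "~2.7.1"` during an upgrade. Helm's `semverCompare` is backed by the Masterminds semver library, so the same decision can be sketched in Go; the helper name and the image/flag handling below are illustrative assumptions, not code from this repository:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/Masterminds/semver/v3"
)

// healthzPath mirrors the startup-probe logic in the template above: take the
// tag from the image reference, reduce it to major.minor.patch, and use
// /healthz only when the version matches ~2.7.1 during an upgrade.
func healthzPath(image string, isUpgrade, enableHealthz bool) string {
	parts := strings.SplitN(image, ":", 2)
	if len(parts) != 2 {
		return "/"
	}
	v, err := semver.NewVersion(parts[1]) // e.g. "2.7.2-alpine"
	if err != nil {
		return "/"
	}
	simple, err := semver.NewVersion(fmt.Sprintf("%d.%d.%d", v.Major(), v.Minor(), v.Patch()))
	if err != nil {
		return "/"
	}
	constraint, _ := semver.NewConstraint("~2.7.1")
	if isUpgrade && enableHealthz && constraint.Check(simple) {
		return "/healthz"
	}
	return "/"
}

func main() {
	fmt.Println(healthzPath("nats:2.7.2-alpine", true, true)) // -> /healthz
}
```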
diff --git a/charts/nats/templates/tests/test-request-reply.yaml b/charts/nats/templates/tests/test-request-reply.yaml
deleted file mode 100644
index 785ce53b..00000000
--- a/charts/nats/templates/tests/test-request-reply.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
- name: "{{ include "nats.fullname" . }}-test-request-reply"
- labels:
- {{- include "nats.labels" . | nindent 4 }}
- annotations:
- "helm.sh/hook": test
-spec:
- containers:
- - name: nats-box
- image: synadia/nats-box
- env:
- - name: NATS_HOST
- value: {{ template "nats.fullname" . }}
- command:
- - /bin/sh
- - -ec
- - |
- nats reply -s nats://$NATS_HOST:4222 'name.>' --command "echo {{1}}" &
- - |
- "&&"
- - |
- name=$(nats request -s nats://$NATS_HOST:4222 name.test '' 2>/dev/null)
- - |
- "&&"
- - |
- [ $name = test ]
-
- restartPolicy: Never
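The deleted Helm test above exercises a request-reply round trip with the nats CLI from a nats-box container. A rough Go equivalent using the nats.go client is sketched below; the service URL and the way the responder echoes the wildcard subject token are assumptions made to mirror the test, not code from the chart:

```go
package main

import (
	"fmt"
	"log"
	"strings"
	"time"

	"github.com/nats-io/nats.go"
)

func main() {
	// NATS_HOST in the test pod is the chart's fullname service; "nats" is a placeholder here.
	nc, err := nats.Connect("nats://nats:4222")
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	// Responder: mirrors `nats reply 'name.>' --command "echo ..."` by echoing
	// back the wildcard token of the subject.
	sub, err := nc.Subscribe("name.>", func(m *nats.Msg) {
		_ = m.Respond([]byte(strings.TrimPrefix(m.Subject, "name.")))
	})
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()

	// Requester: mirrors `nats request name.test ''` and the `[ $name = test ]` check.
	msg, err := nc.Request("name.test", nil, 2*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(msg.Data)) // expected: test
}
```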
diff --git a/charts/nats/values.yaml b/charts/nats/values.yaml
deleted file mode 100644
index b59003fd..00000000
--- a/charts/nats/values.yaml
+++ /dev/null
@@ -1,677 +0,0 @@
-###############################
-# #
-# NATS Server Configuration #
-# #
-###############################
-nats:
- image: nats:2.7.2-alpine
- pullPolicy: IfNotPresent
-
- # The server's name prefix; it must be used, for example, when we want a NATS cluster
- # spanning multiple Kubernetes clusters.
- serverNamePrefix: ""
-
- # Toggle profiling.
- # This enables nats-server pprof (profiling) port, so you can see goroutines
- # stacks, memory heap sizes, etc.
- profiling:
- enabled: false
- port: 6000
-
- # Toggle using health check probes to better detect failures.
- healthcheck:
- # NOTE: Only works on NATS Server +2.7.1.
- # Enabling this is recommended for NATS JetStream deployment upgrades.
- enableHealthz: true
-
- # Enable liveness checks. If this fails, the NATS Server will be restarted.
- liveness:
- enabled: true
-
- initialDelaySeconds: 10
- timeoutSeconds: 5
- # NOTE: liveness check + terminationGracePeriodSeconds can introduce unnecessarily long outages
- # due to the coupling between the liveness probe and terminationGracePeriodSeconds.
- # To avoid this, we set the periodSeconds of the liveness check to about half the default
- # time that a lame duck graceful stop takes.
- #
- # In case of using Kubernetes +1.22 with probe-level terminationGracePeriodSeconds
- # we could revise this but for now keep a minimal liveness check.
- #
- # More info:
- #
- # https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#probe-level-terminationgraceperiodseconds
- # https://github.com/kubernetes/kubernetes/issues/64715
- #
- periodSeconds: 60
- successThreshold: 1
- failureThreshold: 3
- # Only for Kubernetes +1.22 that have pod level probes enabled.
- terminationGracePeriodSeconds:
-
- # Periodically check for the server to be ready for connections while
- # the NATS container is running.
- # Disabled by default since it is covered by the startup probe and is the same
- # as the liveness check.
- readiness:
- enabled: false
-
- initialDelaySeconds: 10
- timeoutSeconds: 5
- periodSeconds: 10
- successThreshold: 1
- failureThreshold: 3
-
- # Enable startup checks to confirm server is ready for traffic.
- # This is recommended for JetStream deployments since in cluster mode
- # it will try to ensure that the server is ready to serve streams.
- startup:
- enabled: true
-
- initialDelaySeconds: 10
- timeoutSeconds: 5
- periodSeconds: 10
- successThreshold: 1
- failureThreshold: 30
-
- # securityContext for the nats container
- securityContext: {}
-
- # Toggle whether to enable external access.
- # This binds a host port for clients, gateways and leafnodes.
- externalAccess: false
-
- # Toggle to disable client advertisements (connect_urls);
- # when running behind a load balancer (which is not recommended),
- # it might be required to disable advertisements.
- advertise: true
-
- # If both external access and advertise are enabled,
- # a service account is required in order to
- # gather the public IP from a node.
- serviceAccount: "nats-server"
-
- # The number of connect attempts against discovered routes.
- connectRetries: 120
-
- # selector matchLabels for the server and service.
- # If left empty defaults are used.
- # This is helpful if you are updating from Chart version <=7.4
- selectorLabels: {}
-
- resources: {}
-
- client:
- port: 4222
- portName: "client"
-
- # Server settings.
- limits:
- maxConnections:
- maxSubscriptions:
- maxControlLine:
- maxPayload: "64MB"
-
- writeDeadline:
- maxPending:
- maxPings:
-
- # How many seconds should pass before sending a PING
- # to a client that has no activity.
- pingInterval:
-
- # NOTE: this should be at least the same as 'terminationGracePeriodSeconds'
- lameDuckDuration: "120s"
-
- # terminationGracePeriodSeconds determines how long to allow for a pod
- # to be restarted.
- terminationGracePeriodSeconds: 120
-
- logging:
- debug:
- trace:
- logtime:
- connectErrorReports:
- reconnectErrorReports:
-
- # customConfigSecret can be used to provide a custom secret for the config
- # of the NATS Server.
- # NOTE: For this to work the name of the configuration has to be
- # called `nats.conf`.
- #
- # e.g. kubectl create secret generic custom-nats-conf --from-file nats.conf
- #
- # customConfigSecret:
- # name:
- #
- # Alternatively, the generated config can be extended with extra imports using the syntax below.
- # The benefit of this is that cluster settings can be built up via Helm values, while external
- # secrets can be referenced and imported alongside them.
- #
- # config:
- # :
- #
- # name: ""
- #
- # e.g:
- #
- # config:
- # - name: ssh-key
- # secret:
- # secretName: ssh-key
- # - name: config-vol
- # configMap:
- # name: log-config
-
- jetstream:
- enabled: false
-
- # Jetstream Domain
- domain:
-
- ##########################
- # #
- # Jetstream Encryption #
- # #
- ##########################
- encryption:
- # Use key if you want to provide the key via Helm Values
- # key: random_key
-
- # Use a secret reference if you want to get a key from a secret
- # secret:
- # name: "nats-jetstream-encryption"
- # key: "key"
-
- #############################
- # #
- # Jetstream Memory Storage #
- # #
- #############################
- memStorage:
- enabled: true
- size: 1Gi
-
- ############################
- # #
- # Jetstream File Storage #
- # #
- ############################
- fileStorage:
- enabled: false
- storageDirectory: /data
-
- # Set for use with existing PVC
- # existingClaim: jetstream-pvc
- # claimStorageSize: 1Gi
-
- # Use below block to create new persistent volume
- # only used if existingClaim is not specified
- size: 1Gi
- # storageClassName: ""
- accessModes:
- - ReadWriteOnce
- annotations:
- # key: "value"
-
- #######################
- # #
- # TLS Configuration #
- # #
- #######################
- #
- # # You can find more on how to set up and troubleshoot TLS connections at:
- #
- # # https://docs.nats.io/nats-server/configuration/securing_nats/tls
- #
-
- # tls:
- # allow_non_tls: false
- # secret:
- # name: nats-client-tls
- # ca: "ca.crt"
- # cert: "tls.crt"
- # key: "tls.key"
-
-mqtt:
- enabled: false
- ackWait: 1m
- maxAckPending: 100
-
- #######################
- # #
- # TLS Configuration #
- # #
- #######################
- #
- # # You can find more on how to set up and troubleshoot TLS connections at:
- #
- # # https://docs.nats.io/nats-server/configuration/securing_nats/tls
- #
-
- #
- # tls:
- # secret:
- # name: nats-mqtt-tls
- # ca: "ca.crt"
- # cert: "tls.crt"
- # key: "tls.key"
-
-nameOverride: ""
-namespaceOverride: ""
-
-# An array of imagePullSecrets, and they have to be created manually in the same namespace
-# ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-imagePullSecrets: []
-
- # Toggle whether to set up a Pod Security Context
-# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
-securityContext: {}
-# securityContext:
-# fsGroup: 1000
-# runAsUser: 1000
-# runAsNonRoot: true
-
-# Affinity for pod assignment
-# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-affinity: {}
-
-## Pod priority class name
-## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
-priorityClassName: null
-
-# Service topology
-# ref: https://kubernetes.io/docs/concepts/services-networking/service-topology/
-topologyKeys: []
-
-# Pod Topology Spread Constraints
-# ref https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
-topologySpreadConstraints: []
-# - maxSkew: 1
-# topologyKey: zone
-# whenUnsatisfiable: DoNotSchedule
-
-# Annotations to add to the NATS pods
-# ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
-podAnnotations: {}
-# key: "value"
-
-# Define a Pod Disruption Budget for the stateful set
-# ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
-podDisruptionBudget:
- # minAvailable: 1
- # maxUnavailable: 1
-
-# Node labels for pod assignment
-# Ref: https://kubernetes.io/docs/user-guide/node-selection/
-nodeSelector: {}
-
-# Node tolerations for server scheduling to nodes with taints
-# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-#
-tolerations: []
-# - key: "key"
-# operator: "Equal|Exists"
-# value: "value"
-# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
-
-# Annotations to add to the NATS StatefulSet
-statefulSetAnnotations: {}
-
-# Labels to add to the pods of the NATS StatefulSet
-statefulSetPodLabels: {}
-
-# Annotations to add to the NATS Service
-serviceAnnotations: {}
-
-# additionalContainers are the sidecar containers to add to the NATS StatefulSet
-additionalContainers: []
-
-# additionalVolumes are the additional volumes to add to the NATS StatefulSet
-additionalVolumes: []
-
-# additionalVolumeMounts are the additional volume mounts to add to the nats-server and nats-server-config-reloader containers
-additionalVolumeMounts: []
-
-cluster:
- enabled: false
- replicas: 3
- noAdvertise: false
-
- # Explicitly set routes for clustering.
- # When JetStream is enabled, the serverName must be unique in the cluster.
- extraRoutes: []
-
- # authorization:
- # user: foo
- # password: pwd
- # timeout: 0.5
-
-# Leafnode connections to extend a cluster:
-#
-# https://docs.nats.io/nats-server/configuration/leafnodes
-#
-leafnodes:
- enabled: false
- noAdvertise: false
- # remotes:
- # - url: "tls://connect.ngs.global:7422"
-
- #######################
- # #
- # TLS Configuration #
- # #
- #######################
- #
- # # You can find more on how to set up and troubleshoot TLS connections at:
- #
- # # https://docs.nats.io/nats-server/configuration/securing_nats/tls
- #
-
- #
- # tls:
- # secret:
- # name: nats-client-tls
- # ca: "ca.crt"
- # cert: "tls.crt"
- # key: "tls.key"
-
-# Gateway connections to create a super cluster
-#
-# https://docs.nats.io/nats-server/configuration/gateways
-#
-gateway:
- enabled: false
- name: "default"
- # authorization:
- # user: foo
- # password: pwd
- # timeout: 0.5
- # rejectUnknownCluster: false
-
- # You can set an explicit advertise address instead of using the Node's IP;
- # it could also be an FQDN address.
- # advertise: "nats.example.com"
-
- #############################
- # #
- # List of remote gateways #
- # #
- #############################
- # gateways:
- # - name: other
- # url: nats://my-gateway-url:7522
-
- #######################
- # #
- # TLS Configuration #
- # #
- #######################
- #
- # # You can find more on how to set up and troubleshoot TLS connections at:
- #
- # # https://docs.nats.io/nats-server/configuration/securing_nats/tls
- #
- # tls:
- # secret:
- # name: nats-client-tls
- # ca: "ca.crt"
- # cert: "tls.crt"
- # key: "tls.key"
-
-# In case of both external access and advertisements being
-# enabled, an initializer container will be used to gather
-# the public ips.
-bootconfig:
- image: natsio/nats-boot-config:0.5.4
- pullPolicy: IfNotPresent
- securityContext: {}
-
-# NATS Box
-#
-# https://github.com/nats-io/nats-box
-#
-natsbox:
- enabled: true
- image: natsio/nats-box:0.8.1
- pullPolicy: IfNotPresent
- securityContext: {}
-
- # Labels to add to the natsbox deployment
- # ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
- additionalLabels: {}
-
- # An array of imagePullSecrets, and they have to be created manually in the same namespace
- # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
- imagePullSecrets: []
- # - name: dockerhub
-
- # credentials:
- # secret:
- # name: nats-sys-creds
- # key: sys.creds
-
- # Annotations to add to the box pods
- # ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
- podAnnotations: {}
- # key: "value"
-
- # Labels to add to the box pods
- # ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
- podLabels: {}
- # key: "value"
-
- # Affinity for nats box pod assignment
- # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
- affinity: {}
-
- # Node labels for pod assignment
- # Ref: https://kubernetes.io/docs/user-guide/node-selection/
- nodeSelector: {}
-
- # Node tolerations for server scheduling to nodes with taints
- # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
- #
- tolerations: []
- # - key: "key"
- # operator: "Equal|Exists"
- # value: "value"
- # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
-
- # Additional nats-box server Volume mounts
- extraVolumeMounts: []
-
- # Additional nats-box server Volumes
- extraVolumes: []
-
-# The NATS config reloader image to use.
-reloader:
- enabled: true
- image: natsio/nats-server-config-reloader:0.6.2
- pullPolicy: IfNotPresent
- securityContext: {}
- extraConfigs: []
-
-# Prometheus NATS Exporter configuration.
-exporter:
- enabled: true
- image: natsio/prometheus-nats-exporter:0.9.1
- pullPolicy: IfNotPresent
- securityContext: {}
- resources: {}
- # Prometheus operator ServiceMonitor support. Exporter has to be enabled
- serviceMonitor:
- enabled: false
- ## Specify the namespace where Prometheus Operator is running
- ##
- # namespace: monitoring
- labels: {}
- annotations: {}
- path: /metrics
- # interval:
- # scrapeTimeout:
-
-# Authentication setup
-auth:
- enabled: false
-
- # basic:
- # noAuthUser:
- # # List of users that can connect with basic auth,
- # # that belong to the global account.
- # users:
-
- # # List of accounts with users that can connect
- # # using basic auth.
- # accounts:
-
- # Reference to the Operator JWT.
- # operatorjwt:
- # configMap:
- # name: operator-jwt
- # key: KO.jwt
-
- # Token authentication
- # token:
-
- # NKey authentication
- # nkeys:
- # users:
-
- # Public key of the System Account
- # systemAccount:
-
- resolver:
- # Disables the resolver by default
- type: none
-
- ##########################################
- # #
- # Embedded NATS Account Server Resolver #
- # #
- ##########################################
- # type: full
-
- # If the resolver type is 'full', enabling delete will rename the JWT instead of removing it.
- allowDelete: false
-
- # Interval at which a nats-server with a NATS-based account resolver will compare
- # its state with one random NATS-based account resolver in the cluster and, if needed,
- # exchange JWTs and converge on the same set of JWTs.
- interval: 2m
-
- # Operator JWT
- operator:
-
- # System Account Public NKEY
- systemAccount:
-
- # resolverPreload:
- # :
-
- # Directory in which the account JWTs will be stored.
- store:
- dir: "/accounts/jwt"
-
- # Size of the account JWT storage.
- size: 1Gi
-
- ##############################
- # #
- # Memory resolver settings #
- # #
- ##############################
- # type: memory
- #
- # Use a configmap reference which will be mounted
- # into the container.
- #
- # configMap:
- # name: nats-accounts
- # key: resolver.conf
-
- ##########################
- # #
- # URL resolver settings #
- # #
- ##########################
- # type: URL
- # url: "http://nats-account-server:9090/jwt/v1/accounts/"
-
-websocket:
- enabled: false
- port: 443
- noTLS: true
-
- sameOrigin: false
- allowedOrigins: []
-
-appProtocol:
- enabled: false
-
-# Network Policy configuration
-networkPolicy:
- enabled: false
- # Don't require client label for connections
- # When set to false, only pods with the correct client label will have network access to the ports
- # NATS is listening on. When true, NATS will accept connections from any source
- # (with the correct destination port).
- allowExternal: true
- # Add extra ingress rules to the NetworkPolicy
- # e.g:
- # extraIngress:
- # - ports:
- # - port: 1234
- # from:
- # - podSelector:
- # - matchLabels:
- # - role: frontend
- # - podSelector:
- # - matchExpressions:
- # - key: role
- # operator: In
- # values:
- # - frontend
- extraIngress: []
- # Add extra egress rules to the NetworkPolicy
- # e.g:
- # extraEgress:
- # - ports:
- # - port: 1234
- # to:
- # - podSelector:
- # - matchLabels:
- # - role: frontend
- # - podSelector:
- # - matchExpressions:
- # - key: role
- # operator: In
- # values:
- # - frontend
- extraEgress: []
- # Labels to match to allow traffic from other namespaces
- ingressNSMatchLabels: {}
- # Pod labels to match to allow traffic from other namespaces
- ingressNSPodMatchLabels: {}
-
-# Cluster Domain configured on the kubelets
-# https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/
-k8sClusterDomain: cluster.local
-
- # Define whether NATS uses the FQDN for clustering (e.g. nats-0.nats.default.svc.cluster.local) or the short name (e.g. nats-0.nats.default).
-useFQDN: true
-
-# Add labels to all the deployed resources
-commonLabels: {}
-
-# podManagementPolicy controls how pods are created during initial scale up,
-# when replacing pods on nodes, or when scaling down.
-podManagementPolicy: Parallel
-
- # Toggle to mark this as an upgrade so that healthz is enabled, in case the
- # `helm upgrade` command is not used to apply upgrades.
-upgrade: false
-
-service:
- type: LoadBalancer
-
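The deleted values.yaml above exposes a `websocket` block (port 443, `noTLS: true` when no TLS secret is configured). For illustration only, and assuming a nats.go release with WebSocket support, a client could dial that listener directly with a `ws://` URL; the hostname below is a placeholder:

```go
package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	// websocket.enabled=true with port 443 and noTLS=true exposes a plain
	// (non-TLS) websocket listener, so the scheme is ws:// rather than wss://.
	nc, err := nats.Connect("ws://my-nats.default.svc:443")
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
	log.Println("connected over websocket to", nc.ConnectedUrl())
}
```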
diff --git a/client/main.go b/client/main.go
index e3c5611c..842de37f 100644
--- a/client/main.go
+++ b/client/main.go
@@ -1,17 +1,30 @@
package main
import (
+ "context"
"log"
"os"
"os/signal"
"syscall"
"github.com/intelops/kubviz/client/pkg/application"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
)
func main() {
log.SetFlags(log.LstdFlags | log.Lshortfile)
log.Println("new client running...")
+
+ tp, err := opentelemetry.InitTracer()
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer func() {
+ if err := tp.Shutdown(context.Background()); err != nil {
+ log.Printf("Error shutting down tracer provider: %v", err)
+ }
+ }()
+
app := application.Start()
signals := make(chan os.Signal, 1)
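The updated main.go relies on `opentelemetry.InitTracer()` from the repository's `pkg/opentelemetry` package, whose implementation (like the `BuildContext` helper used later in application.go) is not part of this diff. A minimal sketch of what such a helper typically looks like, assuming an OTLP/gRPC exporter configured via the standard OTEL_EXPORTER_OTLP_ENDPOINT environment variable; the service name attribute is an assumption:

```go
package opentelemetry

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// InitTracer wires up a global tracer provider so that otel.Tracer(...) calls,
// like the one in application.Start below, produce exported spans. The caller
// owns the returned provider and should call Shutdown on exit, as main.go does.
func InitTracer() (*sdktrace.TracerProvider, error) {
	exp, err := otlptracegrpc.New(context.Background()) // endpoint taken from OTEL_EXPORTER_OTLP_ENDPOINT
	if err != nil {
		return nil, err
	}
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithBatcher(exp),
		sdktrace.WithResource(resource.NewSchemaless(
			attribute.String("service.name", "kubviz-client"), // assumed service name
		)),
	)
	otel.SetTracerProvider(tp)
	otel.SetTextMapPropagator(propagation.TraceContext{})
	return tp, nil
}
```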
diff --git a/client/pkg/application/application.go b/client/pkg/application/application.go
index d95e8ede..6d9c3c99 100644
--- a/client/pkg/application/application.go
+++ b/client/pkg/application/application.go
@@ -1,12 +1,22 @@
package application
import (
+ "context"
+ "database/sql"
"log"
+ "os"
+ "os/signal"
+ "syscall"
"github.com/intelops/kubviz/client/pkg/clickhouse"
"github.com/intelops/kubviz/client/pkg/clients"
"github.com/intelops/kubviz/client/pkg/config"
+ "github.com/intelops/kubviz/client/pkg/storage"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
"github.com/kelseyhightower/envconfig"
+ "github.com/robfig/cron/v3"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
)
type Application struct {
@@ -15,13 +25,44 @@ type Application struct {
dbClient clickhouse.DBInterface
}
+const (
+ EventsTable = "events"
+ RakkessTable = "rakkess"
+ DeprecatedAPIsTable = "DeprecatedAPIs"
+ DeletedAPIsTable = "DeletedAPIs"
+ JfrogContainerPushTable = "jfrogcontainerpush"
+ GetAllResourcesTable = "getall_resources"
+ OutdatedImagesTable = "outdated_images"
+ KubeScoreTable = "kubescore"
+ TrivyVulTable = "trivy_vul"
+ TrivyMisconfigTable = "trivy_misconfig"
+ TrivyImageTable = "trivyimage"
+ DockerHubBuildTable = "dockerhubbuild"
+ AzureContainerPushTable = "azurecontainerpush"
+ QuayContainerPushTable = "quaycontainerpush"
+ TrivySBOMTable = "trivysbom"
+ AzureDevOpsTable = "azure_devops"
+ GitHubTable = "github"
+ GitLabTable = "gitlab"
+ BitbucketTable = "bitbucket"
+ GiteaTable = "gitea"
+ KuberHealthy = "kuberhealthy"
+)
+
func Start() *Application {
log.Println("Client Application started...")
+
+ ctx := context.Background()
+ tracer := otel.Tracer("kubviz-client")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "Start")
+ span.SetAttributes(attribute.String("start-app-client", "application"))
+ defer span.End()
+
cfg := &config.Config{}
if err := envconfig.Process("", cfg); err != nil {
log.Fatalf("Could not parse env Config: %v", err)
}
- dbClient, err := clickhouse.NewDBClient(cfg)
+ dbClient, conn, err := clickhouse.NewDBClient(cfg)
if err != nil {
log.Fatal(err)
}
@@ -30,6 +71,53 @@ func Start() *Application {
if err != nil {
log.Fatal("Error establishing connection to NATS:", err)
}
+ // c := cron.New()
+ // _, err = c.AddFunc("@daily", func() {
+ // if err := exportDataForTables(conn); err != nil {
+ // log.Println("Error exporting data:", err)
+ // }
+ // })
+ // if err != nil {
+ // log.Fatal("Error adding cron job:", err)
+ // }
+
+ // // Listen for interrupt signals to stop the program
+ // interrupt := make(chan os.Signal, 1)
+ // signal.Notify(interrupt, os.Interrupt, syscall.SIGTERM)
+
+ // // Start the cron job scheduler
+ // c.Start()
+
+ // // Wait for an interrupt signal to stop the program
+ // <-interrupt
+
+ // // Stop the cron scheduler gracefully
+ // c.Stop()
+ if cfg.AwsEnable {
+ c := cron.New()
+ _, err = c.AddFunc("@daily", func() {
+ if err := exportDataForTables(conn); err != nil {
+ log.Println("Error exporting data:", err)
+ }
+ })
+ if err != nil {
+ log.Fatal("Error adding cron job:", err)
+ }
+
+ // Listen for interrupt signals to stop the program
+ interrupt := make(chan os.Signal, 1)
+ signal.Notify(interrupt, os.Interrupt, syscall.SIGTERM)
+
+ // Start the cron job scheduler
+ c.Start()
+
+ // Wait for an interrupt signal to stop the program
+ <-interrupt
+
+ // Stop the cron scheduler gracefully
+ c.Stop()
+ }
+
return &Application{
Config: cfg,
conn: natsContext,
@@ -42,3 +130,19 @@ func (app *Application) Close() {
app.conn.Close()
app.dbClient.Close()
}
+func exportDataForTables(db *sql.DB) error {
+ //pvcMountPath := "/mnt/client/kbz"
+ tables := []string{
+ EventsTable, RakkessTable, DeprecatedAPIsTable, DeletedAPIsTable, JfrogContainerPushTable, GetAllResourcesTable, OutdatedImagesTable, KubeScoreTable, TrivyVulTable, TrivyMisconfigTable, TrivyImageTable, DockerHubBuildTable, AzureContainerPushTable, QuayContainerPushTable, TrivySBOMTable, AzureDevOpsTable, GitHubTable, GitLabTable, BitbucketTable, KuberHealthy, GiteaTable,
+ }
+ for _, tableName := range tables {
+ err := storage.ExportExpiredData(tableName, db)
+ if err != nil {
+ log.Printf("Error exporting data for table %s: %v", tableName, err)
+ } else {
+ log.Printf("Export completed successfully for table %s.\n", tableName)
+ }
+ }
+
+ return nil
+}
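
Start() reads its settings with envconfig, including the new cfg.AwsEnable toggle that gates the daily export cron. The Config struct itself is not shown in this diff; a sketch of the fields this code path touches (the envconfig tags and the default value are assumptions, the field names mirror how cfg is used in application.Start and clickhouse.NewDBClient):

```go
// Sketch only; the real struct lives in client/pkg/config.
package config

type Config struct {
	NatsAddress string `envconfig:"NATS_ADDRESS"`
	NatsToken   string `envconfig:"NATS_TOKEN"`

	DBAddress          string `envconfig:"DB_ADDRESS"`
	DbPort             int    `envconfig:"DB_PORT"`
	ClickHouseUsername string `envconfig:"CLICKHOUSE_USERNAME"`
	ClickHousePassword string `envconfig:"CLICKHOUSE_PASSWORD"`

	// AwsEnable turns on the @daily export job wired up in Start().
	AwsEnable bool `envconfig:"AWS_ENABLE" default:"false"`
}
```
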
diff --git a/client/pkg/clickhouse/db_client.go b/client/pkg/clickhouse/db_client.go
index 7ef2c634..7415a378 100644
--- a/client/pkg/clickhouse/db_client.go
+++ b/client/pkg/clickhouse/db_client.go
@@ -10,12 +10,16 @@ import (
"time"
"github.com/ClickHouse/clickhouse-go/v2"
+ "github.com/kuberhealthy/kuberhealthy/v2/pkg/health"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
"github.com/intelops/kubviz/client/pkg/config"
"github.com/intelops/kubviz/gitmodels/dbstatement"
"github.com/intelops/kubviz/model"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
)
type DBClient struct {
@@ -24,6 +28,7 @@ type DBClient struct {
conf *config.Config
}
type DBInterface interface {
+ InsertKuberhealthyMetrics(health.State)
InsertRakeesMetrics(model.RakeesMetrics)
InsertKetallEvent(model.Resource)
InsertOutdatedEvent(model.CheckResultfinal)
@@ -33,7 +38,7 @@ type DBInterface interface {
InsertGitEvent(string)
InsertKubeScoreMetrics(model.KubeScoreRecommendations)
InsertTrivyImageMetrics(metrics model.TrivyImage)
- InsertTrivySbomMetrics(metrics model.Reports)
+ InsertTrivySbomMetrics(metrics model.Sbom)
InsertTrivyMetrics(metrics model.Trivy)
RetriveKetallEvent() ([]model.Resource, error)
RetriveOutdatedEvent() ([]model.CheckResultfinal, error)
@@ -41,62 +46,119 @@ type DBInterface interface {
RetrieveKubvizEvent() ([]model.DbEvent, error)
InsertContainerEventDockerHub(model.DockerHubBuild)
InsertContainerEventAzure(model.AzureContainerPushEventPayload)
+ InsertContainerEventQuay(model.QuayImagePushPayload)
+ InsertContainerEventJfrog(model.JfrogContainerPushEventPayload)
InsertContainerEventGithub(string)
InsertGitCommon(metrics model.GitCommonAttribute, statement dbstatement.DBStatement) error
Close()
}
-func NewDBClient(conf *config.Config) (DBInterface, error) {
+func NewDBClient(conf *config.Config) (DBInterface, *sql.DB, error) {
ctx := context.Background()
- log.Println("Connecting to Clickhouse DB and creating schemas...")
- splconn, err := clickhouse.Open(&clickhouse.Options{
- Addr: []string{fmt.Sprintf("%s:%d", conf.DBAddress, conf.DbPort)},
- Debug: true,
- Debugf: func(format string, v ...any) {
- fmt.Printf(format, v)
- },
- Settings: clickhouse.Settings{
- "allow_experimental_object_type": 1,
- },
- })
+ var connOptions clickhouse.Options
+
+ if conf.ClickHouseUsername != "" && conf.ClickHousePassword != "" {
+ fmt.Println("Using provided username and password")
+ connOptions = clickhouse.Options{
+ Addr: []string{fmt.Sprintf("%s:%d", conf.DBAddress, conf.DbPort)},
+ Debug: true,
+ Auth: clickhouse.Auth{
+ Username: conf.ClickHouseUsername,
+ Password: conf.ClickHousePassword,
+ },
+ Debugf: func(format string, v ...interface{}) {
+ fmt.Printf(format, v...)
+ },
+ Settings: clickhouse.Settings{
+ "allow_experimental_object_type": 1,
+ },
+ }
+ fmt.Printf("Connecting to ClickHouse using username and password")
+ } else {
+ fmt.Println("Using connection without username and password")
+ connOptions = clickhouse.Options{
+ Addr: []string{fmt.Sprintf("%s:%d", conf.DBAddress, conf.DbPort)},
+ Debug: true,
+ Debugf: func(format string, v ...interface{}) {
+ fmt.Printf(format, v...)
+ },
+ Settings: clickhouse.Settings{
+ "allow_experimental_object_type": 1,
+ },
+ }
+ fmt.Printf("Connecting to ClickHouse without usename and password")
+
+ }
+
+ splconn, err := clickhouse.Open(&connOptions)
if err != nil {
- return nil, err
+ return nil, nil, err
}
if err := splconn.Ping(ctx); err != nil {
if exception, ok := err.(*clickhouse.Exception); ok {
fmt.Printf("[%d] %s \n%s\n", exception.Code, exception.Message, exception.StackTrace)
} else {
- fmt.Println(err)
+ fmt.Println("Authentication error:", err) // Print the error message here
}
- return nil, err
+ return nil, nil, err
}
- tables := []DBStatement{kubvizTable, rakeesTable, kubePugDepricatedTable, kubepugDeletedTable, ketallTable, trivyTableImage, trivySbomTable, outdateTable, clickhouseExperimental, containerDockerhubTable, containerGithubTable, kubescoreTable, trivyTableVul, trivyTableMisconfig, dockerHubBuildTable, azureContainerPushEventTable, DBStatement(dbstatement.AzureDevopsTable), DBStatement(dbstatement.GithubTable), DBStatement(dbstatement.GitlabTable), DBStatement(dbstatement.BitbucketTable), DBStatement(dbstatement.GiteaTable)}
- for _, table := range tables {
- if err = splconn.Exec(context.Background(), string(table)); err != nil {
- return nil, err
+ var connOption clickhouse.Options
+
+ if conf.ClickHouseUsername != "" && conf.ClickHousePassword != "" {
+ fmt.Println("Using provided username and password")
+ connOption = clickhouse.Options{
+ Addr: []string{fmt.Sprintf("%s:%d", conf.DBAddress, conf.DbPort)},
+ Debug: true,
+ Auth: clickhouse.Auth{
+ Username: conf.ClickHouseUsername,
+ Password: conf.ClickHousePassword,
+ },
+ }
+ } else {
+ fmt.Println("Using connection without username and password")
+ connOption = clickhouse.Options{
+ Addr: []string{fmt.Sprintf("%s:%d", conf.DBAddress, conf.DbPort)},
}
}
- stdconn := clickhouse.OpenDB(&clickhouse.Options{
- Addr: []string{fmt.Sprintf("%s:%d", conf.DBAddress, conf.DbPort)},
- })
+
+ stdconn := clickhouse.OpenDB(&connOption)
+
if err := stdconn.Ping(); err != nil {
if exception, ok := err.(*clickhouse.Exception); ok {
fmt.Printf("[%d] %s \n%s\n", exception.Code, exception.Message, exception.StackTrace)
} else {
- fmt.Println(err)
+ fmt.Println("Authentication error:", err)
}
- return nil, err
+ return nil, nil, err
}
- return &DBClient{splconn: splconn, conn: stdconn, conf: conf}, nil
+
+ return &DBClient{splconn: splconn, conn: stdconn, conf: conf}, stdconn, nil
}
+
func (c *DBClient) InsertContainerEventAzure(pushEvent model.AzureContainerPushEventPayload) {
- var (
- tx, _ = c.conn.Begin()
- stmt, _ = tx.Prepare(string(InsertAzureContainerPushEvent))
- )
+
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-container-azure")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertContainerEventAzure")
+ span.SetAttributes(attribute.String("container-azure-client", "insert"))
+ defer span.End()
+
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+
+ stmt, err := tx.Prepare(string(InsertAzureContainerPushEvent))
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
+
defer stmt.Close()
+
+ currentTime := time.Now().UTC()
+
registryURL := pushEvent.Request.Host
repositoryName := pushEvent.Target.Repository
tag := pushEvent.Target.Tag
@@ -108,7 +170,6 @@ func (c *DBClient) InsertContainerEventAzure(pushEvent model.AzureContainerPushE
size := pushEvent.Target.Size
shaID := pushEvent.Target.Digest
- // Marshaling the pushEvent into a JSON string
pushEventJSON, err := json.Marshal(pushEvent)
if err != nil {
log.Printf("Error while marshaling Azure Container Registry payload: %v", err)
@@ -121,9 +182,125 @@ func (c *DBClient) InsertContainerEventAzure(pushEvent model.AzureContainerPushE
tag,
imageName,
string(pushEventJSON),
- pushEvent.Timestamp,
size,
shaID,
+ currentTime,
+ ); err != nil {
+ log.Fatal(err)
+ }
+ if err := tx.Commit(); err != nil {
+ log.Fatal(err)
+ }
+}
+
+func (c *DBClient) InsertContainerEventQuay(pushEvent model.QuayImagePushPayload) {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-container-quay")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertContainerEventQuay")
+ span.SetAttributes(attribute.String("container-quay-client", "insert"))
+ defer span.End()
+
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+
+ stmt, err := tx.Prepare(string(InsertQuayContainerPushEvent))
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
+
+ defer stmt.Close()
+
+ currentTime := time.Now().UTC()
+
+ dockerURL := pushEvent.DockerURL
+ repository := pushEvent.Repository
+ name := pushEvent.Name
+ nameSpace := pushEvent.Namespace
+ homePage := pushEvent.Homepage
+
+ var tag string
+ if pushEvent.UpdatedTags != nil {
+ tag = strings.Join(pushEvent.UpdatedTags, ",")
+ } else {
+ tag = ""
+ }
+
+ pushEventJSON, err := json.Marshal(pushEvent)
+ if err != nil {
+ log.Printf("Error while marshaling Quay Container Registry payload: %v", err)
+ return
+ }
+
+ if _, err := stmt.Exec(
+ name,
+ repository,
+ nameSpace,
+ dockerURL,
+ homePage,
+ tag,
+ string(pushEventJSON),
+ currentTime,
+ ); err != nil {
+ log.Fatal(err)
+ }
+ if err := tx.Commit(); err != nil {
+ log.Fatal(err)
+ }
+}
+
+func (c *DBClient) InsertContainerEventJfrog(pushEvent model.JfrogContainerPushEventPayload) {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-container-jfrog")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertContainerEventJfrog")
+ span.SetAttributes(attribute.String("container-jfrog-client", "insert"))
+ defer span.End()
+
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+
+ stmt, err := tx.Prepare(string(InsertJfrogContainerPushEvent))
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
+
+ defer stmt.Close()
+
+ currentTime := time.Now().UTC()
+
+ registryURL := pushEvent.Data.Path
+ repositoryName := pushEvent.Data.Name
+ tag := pushEvent.Data.Tag
+
+ if tag == "" {
+ tag = "latest"
+ }
+ imageName := pushEvent.Data.ImageName
+ size := pushEvent.Data.Size
+ shaID := pushEvent.Data.SHA256
+
+ pushEventJSON, err := json.Marshal(pushEvent)
+ if err != nil {
+ log.Printf("Error while marshaling Jfrog Container Registry payload: %v", err)
+ return
+ }
+
+ if _, err := stmt.Exec(
+ pushEvent.Domain,
+ pushEvent.EventType,
+ registryURL,
+ repositoryName,
+ shaID,
+ size,
+ imageName,
+ tag,
+ string(pushEventJSON),
+ currentTime,
); err != nil {
log.Fatal(err)
}
@@ -133,11 +310,26 @@ func (c *DBClient) InsertContainerEventAzure(pushEvent model.AzureContainerPushE
}
func (c *DBClient) InsertRakeesMetrics(metrics model.RakeesMetrics) {
- var (
- tx, _ = c.conn.Begin()
- stmt, _ = tx.Prepare(string(InsertRakees))
- )
+
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-rakees-metrics")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertRakeesMetrics")
+ span.SetAttributes(attribute.String("rakees-client", "insert"))
+ defer span.End()
+
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+ stmt, err := tx.Prepare(string(InsertRakees))
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
+
defer stmt.Close()
+
+ currentTime := time.Now().UTC()
+
if _, err := stmt.Exec(
metrics.ClusterName,
metrics.Name,
@@ -145,6 +337,7 @@ func (c *DBClient) InsertRakeesMetrics(metrics model.RakeesMetrics) {
metrics.Delete,
metrics.List,
metrics.Update,
+ currentTime,
); err != nil {
log.Fatal(err)
}
@@ -152,18 +345,35 @@ func (c *DBClient) InsertRakeesMetrics(metrics model.RakeesMetrics) {
log.Fatal(err)
}
}
+
func (c *DBClient) InsertKetallEvent(metrics model.Resource) {
- var (
- tx, _ = c.conn.Begin()
- stmt, _ = tx.Prepare(string(InsertKetall))
- )
+
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-ketall-event")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertKetallEvent")
+ span.SetAttributes(attribute.String("ketall-client", "insert"))
+ defer span.End()
+
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+ stmt, err := tx.Prepare(string(InsertKetall))
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
+
defer stmt.Close()
+
+ currentTime := time.Now().UTC()
+
if _, err := stmt.Exec(
metrics.ClusterName,
metrics.Namespace,
metrics.Kind,
metrics.Resource,
metrics.Age,
+ currentTime,
); err != nil {
log.Fatal(err)
}
@@ -171,12 +381,82 @@ func (c *DBClient) InsertKetallEvent(metrics model.Resource) {
log.Fatal(err)
}
}
+
+func (c *DBClient) InsertKuberhealthyMetrics(metrics health.State) {
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-kuberhealthy")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertKuberhealthy")
+ span.SetAttributes(attribute.String("kuberhealthy-client", "insert"))
+ defer span.End()
+
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+
+ stmt, err := tx.Prepare(InsertKuberhealthy)
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
+
+ for checkName, checkdata := range metrics.CheckDetails {
+ ok := uint8(0)
+ if checkdata.OK {
+ ok = uint8(1)
+ }
+ errors := strings.Join(checkdata.Errors, ", ")
+ kcd := model.KuberhealthyCheckDetail{
+ CurrentUUID: checkdata.CurrentUUID,
+ CheckName: checkName,
+ OK: ok,
+ Errors: errors,
+ RunDuration: checkdata.RunDuration,
+ Namespace: checkdata.Namespace,
+ Node: checkdata.Node,
+ LastRun: checkdata.LastRun.Time.UTC(),
+ AuthoritativePod: checkdata.AuthoritativePod,
+ }
+ if _, err := stmt.Exec(
+ kcd.CurrentUUID,
+ kcd.CheckName,
+ kcd.OK,
+ kcd.Errors,
+ kcd.RunDuration,
+ kcd.Namespace,
+ kcd.Node,
+ kcd.LastRun,
+ kcd.AuthoritativePod,
+ ); err != nil {
+ log.Fatal(err)
+ }
+ }
+ if err := tx.Commit(); err != nil {
+ log.Fatal(err)
+ }
+ stmt.Close()
+}
+
func (c *DBClient) InsertOutdatedEvent(metrics model.CheckResultfinal) {
- var (
- tx, _ = c.conn.Begin()
- stmt, _ = tx.Prepare(string(InsertOutdated))
- )
+
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-outdated-event")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertOutdatedEvent")
+ span.SetAttributes(attribute.String("outdated-client", "insert"))
+ defer span.End()
+
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+ stmt, err := tx.Prepare(string(InsertOutdated))
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
+
defer stmt.Close()
+
+ currentTime := time.Now().UTC()
+
if _, err := stmt.Exec(
metrics.ClusterName,
metrics.Namespace,
@@ -185,6 +465,7 @@ func (c *DBClient) InsertOutdatedEvent(metrics model.CheckResultfinal) {
metrics.Current,
metrics.LatestVersion,
metrics.VersionsBehind,
+ currentTime,
); err != nil {
log.Fatal(err)
}
@@ -192,11 +473,24 @@ func (c *DBClient) InsertOutdatedEvent(metrics model.CheckResultfinal) {
log.Fatal(err)
}
}
+
func (c *DBClient) InsertDeprecatedAPI(deprecatedAPI model.DeprecatedAPI) {
- var (
- tx, _ = c.conn.Begin()
- stmt, _ = tx.Prepare(string(InsertDepricatedApi))
- )
+
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-depricated-event")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertDeprecatedAPI")
+ span.SetAttributes(attribute.String("depricated-client", "insert"))
+ defer span.End()
+
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+ stmt, err := tx.Prepare(string(InsertDepricatedApi))
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
+
defer stmt.Close()
deprecated := uint8(0)
@@ -204,6 +498,8 @@ func (c *DBClient) InsertDeprecatedAPI(deprecatedAPI model.DeprecatedAPI) {
deprecated = 1
}
+ currentTime := time.Now().UTC()
+
for _, item := range deprecatedAPI.Items {
if _, err := stmt.Exec(
deprecatedAPI.ClusterName,
@@ -212,6 +508,7 @@ func (c *DBClient) InsertDeprecatedAPI(deprecatedAPI model.DeprecatedAPI) {
deprecatedAPI.Kind,
deprecated,
item.Scope,
+ currentTime,
); err != nil {
log.Fatal(err)
}
@@ -221,17 +518,32 @@ func (c *DBClient) InsertDeprecatedAPI(deprecatedAPI model.DeprecatedAPI) {
log.Fatal(err)
}
}
+
func (c *DBClient) InsertDeletedAPI(deletedAPI model.DeletedAPI) {
- var (
- tx, _ = c.conn.Begin()
- stmt, _ = tx.Prepare(string(InsertDeletedApi))
- )
+
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-deletedapi")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertDeletedAPI")
+ span.SetAttributes(attribute.String("deletedapi-client", "insert"))
+ defer span.End()
+
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+ stmt, err := tx.Prepare(string(InsertDeletedApi))
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
+
defer stmt.Close()
deleted := uint8(0)
if deletedAPI.Deleted {
deleted = 1
}
+ currentTime := time.Now().UTC()
+
for _, item := range deletedAPI.Items {
if _, err := stmt.Exec(
deletedAPI.ClusterName,
@@ -242,6 +554,7 @@ func (c *DBClient) InsertDeletedAPI(deletedAPI model.DeletedAPI) {
deletedAPI.Name,
deleted,
item.Scope,
+ currentTime,
); err != nil {
log.Fatal(err)
}
@@ -251,11 +564,24 @@ func (c *DBClient) InsertDeletedAPI(deletedAPI model.DeletedAPI) {
log.Fatal(err)
}
}
+
func (c *DBClient) InsertKubvizEvent(metrics model.Metrics) {
- var (
- tx, _ = c.conn.Begin()
- stmt, _ = tx.Prepare(string(InsertKubvizEvent))
- )
+
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-kubviz-event")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertKubvizEvent")
+ span.SetAttributes(attribute.String("kubvizevent-client", "insert"))
+ defer span.End()
+
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+ stmt, err := tx.Prepare(string(InsertKubvizEvent))
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
+
defer stmt.Close()
eventJson, _ := json.Marshal(metrics.Event)
formattedFirstTimestamp := metrics.Event.FirstTimestamp.Time.UTC().Format("2006-01-02 15:04:05")
@@ -273,6 +599,7 @@ func (c *DBClient) InsertKubvizEvent(metrics model.Metrics) {
metrics.Event.Reason,
metrics.Event.Source.Host,
string(eventJson),
+ metrics.ImageName,
formattedFirstTimestamp,
formattedLastTimestamp,
); err != nil {
@@ -284,6 +611,12 @@ func (c *DBClient) InsertKubvizEvent(metrics model.Metrics) {
}
func (c *DBClient) InsertGitEvent(event string) {
ctx := context.Background()
+
+ tracer := otel.Tracer("insert-git-event")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertGitEvent")
+ span.SetAttributes(attribute.String("git-client", "insert"))
+ defer span.End()
+
batch, err := c.splconn.PrepareBatch(ctx, "INSERT INTO git_json")
if err != nil {
log.Fatal(err)
@@ -299,6 +632,12 @@ func (c *DBClient) InsertGitEvent(event string) {
}
func (c *DBClient) InsertContainerEvent(event string) {
ctx := context.Background()
+
+ tracer := otel.Tracer("insert-container-event")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertContainerEvent")
+ span.SetAttributes(attribute.String("container-client", "insert"))
+ defer span.End()
+
batch, err := c.splconn.PrepareBatch(ctx, "INSERT INTO container_bridge")
if err != nil {
log.Fatal(err)
@@ -314,18 +653,52 @@ func (c *DBClient) InsertContainerEvent(event string) {
}
func (c *DBClient) InsertKubeScoreMetrics(metrics model.KubeScoreRecommendations) {
- var (
- tx, _ = c.conn.Begin()
- stmt, _ = tx.Prepare(InsertKubeScore)
- )
+
+ ctx := context.Background()
+
+ tracer := otel.Tracer("insert-kubescore-event")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertKubeScoreMetrics")
+ span.SetAttributes(attribute.String("kubescore-client", "insert"))
+ defer span.End()
+
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+ defer tx.Rollback()
+ stmt, err := tx.Prepare(InsertKubeScore)
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
defer stmt.Close()
- if _, err := stmt.Exec(
- metrics.ID,
- metrics.Namespace,
- metrics.ClusterName,
- metrics.Recommendations,
- ); err != nil {
- log.Fatal(err)
+
+ currentTime := time.Now().UTC()
+
+ for _, result := range metrics.Report {
+ for _, check := range result.Checks {
+ for _, comments := range check.Comments {
+
+ if _, err := stmt.Exec(
+ metrics.ID,
+ metrics.ClusterName,
+ result.ObjectName,
+ result.TypeMeta.Kind,
+ result.TypeMeta.APIVersion,
+ result.ObjectMeta.Name,
+ result.ObjectMeta.Namespace,
+ check.Check.TargetType,
+ comments.Description,
+ comments.Path,
+ comments.Summary,
+ result.FileName,
+ int64(result.FileRow),
+ currentTime,
+ ); err != nil {
+ log.Println("Error while inserting KubeScore metrics:", err)
+ }
+ }
+
+ }
}
if err := tx.Commit(); err != nil {
log.Fatal(err)
@@ -333,13 +706,24 @@ func (c *DBClient) InsertKubeScoreMetrics(metrics model.KubeScoreRecommendations
}
func (c *DBClient) InsertTrivyMetrics(metrics model.Trivy) {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-trivy-metrics")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertTrivyMetrics")
+ span.SetAttributes(attribute.String("trivy-metrics-client", "insert"))
+ defer span.End()
+
for _, finding := range metrics.Report.Findings {
for _, result := range finding.Results {
for _, vulnerability := range result.Vulnerabilities {
- var (
- tx, _ = c.conn.Begin()
- stmt, _ = tx.Prepare(InsertTrivyVul)
- )
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+ stmt, err := tx.Prepare(InsertTrivyVul)
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
if _, err := stmt.Exec(
metrics.ID,
metrics.ClusterName,
@@ -365,11 +749,21 @@ func (c *DBClient) InsertTrivyMetrics(metrics model.Trivy) {
}
stmt.Close()
}
+
for _, misconfiguration := range result.Misconfigurations {
- var (
- tx, _ = c.conn.Begin()
- stmt, _ = tx.Prepare(InsertTrivyMisconfig)
- )
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+ stmt, err := tx.Prepare(InsertTrivyMisconfig)
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
+
+ defer stmt.Close()
+
+ currentTime := time.Now().UTC()
+
if _, err := stmt.Exec(
metrics.ID,
metrics.ClusterName,
@@ -386,25 +780,37 @@ func (c *DBClient) InsertTrivyMetrics(metrics model.Trivy) {
misconfiguration.Resolution,
misconfiguration.Severity,
string(misconfiguration.Status),
+ currentTime,
); err != nil {
log.Fatal(err)
}
if err := tx.Commit(); err != nil {
log.Fatal(err)
}
- stmt.Close()
}
}
}
}
func (c *DBClient) InsertTrivyImageMetrics(metrics model.TrivyImage) {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-trivy-image")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertTrivyImageMetrics")
+ span.SetAttributes(attribute.String("trivy-image-client", "insert"))
+ defer span.End()
+ currentTime := time.Now().UTC()
for _, result := range metrics.Report.Results {
for _, vulnerability := range result.Vulnerabilities {
- var (
- tx, _ = c.conn.Begin()
- stmt, _ = tx.Prepare(InsertTrivyImage)
- )
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+ stmt, err := tx.Prepare(InsertTrivyImage)
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
+
if _, err := stmt.Exec(
metrics.ID,
metrics.ClusterName,
@@ -424,6 +830,7 @@ func (c *DBClient) InsertTrivyImageMetrics(metrics model.TrivyImage) {
vulnerability.Severity,
vulnerability.PublishedDate,
vulnerability.LastModifiedDate,
+ currentTime,
); err != nil {
log.Fatal(err)
}
@@ -435,55 +842,95 @@ func (c *DBClient) InsertTrivyImageMetrics(metrics model.TrivyImage) {
}
}
-func (c *DBClient) InsertTrivySbomMetrics(metrics model.Reports) {
- log.Println("####started inserting value")
- result := metrics.Report
+func (c *DBClient) InsertTrivySbomMetrics(metrics model.Sbom) {
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-trivy-sbom")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertTrivySbomMetrics")
+ defer span.End()
+
tx, err := c.conn.Begin()
if err != nil {
- log.Println("error in conn Begin", err)
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
}
- defer tx.Rollback()
stmt, err := tx.Prepare(InsertTrivySbom)
if err != nil {
- log.Println("error in prepare", err)
+ log.Fatalf("error preparing statement: %v", err)
}
- defer stmt.Close()
- for _, com := range result.Components {
- if len(result.Metadata.Tools) == 0 || len(com.Properties) == 0 || len(com.Hashes) == 0 || len(com.Licenses) == 0 {
+
+ data := metrics.Report
+ bomFormat, _ := data["bomFormat"].(string) // e.g. "CycloneDX"
+ serialNumber, _ := data["serialNumber"].(string) // example value: urn:uuid:146625a5-531a-40fa-a205-174448c6c569
+
+ // fetching metadata
+ metadata, ok := data["metadata"].(map[string]interface{})
+ if !ok {
+ log.Println("error: metadata not found or not in expected format")
+ return
+ }
+
+ // inside metadata
+ // taking component
+ component, ok := metadata["component"].(map[string]interface{})
+ if !ok {
+ log.Println("error: component not found or not in expected format")
+ return
+ }
+ //timestamp, _ := metadata["timestamp"].(time.Time)
+ var eventTime time.Time
+ rawTimestamp, ok := metadata["timestamp"].(string)
+ if !ok {
+ log.Println("error: timestamp not found or not in expected format")
+ return
+ }
+ eventTime, err = time.Parse(time.RFC3339, rawTimestamp)
+ if err != nil {
+ log.Println("error parsing timestamp:", err)
+ return
+ }
+ // from the component, take bomRef, componentType, componentName, packageURL
+ bomRef, _ := component["bom-ref"].(string) //pkg:oci/redis@sha256:873c49204b64258778a1f34d23a962de526021e9a63b09236d6d7c86e2dd13e9?repository_url=public.ecr.aws%2Fdocker%2Flibrary%2Fredis\u0026arch=amd64
+ componentType, _ := component["type"].(string) //container
+ componentName, _ := component["name"].(string) //public.ecr.aws/docker/library/redis@sha256:873c49204b64258778a1f34d23a962de526021e9a63b09236d6d7c86e2dd13e9
+ packageURL, _ := component["purl"].(string) //pkg:oci/redis@sha256:873c49204b64258778a1f34d23a962de526021e9a63b09236d6d7c86e2dd13e9?repository_url=public.ecr.aws%2Fdocker%2Flibrary%2Fredis\u0026arch=amd64
+ // fetching other components
+ Components, ok := data["components"].([]interface{})
+ if !ok {
+ log.Println("error: components not found or not in expected format")
+ }
+ var otherComponentName string
+ // Iterate over the components to find the desired name
+ for _, otherComponent := range Components {
+ componentsMap, ok := otherComponent.(map[string]interface{})
+ if !ok {
+ log.Println("error: component not in expected format")
continue
}
- for _, depend := range result.Dependencies {
- if _, err := stmt.Exec(
- metrics.ID,
- result.Schema,
- result.BomFormat,
- result.SpecVersion,
- result.SerialNumber,
- int32(result.Version),
- result.Metadata.Timestamp,
- result.Metadata.Tools[0].Vendor,
- result.Metadata.Tools[0].Name,
- result.Metadata.Tools[0].Version,
- com.BomRef,
- com.Type,
- com.Name,
- com.Version,
- com.Properties[0].Name,
- com.Properties[0].Value,
- com.Hashes[0].Alg,
- com.Hashes[0].Content,
- com.Licenses[0].Expression,
- com.Purl,
- depend.Ref,
- ); err != nil {
- log.Fatal(err)
- }
+ if name, ok := componentsMap["name"].(string); ok {
+ otherComponentName = name // alpine
+ break
}
}
+
+ if _, err := stmt.Exec(
+ metrics.ID,
+ metrics.ClusterName,
+ bomFormat,
+ serialNumber,
+ bomRef,
+ componentName,
+ componentType,
+ packageURL,
+ eventTime,
+ otherComponentName,
+ ); err != nil {
+ log.Fatal(err)
+ }
if err := tx.Commit(); err != nil {
log.Fatal(err)
}
- log.Println("value inserted")
+ stmt.Close()
}
func (c *DBClient) Close() {
_ = c.conn.Close()
@@ -585,11 +1032,26 @@ func (c *DBClient) RetrieveKubvizEvent() ([]model.DbEvent, error) {
}
func (c *DBClient) InsertContainerEventDockerHub(build model.DockerHubBuild) {
- var (
- tx, _ = c.conn.Begin()
- stmt, _ = tx.Prepare(string(InsertDockerHubBuild))
- )
+
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-container-dockerhub")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertContainerEventDockerHub")
+ span.SetAttributes(attribute.String("container-dockerhub-client", "insert"))
+ defer span.End()
+
+ tx, err := c.conn.Begin()
+ if err != nil {
+ log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
+ }
+ stmt, err := tx.Prepare(string(InsertDockerHubBuild))
+
+ if err != nil {
+ log.Fatalf("error preparing statement: %v", err)
+ }
defer stmt.Close()
+
+ currentTime := time.Now().UTC()
+
if _, err := stmt.Exec(
build.PushedBy,
build.ImageTag,
@@ -597,6 +1059,7 @@ func (c *DBClient) InsertContainerEventDockerHub(build model.DockerHubBuild) {
build.DateCreated,
build.Owner,
build.Event,
+ currentTime,
); err != nil {
log.Fatal(err)
}
@@ -606,6 +1069,13 @@ func (c *DBClient) InsertContainerEventDockerHub(build model.DockerHubBuild) {
}
func (c *DBClient) InsertContainerEventGithub(event string) {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("insert-container-github")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "InsertContainerEventGithub")
+ span.SetAttributes(attribute.String("container-github-client", "insert"))
+ defer span.End()
+
var image model.GithubImage
err := json.Unmarshal([]byte(event), &image)
if err != nil {
@@ -653,6 +1123,9 @@ func (c *DBClient) InsertGitCommon(metrics model.GitCommonAttribute, statement d
return err
}
defer stmt.Close()
+
+ currentTime := time.Now().UTC()
+
if _, err := stmt.Exec(
metrics.Author,
metrics.GitProvider,
@@ -660,7 +1133,7 @@ func (c *DBClient) InsertGitCommon(metrics model.GitCommonAttribute, statement d
metrics.CommitUrl,
metrics.EventType,
metrics.RepoName,
- metrics.TimeStamp,
+ currentTime,
metrics.Event,
); err != nil {
return err
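
InsertContainerEventQuay above reads a handful of fields off model.QuayImagePushPayload, which is not included in this diff. The field names below follow Quay's repository-push webhook payload; treat the struct as an illustrative sketch rather than the repo's actual definition:

```go
// Sketch of the payload shape consumed by InsertContainerEventQuay.
package model

type QuayImagePushPayload struct {
	Name        string   `json:"name"`
	Repository  string   `json:"repository"`
	Namespace   string   `json:"namespace"`
	DockerURL   string   `json:"docker_url"`
	Homepage    string   `json:"homepage"`
	UpdatedTags []string `json:"updated_tags"`
}
```
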
diff --git a/client/pkg/clickhouse/statements.go b/client/pkg/clickhouse/statements.go
index 7646af14..c75571c3 100644
--- a/client/pkg/clickhouse/statements.go
+++ b/client/pkg/clickhouse/statements.go
@@ -26,9 +26,11 @@ CREATE TABLE IF NOT EXISTS rakkess (
Create String,
Delete String,
List String,
- Update String
+ Update String,
+ EventTime DateTime('UTC')
) engine=File(TabSeparated)
`
+
const kubePugDepricatedTable DBStatement = `
CREATE TABLE IF NOT EXISTS DeprecatedAPIs (
ClusterName String,
@@ -36,9 +38,11 @@ CREATE TABLE IF NOT EXISTS DeprecatedAPIs (
Description String,
Kind String,
Deprecated UInt8,
- Scope String
+ Scope String,
+ EventTime DateTime('UTC')
) engine=File(TabSeparated)
`
+
const kubepugDeletedTable DBStatement = `
CREATE TABLE IF NOT EXISTS DeletedAPIs (
ClusterName String,
@@ -48,18 +52,37 @@ CREATE TABLE IF NOT EXISTS DeletedAPIs (
Version String,
Name String,
Deleted UInt8,
- Scope String
+ Scope String,
+ EventTime DateTime('UTC')
) engine=File(TabSeparated)
`
-const ketallTable DBStatement = `
-CREATE TABLE IF NOT EXISTS getall_resources (
- ClusterName String,
- Namespace String,
- Kind String,
- Resource String,
- Age String
+
+const jfrogContainerPushEventTable DBStatement = `
+CREATE TABLE IF NOT EXISTS jfrogcontainerpush (
+ Domain String,
+ EventType String,
+ RegistryURL String,
+ RepositoryName String,
+ SHAID String,
+ Size Int32,
+ ImageName String,
+ Tag String,
+ Event String,
+ EventTime DateTime('UTC')
) engine=File(TabSeparated)
`
+
+const ketallTable DBStatement = `
+ CREATE TABLE IF NOT EXISTS getall_resources (
+ ClusterName String,
+ Namespace String,
+ Kind String,
+ Resource String,
+ Age String,
+ EventTime DateTime('UTC')
+ ) engine=File(TabSeparated)
+ `
+
const outdateTable DBStatement = `
CREATE TABLE IF NOT EXISTS outdated_images (
ClusterName String,
@@ -68,17 +91,21 @@ CREATE TABLE IF NOT EXISTS outdated_images (
CurrentImage String,
CurrentTag String,
LatestVersion String,
- VersionsBehind Int64
+ VersionsBehind Int64,
+ EventTime DateTime('UTC')
) engine=File(TabSeparated)
`
+
const kubescoreTable DBStatement = `
- CREATE TABLE IF NOT EXISTS kubescore (
- id UUID,
- namespace String,
- cluster_name String,
- recommendations String
- ) engine=File(TabSeparated)
- `
+CREATE TABLE IF NOT EXISTS kubescore (
+ id UUID,
+ namespace String,
+ cluster_name String,
+ recommendations String,
+ EventTime DateTime('UTC')
+) engine=File(TabSeparated)
+`
+
const trivyTableVul DBStatement = `
CREATE TABLE IF NOT EXISTS trivy_vul (
id UUID,
@@ -101,24 +128,26 @@ const trivyTableVul DBStatement = `
`
const trivyTableMisconfig DBStatement = `
- CREATE TABLE IF NOT EXISTS trivy_misconfig (
- id UUID,
- cluster_name String,
- namespace String,
- kind String,
- name String,
- misconfig_id String,
- misconfig_avdid String,
- misconfig_type String,
- misconfig_title String,
- misconfig_desc String,
- misconfig_msg String,
- misconfig_query String,
- misconfig_resolution String,
- misconfig_severity String,
- misconfig_status String
- ) engine=File(TabSeparated)
+ CREATE TABLE IF NOT EXISTS trivy_misconfig (
+ id UUID,
+ cluster_name String,
+ namespace String,
+ kind String,
+ name String,
+ misconfig_id String,
+ misconfig_avdid String,
+ misconfig_type String,
+ misconfig_title String,
+ misconfig_desc String,
+ misconfig_msg String,
+ misconfig_query String,
+ misconfig_resolution String,
+ misconfig_severity String,
+ misconfig_status String,
+ EventTime DateTime('UTC')
+ ) engine=File(TabSeparated)
`
+
const trivyTableImage DBStatement = `
CREATE TABLE IF NOT EXISTS trivyimage (
id UUID,
@@ -142,9 +171,11 @@ const dockerHubBuildTable DBStatement = `
RepositoryName String,
DateCreated String,
Owner String,
- Event String
+ Event String,
+ EventTime DateTime('UTC')
) engine=File(TabSeparated)
`
+
const azureContainerPushEventTable DBStatement = `
CREATE TABLE IF NOT EXISTS azurecontainerpush (
RegistryURL String,
@@ -152,50 +183,54 @@ const azureContainerPushEventTable DBStatement = `
Tag String,
ImageName String,
Event String,
- Timestamp String,
Size Int32,
- SHAID String
+ SHAID String,
+ EventTime DateTime('UTC')
+ ) engine=File(TabSeparated)
+ `
+
+const quayContainerPushEventTable DBStatement = `
+ CREATE TABLE IF NOT EXISTS quaycontainerpush (
+ name String,
+ repository String,
+ nameSpace String,
+ dockerURL String,
+ homePage String,
+ tag String,
+ Event String,
+ EventTime DateTime('UTC')
) engine=File(TabSeparated)
`
+
const trivySbomTable DBStatement = `
CREATE TABLE IF NOT EXISTS trivysbom (
id UUID,
- schema String,
- bom_format String,
- spec_version String,
- serial_number String,
+ cluster_name String,
+ image_name String,
+ package_name String,
+ package_url String,
+ bom_ref String,
+ serial_number String,
version INTEGER,
- metadata_timestamp DateTime('UTC'),
- metatool_vendor String,
- metatool_name String,
- metatool_version String,
- component_bom_ref String,
- component_type String,
- component_name String,
- component_version String,
- component_property_name String,
- component_property_value String,
- component_hash_alg String,
- component_hash_content String,
- component_license_exp String,
- component_purl String,
- dependency_ref String
+ bom_format String,
+ component_type String,
+ event_time DateTime('UTC'),
+ other_component_name String
) engine=File(TabSeparated)
`
-const InsertDockerHubBuild DBStatement = "INSERT INTO dockerhubbuild (PushedBy, ImageTag, RepositoryName, DateCreated, Owner, Event) VALUES (?, ?, ?, ?, ?, ?)"
-const InsertRakees DBStatement = "INSERT INTO rakkess (ClusterName, Name, Create, Delete, List, Update) VALUES (?, ?, ?, ?, ?, ?)"
-const InsertKetall DBStatement = "INSERT INTO getall_resources (ClusterName, Namespace, Kind, Resource, Age) VALUES (?, ?, ?, ?, ?)"
-const InsertOutdated DBStatement = "INSERT INTO outdated_images (ClusterName, Namespace, Pod, CurrentImage, CurrentTag, LatestVersion, VersionsBehind) VALUES (?, ?, ?, ?, ?, ?, ?)"
-const InsertDepricatedApi DBStatement = "INSERT INTO DeprecatedAPIs (ClusterName, ObjectName, Description, Kind, Deprecated, Scope) VALUES (?, ?, ?, ?, ?, ?)"
-const InsertDeletedApi DBStatement = "INSERT INTO DeletedAPIs (ClusterName, ObjectName, Group, Kind, Version, Name, Deleted, Scope) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
-const InsertKubvizEvent DBStatement = "INSERT INTO events (ClusterName, Id, EventTime, OpType, Name, Namespace, Kind, Message, Reason, Host, Event, FirstTime, LastTime) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
+const InsertDockerHubBuild DBStatement = "INSERT INTO dockerhubbuild (PushedBy, ImageTag, RepositoryName, DateCreated, Owner, Event, EventTime) VALUES (?, ?, ?, ?, ?, ?, ?)"
+const InsertRakees DBStatement = "INSERT INTO rakkess (ClusterName, Name, Create, Delete, List, Update, EventTime) VALUES (?, ?, ?, ?, ?, ?, ?)"
+const InsertKetall DBStatement = "INSERT INTO getall_resources (ClusterName, Namespace, Kind, Resource, Age, EventTime) VALUES (?, ?, ?, ?, ?, ?)"
+const InsertOutdated DBStatement = "INSERT INTO outdated_images (ClusterName, Namespace, Pod, CurrentImage, CurrentTag, LatestVersion, VersionsBehind, EventTime) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
+const InsertDepricatedApi DBStatement = "INSERT INTO DeprecatedAPIs (ClusterName, ObjectName, Description, Kind, Deprecated, Scope, EventTime) VALUES (?, ?, ?, ?, ?, ?, ?)"
+const InsertDeletedApi DBStatement = "INSERT INTO DeletedAPIs (ClusterName, ObjectName, Group, Kind, Version, Name, Deleted, Scope, EventTime) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"
+const InsertKubvizEvent DBStatement = "INSERT INTO events (ClusterName, Id, EventTime, OpType, Name, Namespace, Kind, Message, Reason, Host, Event, ImageName, FirstTime, LastTime) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
const clickhouseExperimental DBStatement = `SET allow_experimental_object_type=1;`
-const containerDockerhubTable DBStatement = `CREATE table IF NOT EXISTS container_dockerhub(event JSON) ENGINE = MergeTree ORDER BY tuple();`
const containerGithubTable DBStatement = `CREATE table IF NOT EXISTS container_github(event JSON) ENGINE = MergeTree ORDER BY tuple();`
-const InsertKubeScore string = "INSERT INTO kubescore (id, namespace, cluster_name, recommendations) VALUES (?, ?, ?, ?)"
+const InsertKubeScore string = "INSERT INTO kubescore(id,clustername,object_name,kind,apiVersion,name,namespace,target_type,description,path,summary,file_name,file_row,EventTime) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)"
const InsertTrivyVul string = "INSERT INTO trivy_vul (id, cluster_name, namespace, kind, name, vul_id, vul_vendor_ids, vul_pkg_id, vul_pkg_name, vul_pkg_path, vul_installed_version, vul_fixed_version, vul_title, vul_severity, vul_published_date, vul_last_modified_date) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?. ?)"
-const InsertTrivyImage string = "INSERT INTO trivyimage (id, cluster_name, artifact_name, vul_id, vul_pkg_id, vul_pkg_name, vul_installed_version, vul_fixed_version, vul_title, vul_severity, vul_published_date, vul_last_modified_date) VALUES ( ?, ?,?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
-const InsertTrivyMisconfig string = "INSERT INTO trivy_misconfig (id, cluster_name, namespace, kind, name, misconfig_id, misconfig_avdid, misconfig_type, misconfig_title, misconfig_desc, misconfig_msg, misconfig_query, misconfig_resolution, misconfig_severity, misconfig_status) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?. ?, ?)"
-const InsertAzureContainerPushEvent DBStatement = "INSERT INTO azurecontainerpush (RegistryURL, RepositoryName, Tag, ImageName, Event, Timestamp, Size, SHAID) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
-const InsertTrivySbom string = "INSERT INTO trivysbom (id, schema, bom_format,spec_version,serial_number, version, metadata_timestamp,metatool_vendor,metatool_name,metatool_version,component_bom_ref,component_type,component_name,component_version,component_property_name,component_property_value,component_hash_alg,component_hash_content,component_license_exp,component_purl,dependency_ref) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"
+const InsertTrivyImage string = "INSERT INTO trivyimage (id, cluster_name, artifact_name, vul_id, vul_pkg_id, vul_pkg_name, vul_installed_version, vul_fixed_version, vul_title, vul_severity, vul_published_date, vul_last_modified_date,EventTime) VALUES ( ?, ?,?, ?, ?, ?, ?, ?, ?, ?, ?, ?,?)"
+const InsertTrivyMisconfig string = "INSERT INTO trivy_misconfig (id, cluster_name, namespace, kind, name, misconfig_id, misconfig_avdid, misconfig_type, misconfig_title, misconfig_desc, misconfig_msg, misconfig_query, misconfig_resolution, misconfig_severity, misconfig_status, EventTime) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
+const InsertAzureContainerPushEvent DBStatement = "INSERT INTO azurecontainerpush (RegistryURL, RepositoryName, Tag, ImageName, Event, Size, SHAID, EventTime) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?)"
+const InsertTrivySbom string = "INSERT INTO trivysbom (id, cluster_name, bom_format, serial_number, bom_ref, image_name, component_type, package_url, event_time, other_component_name) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
+const InsertQuayContainerPushEvent DBStatement = "INSERT INTO quaycontainerpush (name, repository, nameSpace, dockerURL, homePage, tag, Event, EventTime) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
+const InsertJfrogContainerPushEvent DBStatement = "INSERT INTO jfrogcontainerpush (Domain, EventType, RegistryURL, RepositoryName, SHAID, Size, ImageName, Tag, Event, EventTime) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
+const InsertKuberhealthy string = "INSERT INTO kuberhealthy (CurrentUUID, CheckName, OK, Errors, RunDuration, Namespace, Node, LastRun, AuthoritativePod) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"
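
Several of the INSERT constants above gained an EventTime column in this change, and the matching placeholder lists were updated by hand. A small table-driven test along these lines (not part of the PR; the assertion is only a rough comma/question-mark count) can catch the column/placeholder drift that previously slipped into statements such as InsertTrivyVul:

```go
package clickhouse

import (
	"strings"
	"testing"
)

// TestInsertPlaceholderCounts checks that each INSERT constant has as many
// '?' placeholders as it lists columns.
func TestInsertPlaceholderCounts(t *testing.T) {
	stmts := map[string]string{
		"InsertAzureContainerPushEvent": string(InsertAzureContainerPushEvent),
		"InsertQuayContainerPushEvent":  string(InsertQuayContainerPushEvent),
		"InsertJfrogContainerPushEvent": string(InsertJfrogContainerPushEvent),
	}
	for name, stmt := range stmts {
		parts := strings.SplitN(stmt, "VALUES", 2)
		if len(parts) != 2 {
			t.Fatalf("%s: no VALUES clause", name)
		}
		columns := strings.Count(parts[0], ",") + 1
		placeholders := strings.Count(parts[1], "?")
		if columns != placeholders {
			t.Errorf("%s: %d columns but %d placeholders", name, columns, placeholders)
		}
	}
}
```
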
diff --git a/client/pkg/clients/bridge_client.go b/client/pkg/clients/bridge_client.go
index 87f93014..9b721805 100644
--- a/client/pkg/clients/bridge_client.go
+++ b/client/pkg/clients/bridge_client.go
@@ -1,6 +1,7 @@
package clients
import (
+ "context"
"encoding/json"
"errors"
"log"
@@ -15,7 +16,10 @@ import (
"github.com/intelops/kubviz/gitmodels/azuremodel"
"github.com/intelops/kubviz/gitmodels/dbstatement"
"github.com/intelops/kubviz/model"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
"github.com/nats-io/nats.go"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
)
// ErrHeaderEmpty defines an error occur when header is empty in git stream
@@ -36,6 +40,13 @@ const (
// the respective funcs to insert data into clickhouse DB
func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
log.Printf("Creating nats consumer %s with subject: %s \n", bridgeConsumer, bridgeSubject)
+
+ ctx := context.Background()
+ tracer := otel.Tracer("git-client")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "SubscribeGitBridgeNats")
+ span.SetAttributes(attribute.String("git-subscribe", "Subscribe"))
+ defer span.End()
+
n.stream.Subscribe(string(bridgeSubject), func(msg *nats.Msg) {
msg.Ack()
gitprovider := msg.Header.Get("GitProvider")
@@ -89,7 +100,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
gca.CommitUrl = pl.Resource.Repository.RemoteURL + "/commit/" + pl.Resource.RefUpdates[0].NewObjectID
gca.EventType = string(azuremodel.GitPushEventType)
gca.RepoName = pl.Resource.Repository.Name
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertAzureDevops)
log.Println("Inserted AzureDevops metrics:", string(msg.Data))
@@ -108,7 +119,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
gca.CommitUrl = pl.Resource.Repository.RemoteURL + "/commit/" + pl.Resource.LastMergeCommit.CommitID
gca.EventType = string(azuremodel.GitPullRequestMergedEventType)
gca.RepoName = pl.Resource.Repository.Name
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertAzureDevops)
log.Println("Inserted AzureDevops metrics:", string(msg.Data))
@@ -121,7 +132,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
gca.CommitID = ""
gca.CommitUrl = ""
gca.EventType = event
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertAzureDevops)
log.Println("Inserted GitHub metrics:", string(msg.Data))
@@ -155,7 +166,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
}
gca.EventType = string(github.PushEvent)
gca.RepoName = pl.Repository.Name
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertGithub)
log.Println("Inserted GitHub metrics:", string(msg.Data))
@@ -179,7 +190,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
gca.CommitUrl = pl.PullRequest.HTMLURL
gca.EventType = string(github.PullRequestEvent)
gca.RepoName = pl.Repository.Name
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertGithub)
log.Println("Inserted GitHub metrics:", string(msg.Data))
@@ -193,7 +204,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
gca.CommitID = ""
gca.CommitUrl = ""
gca.EventType = event
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertGithub)
log.Println("Inserted GitHub metrics:", string(msg.Data))
@@ -219,7 +230,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
gca.CommitUrl = pl.CompareURL
gca.EventType = string(gitea.PushEvent)
gca.RepoName = pl.Repo.Name
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertGitea)
log.Println("Inserted Gitea metrics:", string(msg.Data))
@@ -253,7 +264,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
} else {
gca.RepoName = ""
}
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertGitea)
log.Println("Inserted Gitea metrics:", string(msg.Data))
@@ -265,7 +276,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
gca.CommitID = ""
gca.CommitUrl = ""
gca.EventType = event
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.RepoName = ""
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertGitea)
@@ -292,7 +303,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
gca.CommitUrl = pl.Project.WebURL + "/commit/" + pl.After
gca.EventType = string(gitlab.PushEvents)
gca.RepoName = pl.Project.Name
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertGitlab)
log.Println("Inserted Gitlab metrics:", string(msg.Data))
@@ -312,7 +323,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
gca.CommitUrl = pl.ObjectAttributes.URL
gca.EventType = string(gitlab.MergeRequestEvents)
gca.RepoName = pl.Project.Name
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertGitlab)
log.Println("Inserted Gitlab metrics:", string(msg.Data))
@@ -325,7 +336,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
gca.CommitID = ""
gca.CommitUrl = ""
gca.EventType = event
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.RepoName = ""
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertGitlab)
@@ -353,7 +364,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
}
gca.EventType = string(bitbucket.RepoPushEvent)
gca.RepoName = pl.Repository.Name
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertBitbucket)
log.Println("Inserted BitBucket metrics:", string(msg.Data))
@@ -372,7 +383,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
gca.CommitUrl = pl.PullRequest.Links.HTML.Href
gca.EventType = string(bitbucket.PullRequestMergedEvent)
gca.RepoName = pl.Repository.Name
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertBitbucket)
log.Println("Inserted BitBucket metrics:", string(msg.Data))
@@ -384,7 +395,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) {
gca.CommitID = ""
gca.CommitUrl = ""
gca.EventType = event
- gca.TimeStamp = time.Now().Format(time.DateTime)
+ gca.TimeStamp = time.Now().UTC()
gca.RepoName = ""
gca.Event = string(msg.Data)
conn.InsertGitCommon(gca, dbstatement.InsertBitbucket)
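
Every branch above now assigns gca.TimeStamp = time.Now().UTC() instead of a pre-formatted string, which implies the TimeStamp field of model.GitCommonAttribute is (or becomes) a time.Time that clickhouse-go can bind straight to a DateTime('UTC') column. A sketch of the struct as this code uses it (the real definition lives in kubviz/model and is not shown here):

```go
// Sketch only; field names mirror the assignments in bridge_client.go and
// the Exec call in InsertGitCommon.
package model

import "time"

type GitCommonAttribute struct {
	Author      string
	GitProvider string
	CommitID    string
	CommitUrl   string
	EventType   string
	RepoName    string
	TimeStamp   time.Time // previously a string produced by time.Format
	Event       string
}
```
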
diff --git a/client/pkg/clients/clients.go b/client/pkg/clients/clients.go
index 0ac4fef2..bee3fa2e 100644
--- a/client/pkg/clients/clients.go
+++ b/client/pkg/clients/clients.go
@@ -7,6 +7,7 @@ import (
"github.com/intelops/kubviz/client/pkg/clickhouse"
"github.com/intelops/kubviz/client/pkg/config"
+ "github.com/intelops/kubviz/pkg/mtlsnats"
"github.com/nats-io/nats.go"
)
@@ -21,9 +22,34 @@ func NewNATSContext(conf *config.Config, dbClient clickhouse.DBInterface) (*NATS
log.Println("Waiting before connecting to NATS at:", conf.NatsAddress)
time.Sleep(1 * time.Second)
- conn, err := nats.Connect(conf.NatsAddress, nats.Name("Github metrics"), nats.Token(conf.NatsToken))
- if err != nil {
- return nil, err
+ //conn, err := nats.Connect(conf.NatsAddress, nats.Name("Github metrics"), nats.Token(conf.NatsToken))
+
+ var conn *nats.Conn
+ var err error
+ var mtlsConfig mtlsnats.MtlsConfig
+
+ if mtlsConfig.IsEnabled {
+ tlsConfig, err := mtlsnats.GetTlsConfig()
+ if err != nil {
+ log.Println("error while getting tls config ", err)
+ time.Sleep(time.Minute * 30)
+ } else {
+ conn, err = nats.Connect(conf.NatsAddress,
+ nats.Name("Github metrics"),
+ nats.Token(conf.NatsToken),
+ nats.Secure(tlsConfig),
+ )
+ if err != nil {
+ log.Println("error while connecting with mtls ", err)
+ }
+ }
+ }
+
+ if conn == nil {
+ conn, err = nats.Connect(conf.NatsAddress, nats.Name("Github metrics"), nats.Token(conf.NatsToken))
+ if err != nil {
+ return nil, fmt.Errorf("error while connecting with token: %w", err)
+ }
}
ctx := &NATSContext{
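
NewNATSContext now tries a mutual-TLS connection first and only falls back to token auth when no TLS connection was established. The pkg/mtlsnats helpers are outside this diff; a sketch of what GetTlsConfig could do, with the certificate paths purely illustrative:

```go
// Sketch only; the concrete pkg/mtlsnats implementation is not part of this PR.
package mtlsnats

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"
)

// MtlsConfig carries the toggle checked in NewNATSContext.
type MtlsConfig struct {
	IsEnabled bool
}

// GetTlsConfig loads a client certificate/key pair and a CA bundle so the
// NATS client can present and verify certificates (mutual TLS).
func GetTlsConfig() (*tls.Config, error) {
	cert, err := tls.LoadX509KeyPair("/etc/nats/certs/client.crt", "/etc/nats/certs/client.key")
	if err != nil {
		return nil, fmt.Errorf("load client key pair: %w", err)
	}
	caPEM, err := os.ReadFile("/etc/nats/certs/ca.crt")
	if err != nil {
		return nil, fmt.Errorf("read CA bundle: %w", err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return nil, fmt.Errorf("failed to parse CA bundle")
	}
	return &tls.Config{
		Certificates: []tls.Certificate{cert},
		RootCAs:      pool,
		MinVersion:   tls.VersionTLS12,
	}, nil
}
```
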
diff --git a/client/pkg/clients/container_client.go b/client/pkg/clients/container_client.go
index 3688a102..ff571614 100644
--- a/client/pkg/clients/container_client.go
+++ b/client/pkg/clients/container_client.go
@@ -1,6 +1,7 @@
package clients
import (
+ "context"
"encoding/json"
"errors"
"log"
@@ -8,7 +9,10 @@ import (
"github.com/intelops/kubviz/client/pkg/clickhouse"
"github.com/intelops/kubviz/model"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
"github.com/nats-io/nats.go"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
)
var (
@@ -26,6 +30,13 @@ const (
)
func (n *NATSContext) SubscribeContainerNats(conn clickhouse.DBInterface) {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("container-client")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "SubscribeContainerNats")
+ span.SetAttributes(attribute.String("container-subscribe", "Subscribe"))
+ defer span.End()
+
n.stream.Subscribe(string(containerSubject), func(msg *nats.Msg) {
msg.Ack()
repoName := msg.Header.Get("REPO_NAME")
@@ -59,6 +70,26 @@ func (n *NATSContext) SubscribeContainerNats(conn clickhouse.DBInterface) {
// Extract the necessary information from pushEvent and insert into ClickHouse
conn.InsertContainerEventAzure(pushEvent)
log.Println("Inserted Azure Container Registry metrics:", string(msg.Data))
+ } else if repoName == "Quay_Container_Registry" {
+ var pushEvent model.QuayImagePushPayload
+ err := json.Unmarshal(msg.Data, &pushEvent)
+ if err != nil {
+ log.Printf("Error while unmarshaling Quay Container Registry payload: %v", err)
+ return
+ }
+ // Extract the necessary information from pushEvent and insert into ClickHouse
+ conn.InsertContainerEventQuay(pushEvent)
+ log.Println("Inserted Quay Container Registry metrics:", string(msg.Data))
+ } else if repoName == "Jfrog_Container_Registry" {
+ var pushEvent model.JfrogContainerPushEventPayload
+ err := json.Unmarshal(msg.Data, &pushEvent)
+ if err != nil {
+ log.Printf("Error while unmarshaling Jfrog Container Registry payload: %v", err)
+ return
+ }
+ // Extract the necessary information from pushEvent and insert into ClickHouse.
+ conn.InsertContainerEventJfrog(pushEvent)
+ log.Println("Inserted Jfrog Container Registry metrics:", string(msg.Data))
}
}, nats.Durable(string(containerConsumer)), nats.ManualAck())
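
The dispatch above keys off the REPO_NAME message header, so whichever agent publishes container events has to set it. An illustrative publisher is sketched below; the subject literal is a placeholder, while the header name and the Quay_Container_Registry value come from this diff:

```go
package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

// Placeholder subject; the real value comes from the repo's constants package.
const containerSubject = "CONTAINERMETRICS.git"

// publishQuayEvent tags the payload so SubscribeContainerNats routes it to
// InsertContainerEventQuay on the client side.
func publishQuayEvent(js nats.JetStreamContext, payload []byte) error {
	msg := nats.NewMsg(containerSubject)
	msg.Header.Set("REPO_NAME", "Quay_Container_Registry")
	msg.Data = payload
	_, err := js.PublishMsg(msg)
	return err
}

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}
	if err := publishQuayEvent(js, []byte(`{"name":"demo","repository":"demo/app"}`)); err != nil {
		log.Fatal(err)
	}
}
```
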
diff --git a/client/pkg/clients/kubviz_client.go b/client/pkg/clients/kubviz_client.go
index 394bf8a1..2e83a8b5 100644
--- a/client/pkg/clients/kubviz_client.go
+++ b/client/pkg/clients/kubviz_client.go
@@ -1,13 +1,20 @@
package clients
import (
+ "context"
"encoding/json"
"log"
"github.com/intelops/kubviz/constants"
+ "github.com/intelops/kubviz/pkg/opentelemetry"
+ "github.com/kelseyhightower/envconfig"
+ "github.com/kuberhealthy/kuberhealthy/v2/pkg/health"
"github.com/nats-io/nats.go"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
"github.com/intelops/kubviz/client/pkg/clickhouse"
+ "github.com/intelops/kubviz/client/pkg/config"
"github.com/intelops/kubviz/model"
)
@@ -18,6 +25,16 @@ type SubscriptionInfo struct {
}
func (n *NATSContext) SubscribeAllKubvizNats(conn clickhouse.DBInterface) {
+
+ ctx := context.Background()
+ tracer := otel.Tracer("kubviz-client")
+ _, span := tracer.Start(opentelemetry.BuildContext(ctx), "SubscribeAllKubvizNats")
+ span.SetAttributes(attribute.String("kubviz-subscribe", "subscribe"))
+ defer span.End()
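+ // Consumer names are read from environment variables (see client/pkg/config.Config).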
+ cfg := &config.Config{}
+ if err := envconfig.Process("", cfg); err != nil {
+ log.Fatalf("Could not parse env Config: %v", err)
+ }
subscribe := func(sub SubscriptionInfo) {
n.stream.Subscribe(sub.Subject, sub.Handler, nats.Durable(sub.Consumer), nats.ManualAck())
}
@@ -25,7 +42,7 @@ func (n *NATSContext) SubscribeAllKubvizNats(conn clickhouse.DBInterface) {
subscriptions := []SubscriptionInfo{
{
Subject: constants.KetallSubject,
- Consumer: constants.KetallConsumer,
+ Consumer: cfg.KetallConsumer,
Handler: func(msg *nats.Msg) {
msg.Ack()
var metrics model.Resource
@@ -40,7 +57,7 @@ func (n *NATSContext) SubscribeAllKubvizNats(conn clickhouse.DBInterface) {
},
{
Subject: constants.RakeesSubject,
- Consumer: constants.RakeesConsumer,
+ Consumer: cfg.RakeesConsumer,
Handler: func(msg *nats.Msg) {
msg.Ack()
var metrics model.RakeesMetrics
@@ -53,9 +70,25 @@ func (n *NATSContext) SubscribeAllKubvizNats(conn clickhouse.DBInterface) {
log.Println()
},
},
+ {
+ Subject: constants.KUBERHEALTHY_SUBJECT,
+ Consumer: cfg.KuberhealthyConsumer,
+ Handler: func(msg *nats.Msg) {
+ msg.Ack()
+ var metrics health.State
+ err := json.Unmarshal(msg.Data, &metrics)
+ if err != nil {
+ log.Println("failed to unmarshal from nats", err)
+ return
+ }
+ log.Printf("Kuberhealthy Metrics Received: %#v,", metrics)
+ conn.InsertKuberhealthyMetrics(metrics)
+ log.Println()
+ },
+ },
{
Subject: constants.OutdatedSubject,
- Consumer: constants.OutdatedConsumer,
+ Consumer: cfg.OutdatedConsumer,
Handler: func(msg *nats.Msg) {
msg.Ack()
var metrics model.CheckResultfinal
@@ -70,7 +103,7 @@ func (n *NATSContext) SubscribeAllKubvizNats(conn clickhouse.DBInterface) {
},
{
Subject: constants.DeprecatedSubject,
- Consumer: constants.DeprecatedConsumer,
+ Consumer: cfg.DeprecatedConsumer,
Handler: func(msg *nats.Msg) {
msg.Ack()
var metrics model.DeprecatedAPI
@@ -85,7 +118,7 @@ func (n *NATSContext) SubscribeAllKubvizNats(conn clickhouse.DBInterface) {
},
{
Subject: constants.DeletedSubject,
- Consumer: constants.DeletedConsumer,
+ Consumer: cfg.DeletedConsumer,
Handler: func(msg *nats.Msg) {
msg.Ack()
var metrics model.DeletedAPI
@@ -100,7 +133,7 @@ func (n *NATSContext) SubscribeAllKubvizNats(conn clickhouse.DBInterface) {
},
{
Subject: constants.TRIVY_IMAGE_SUBJECT,
- Consumer: constants.Trivy_Image_Consumer,
+ Consumer: cfg.TrivyImageConsumer,
Handler: func(msg *nats.Msg) {
msg.Ack()
var metrics model.TrivyImage
@@ -115,13 +148,14 @@ func (n *NATSContext) SubscribeAllKubvizNats(conn clickhouse.DBInterface) {
},
{
Subject: constants.TRIVY_SBOM_SUBJECT,
- Consumer: constants.Trivy_Sbom_Consumer,
+ Consumer: cfg.TrivySbomConsumer,
Handler: func(msg *nats.Msg) {
msg.Ack()
- var metrics model.Reports
+ var metrics model.Sbom
err := json.Unmarshal(msg.Data, &metrics)
if err != nil {
- log.Println("failed to unmarshal in nats", err)
+ log.Println("failed to unmarshal from nats", err)
+ return
}
log.Printf("Trivy sbom Metrics Received: %#v,", metrics)
conn.InsertTrivySbomMetrics(metrics)
@@ -130,7 +164,7 @@ func (n *NATSContext) SubscribeAllKubvizNats(conn clickhouse.DBInterface) {
},
{
Subject: constants.KubvizSubject,
- Consumer: constants.KubvizConsumer,
+ Consumer: cfg.KubvizConsumer,
Handler: func(msg *nats.Msg) {
msg.Ack()
var metrics model.Metrics
@@ -145,7 +179,7 @@ func (n *NATSContext) SubscribeAllKubvizNats(conn clickhouse.DBInterface) {
},
{
Subject: constants.KUBESCORE_SUBJECT,
- Consumer: constants.KubscoreConsumer,
+ Consumer: cfg.KubscoreConsumer,
Handler: func(msg *nats.Msg) {
msg.Ack()
var metrics model.KubeScoreRecommendations
@@ -160,7 +194,7 @@ func (n *NATSContext) SubscribeAllKubvizNats(conn clickhouse.DBInterface) {
},
{
Subject: constants.TRIVY_K8S_SUBJECT,
- Consumer: constants.TrivyConsumer,
+ Consumer: cfg.TrivyConsumer,
Handler: func(msg *nats.Msg) {
msg.Ack()
var metrics model.Trivy
diff --git a/client/pkg/config/config.go b/client/pkg/config/config.go
index b50c970e..94045005 100644
--- a/client/pkg/config/config.go
+++ b/client/pkg/config/config.go
@@ -1,8 +1,34 @@
package config
type Config struct {
- NatsAddress string `envconfig:"NATS_ADDRESS"`
- NatsToken string `envconfig:"NATS_TOKEN"`
- DbPort int `envconfig:"DB_PORT"`
- DBAddress string `envconfig:"DB_ADDRESS"`
+ NatsAddress string `envconfig:"NATS_ADDRESS"`
+ NatsToken string `envconfig:"NATS_TOKEN"`
+ DbPort int `envconfig:"DB_PORT"`
+ DBAddress string `envconfig:"DB_ADDRESS"`
+ ClickHouseUsername string `envconfig:"CLICKHOUSE_USERNAME"`
+ ClickHousePassword string `envconfig:"CLICKHOUSE_PASSWORD"`
+ KetallConsumer string `envconfig:"KETALL_EVENTS_CONSUMER" required:"true"`
+ RakeesConsumer string `envconfig:"RAKEES_METRICS_CONSUMER" required:"true"`
+ OutdatedConsumer string `envconfig:"OUTDATED_EVENTS_CONSUMER" required:"true"`
+ DeprecatedConsumer string `envconfig:"DEPRECATED_API_CONSUMER" required:"true"`
+ DeletedConsumer string `envconfig:"DELETED_API_CONSUMER" required:"true"`
+ KubvizConsumer string `envconfig:"KUBVIZ_EVENTS_CONSUMER" required:"true"`
+ KubscoreConsumer string `envconfig:"KUBSCORE_CONSUMER" required:"true"`
+ TrivyConsumer string `envconfig:"TRIVY_CONSUMER" required:"true"`
+ TrivyImageConsumer string `envconfig:"TRIVY_IMAGE_CONSUMER" required:"true"`
+ TrivySbomConsumer string `envconfig:"TRIVY_SBOM_CONSUMER" required:"true"`
+ KuberhealthyConsumer string `envconfig:"KUBERHEALTHY_CONSUMER" required:"true"`
+ AwsEnable bool `envconfig:"AWS_ENABLE" default:"false"`
+ AWSRegion string `envconfig:"AWS_REGION"`
+ AWSAccessKey string `envconfig:"AWS_ACCESS_KEY"`
+ AWSSecretKey string `envconfig:"AWS_SECRET_KEY"`
+ S3BucketName string `envconfig:"S3_BUCKET_NAME"`
+ S3ObjectKey string `envconfig:"S3_OBJECT_KEY"`
+}
+
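+// GraphQlConfig holds the ClickHouse connection settings used by the GraphQL server.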
+type GraphQlConfig struct {
+ DbPort int `envconfig:"DB_PORT"`
+ DBAddress string `envconfig:"DB_ADDRESS"`
+ ClickHouseUsername string `envconfig:"CLICKHOUSE_USERNAME"`
+ ClickHousePassword string `envconfig:"CLICKHOUSE_PASSWORD"`
}
diff --git a/client/pkg/storage/store.go b/client/pkg/storage/store.go
new file mode 100644
index 00000000..b5684c6b
--- /dev/null
+++ b/client/pkg/storage/store.go
@@ -0,0 +1,125 @@
+package storage
+
+import (
+ "database/sql"
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/intelops/kubviz/client/pkg/config"
+ "github.com/kelseyhightower/envconfig"
+)
+
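+// ExportExpiredData selects every row of tableName whose ExportedAt is NULL,
+// uploads the result as a CSV object to S3, and then stamps ExportedAt so the
+// rows are not exported again.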
+func ExportExpiredData(tableName string, db *sql.DB) error {
+ columns, err := getTableColumns(db, tableName)
+ if err != nil {
+ return fmt.Errorf("error getting columns for table %s: %v", tableName, err)
+ }
+
+ // Construct SQL query
+ query := fmt.Sprintf("SELECT * FROM %s WHERE ExportedAt IS NULL", tableName)
+
+ // Query expired data
+ rows, err := db.Query(query)
+ if err != nil {
+ return fmt.Errorf("error querying ClickHouse: %v", err)
+ }
+ defer rows.Close()
+
+ // Construct CSV data in memory
+ var csvData strings.Builder
+ csvData.WriteString(columns + "\n") // Write CSV header
+
+ for rows.Next() {
+ // Assuming a dynamic structure, scan the columns into a slice of interface{}
+ columnValues := make([]interface{}, len(strings.Split(columns, ",")))
+ for i := range columnValues {
+ columnValues[i] = new(interface{})
+ }
+
+ err := rows.Scan(columnValues...)
+ if err != nil {
+ return fmt.Errorf("error scanning ClickHouse row: %v", err)
+ }
+
+ // Write the values to the CSV data
+ var rowData []string
+ for _, value := range columnValues {
+ // Dereference the pointer to get the interface{} value, then format it as a string
+ rowData = append(rowData, fmt.Sprintf("%v", *value.(*interface{})))
+ }
+ csvData.WriteString(strings.Join(rowData, ",") + "\n")
+ }
+
+ // Upload the CSV data to S3
+ err = uploadToS3(&csvData, fmt.Sprintf("exported_data_%s.csv", tableName))
+ if err != nil {
+ return fmt.Errorf("error uploading CSV to S3: %v", err)
+ }
+
+ // Update ExportedAt column with the current timestamp for exported rows
+ updateQuery := fmt.Sprintf("ALTER TABLE %s UPDATE ExportedAt = now() WHERE ExportedAt IS NULL", tableName)
+ _, err = db.Exec(updateQuery)
+ if err != nil {
+ return fmt.Errorf("error updating ExportedAt column: %v", err)
+ }
+
+ return nil
+}
+
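+// getTableColumns returns the column names of tableName as a comma-separated string, using DESCRIBE TABLE.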
+func getTableColumns(db *sql.DB, tableName string) (string, error) {
+ // Query to get column names
+ query := fmt.Sprintf("DESCRIBE TABLE %s", tableName)
+ rows, err := db.Query(query)
+ if err != nil {
+ return "", err
+ }
+ defer rows.Close()
+
+ // Get column names
+ var columns []string
+ for rows.Next() {
+ var columnName string
+ if err := rows.Scan(&columnName); err != nil {
+ return "", err
+ }
+ columns = append(columns, columnName)
+ }
+
+ return strings.Join(columns, ","), nil
+}
+
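+// uploadToS3 uploads the in-memory CSV data to the configured S3 bucket under s3ObjectKey.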
+func uploadToS3(csvData *strings.Builder, s3ObjectKey string) error {
+ cfg := &config.Config{}
+ if err := envconfig.Process("", cfg); err != nil {
+ log.Fatalf("Could not parse env Config: %v", err)
+ }
+
+ // Set up AWS S3 session
+ sess, err := session.NewSession(&aws.Config{
+ Region: aws.String(cfg.AWSRegion),
+ Credentials: credentials.NewStaticCredentials(cfg.AWSAccessKey, cfg.AWSSecretKey, ""),
+ })
+ if err != nil {
+ return fmt.Errorf("error creating S3 session: %v", err)
+ }
+
+ // Create an S3 service client
+ s3Client := s3.New(sess)
+
+ // Upload the CSV data to S3
+ _, err = s3Client.PutObject(&s3.PutObjectInput{
+ Bucket: aws.String(cfg.S3BucketName),
+ Key: aws.String(s3ObjectKey),
+ Body: strings.NewReader(csvData.String()),
+ })
+ if err != nil {
+ return fmt.Errorf("error uploading data to S3: %v", err)
+ }
+
+ fmt.Printf("Data uploaded to S3: %s\n", s3ObjectKey)
+
+ return nil
+}
diff --git a/cmd/cli/commands/sql.go b/cmd/cli/commands/sql.go
new file mode 100644
index 00000000..c9d0d610
--- /dev/null
+++ b/cmd/cli/commands/sql.go
@@ -0,0 +1,69 @@
+package commands
+
+import (
+ "fmt"
+ "log"
+ "os"
+
+ _ "github.com/golang-migrate/migrate/v4/source/file"
+ "github.com/intelops/kubviz/cmd/cli/config"
+ "github.com/spf13/cobra"
+)
+
+var rootCmd = &cobra.Command{
+ Use: "migration",
+ Short: "CLI for managing migrations",
+ Long: `A CLI tool developed to manage migrations for Kubviz Client ClickHouse.`,
+}
+
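+// Execute runs the root migration command and exits with status 1 on error.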
+func Execute() {
+ if err := rootCmd.Execute(); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
+
+var sqlCmd = &cobra.Command{
+ Use: "sql",
+ Short: "Manage SQL migrations",
+ Long: `The sql subcommand is used to manage SQL migrations for Kubviz Client ClickHouse.
+You can execute migrations using the -e flag and confirm with --yes.`,
+ Example: `
+# Execute migrations and confirm
+migration sql -e --yes
+
+# Execute migrations without confirmation
+migration sql -e --no`,
+ Run: func(cmd *cobra.Command, args []string) {
+ executeFlag, _ := cmd.Flags().GetBool("execute")
+ yesFlag, _ := cmd.Flags().GetBool("yes")
+
+ if !executeFlag && !yesFlag {
+ cmd.Help()
+ return
+ }
+
+ if executeFlag {
+ if yesFlag {
+ cfg, err := config.New()
+ if err != nil {
+ log.Fatalf("failed to parse the env: %v", err.Error())
+ return
+ }
+ if err := cfg.Migrate(); err != nil {
+ log.Fatalf("failed to migrate: %v", err.Error())
+ return
+ }
+ } else {
+ fmt.Println("Clickhouse Migration skipped due to --no flag.")
+ }
+ }
+ },
+}
+
+func init() {
+ sqlCmd.Flags().BoolP("execute", "e", false, "Execute the migrations")
+ sqlCmd.Flags().BoolP("yes", "y", false, "Confirm execution")
+
+ rootCmd.AddCommand(sqlCmd)
+}
diff --git a/cmd/cli/config/config.go b/cmd/cli/config/config.go
new file mode 100644
index 00000000..7862f1c3
--- /dev/null
+++ b/cmd/cli/config/config.go
@@ -0,0 +1,24 @@
+package config
+
+import (
+ "github.com/kelseyhightower/envconfig"
+)
+
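+// Config holds the ClickHouse connection details and the schema/TTL settings
+// used by the migration CLI, read from environment variables.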
+type Config struct {
+ DbPort int `envconfig:"DB_PORT" required:"true"`
+ DBAddress string `envconfig:"DB_ADDRESS" required:"true"`
+ ClickHouseUsername string `envconfig:"CLICKHOUSE_USERNAME"`
+ ClickHousePassword string `envconfig:"CLICKHOUSE_PASSWORD"`
+ SchemaPath string `envconfig:"SCHEMA_PATH" default:"/sql"`
+ TtlInterval string `envconfig:"TTL_INTERVAL" default:"1"`
+ TtlUnit string `envconfig:"TTL_UNIT" default:"MONTH"`
+}
+
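+// New parses the configuration from environment variables.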
+func New() (*Config, error) {
+ var cfg Config
+ err := envconfig.Process("", &cfg)
+ if err != nil {
+ return nil, err
+ }
+ return &cfg, nil
+}
diff --git a/cmd/cli/config/utils.go b/cmd/cli/config/utils.go
new file mode 100644
index 00000000..360fbadc
--- /dev/null
+++ b/cmd/cli/config/utils.go
@@ -0,0 +1,123 @@
+package config
+
+import (
+ "bytes"
+ "database/sql"
+ "fmt"
+ "html/template"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/ClickHouse/clickhouse-go/v2"
+ "github.com/golang-migrate/migrate/v4"
+ ch "github.com/golang-migrate/migrate/v4/database/clickhouse"
+ _ "github.com/golang-migrate/migrate/v4/source/file"
+)
+
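+// openClickHouseConn opens a ClickHouse connection, adding credentials only when
+// both username and password are configured, and verifies it with a ping.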
+func (cfg *Config) openClickHouseConn() (*sql.DB, error) {
+
+ var options clickhouse.Options
+ if cfg.ClickHouseUsername != "" && cfg.ClickHousePassword != "" {
+ fmt.Println("Using provided username and password")
+ options = clickhouse.Options{
+ Addr: []string{fmt.Sprintf("%s:%d", cfg.DBAddress, cfg.DbPort)},
+ Debug: true,
+ Auth: clickhouse.Auth{
+ Username: cfg.ClickHouseUsername,
+ Password: cfg.ClickHousePassword,
+ },
+ }
+
+ } else {
+ fmt.Println("Using connection without username and password")
+ options = clickhouse.Options{
+ Addr: []string{fmt.Sprintf("%s:%d", cfg.DBAddress, cfg.DbPort)},
+ }
+ }
+
+ conn := clickhouse.OpenDB(&options)
+ if err := conn.Ping(); err != nil {
+ if exception, ok := err.(*clickhouse.Exception); ok {
+ return nil, fmt.Errorf("[%d] %s %s", exception.Code, exception.Message, exception.StackTrace)
+ } else {
+ return nil, err
+ }
+ }
+ return conn, nil
+}
+
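+// processSQLTemplate renders the SQL template at filePath, substituting the configured TTL interval and unit.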
+func (cfg *Config) processSQLTemplate(filePath string) (string, error) {
+
+ file, err := os.Open(filePath)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+ data, err := io.ReadAll(file)
+ if err != nil {
+ return "", err
+ }
+ tmpl, err := template.New("sql").Parse(string(data))
+ if err != nil {
+ return "", err
+ }
+
+ params := map[string]string{
+ "TTLValue": cfg.TtlInterval,
+ "TTLUnit": cfg.TtlUnit,
+ }
+
+ var buf bytes.Buffer
+ err = tmpl.Execute(&buf, params)
+ if err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
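+// Migrate renders every *.up.sql template in SchemaPath in place and then
+// applies the migrations to ClickHouse via golang-migrate.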
+func (cfg *Config) Migrate() error {
+ dir := cfg.SchemaPath
+ files, err := os.ReadDir(dir)
+ if err != nil {
+ return fmt.Errorf("failed to read directory: %w", err)
+ }
+
+ for _, file := range files {
+ if strings.HasSuffix(file.Name(), ".up.sql") {
+ fullPath := filepath.Join(dir, file.Name())
+ processedSQL, err := cfg.processSQLTemplate(fullPath)
+ if err != nil {
+ return fmt.Errorf("failed to process the sql template for file %s: %w", file.Name(), err)
+ }
+
+ err = os.WriteFile(fullPath, []byte(processedSQL), 0644)
+ if err != nil {
+ return fmt.Errorf("failed to write to file %s: %w", fullPath, err)
+ }
+ }
+ }
+
+ conn, err := cfg.openClickHouseConn()
+ if err != nil {
+ return fmt.Errorf("unable to create a clickhouse connection: %w", err)
+ }
+
+ driver, err := ch.WithInstance(conn, &ch.Config{})
+ if err != nil {
+ return fmt.Errorf("failed to create migrate driver: %w", err)
+ }
+ m, err := migrate.NewWithDatabaseInstance(
+ fmt.Sprintf("file://%s", dir),
+ "clickhouse",
+ driver,
+ )
+ if err != nil {
+ return fmt.Errorf("clickhouse migration initialization failed: %w", err)
+ }
+ if err := m.Up(); err != nil && err != migrate.ErrNoChange {
+ return fmt.Errorf("migration failed: %w", err)
+ }
+ fmt.Println("Clickhouse Migration applied successfully!")
+ return nil
+}
diff --git a/cmd/cli/main.go b/cmd/cli/main.go
new file mode 100644
index 00000000..2c8f6fed
--- /dev/null
+++ b/cmd/cli/main.go
@@ -0,0 +1,7 @@
+package main
+
+import "github.com/intelops/kubviz/cmd/cli/commands"
+
+func main() {
+ commands.Execute()
+}
diff --git a/constants/constants.go b/constants/constants.go
index ba5aa050..6dbffaaa 100644
--- a/constants/constants.go
+++ b/constants/constants.go
@@ -1,6 +1,7 @@
package constants
const (
+ KUBERHEALTHY_SUBJECT = "METRICS.kuberhealthy"
KUBESCORE_SUBJECT = "METRICS.kubescore"
TRIVY_K8S_SUBJECT = "METRICS.trivyk8s"
StreamSubjects = "METRICS.*"
diff --git a/dockerfiles/client/Dockerfile b/dockerfiles/client/Dockerfile
index 31af58c8..fd9f5b18 100644
--- a/dockerfiles/client/Dockerfile
+++ b/dockerfiles/client/Dockerfile
@@ -11,7 +11,7 @@ RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o k8smetri
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
-FROM gcr.io/distroless/static:nonroot
+FROM golang:alpine
WORKDIR /
COPY --from=builder /workspace/k8smetrics_client .
USER 65532:65532
diff --git a/dockerfiles/graphqlserver/Dockerfile b/dockerfiles/graphqlserver/Dockerfile
new file mode 100644
index 00000000..740b2c36
--- /dev/null
+++ b/dockerfiles/graphqlserver/Dockerfile
@@ -0,0 +1,11 @@
+FROM golang:1.19 AS builder
+WORKDIR /
+COPY ./ ./
+
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o ./build/graphqlserver graphqlserver/server.go
+
+FROM scratch
+COPY --from=builder ./build/graphqlserver server
+
+USER 65532:65532
+ENTRYPOINT ["/server"]
diff --git a/dockerfiles/migration/Dockerfile b/dockerfiles/migration/Dockerfile
new file mode 100644
index 00000000..6cc51b93
--- /dev/null
+++ b/dockerfiles/migration/Dockerfile
@@ -0,0 +1,22 @@
+FROM golang:1.20 as builder
+
+WORKDIR /workspace
+COPY ./ ./
+RUN go mod download
+
+RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o migration cmd/cli/main.go
+
+RUN chmod +x /workspace/script/wait-for-clickhouse.sh
+
+# Package the migration binary on a small golang:alpine image
+# (netcat is added for the wait-for-clickhouse script)
+FROM golang:alpine
+RUN apk add --no-cache netcat-openbsd
+WORKDIR /
+COPY --from=builder /workspace/migration .
+COPY --from=builder /workspace/sql /sql
+COPY --from=builder /workspace/script /script
+RUN chmod -R 777 /sql
+USER 65532:65532
+
+ENTRYPOINT ["/migration"]
diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md
index 8a48ff66..00d8c796 100644
--- a/docs/CONFIGURATION.md
+++ b/docs/CONFIGURATION.md
@@ -14,11 +14,11 @@ Please replace the section with the specific ingress host nam
Possible values are:
Values | Platform |
------- | -------- |
+------ | -------- |
`/github` | GitHub |
`/gitlab` | GitLab |
`/gitea` | Gitea |
-`/bitbucket` | BitBucket |
+`/bitbucket` | BitBucket |
`/azure` | Azure |
2. The URL for a Container Registry will appear in the following format:
@@ -32,8 +32,11 @@ Please replace the section with the specific ingress host nam
Possible values are:
Values | Platform |
------- | -------- |
+------ | -------- |
`/event/docker/hub` | DockerHub |
+`/event/azure/container` | Azure |
+`/event/jfrog/container` | JFrog |
+`/event/quay/container` | Quay |
diff --git a/docs/CONFIGURATION_HEALTHCHECK.md b/docs/CONFIGURATION_HEALTHCHECK.md
new file mode 100644
index 00000000..4c056750
--- /dev/null
+++ b/docs/CONFIGURATION_HEALTHCHECK.md
@@ -0,0 +1,82 @@
+## Introduction
+
+All health checks are enabled by default when the KubViz agent is installed. If you don't need them, you can disable them:
+
+```yaml
+kuberhealthy:
+ enabled: false
+...
+```
+
+## Types of Checks
+
+Check Name | Description |
+------ | -------- |
+Daemonset check | Ensures daemonsets can be successfully deployed |
+DNS status check | Checks for failures with DNS, including resolving within the cluster and outside of the cluster |
+Deployment check | Ensures that a Deployment and Service can be provisioned, created, and serve traffic within the Kubernetes cluster |
+Image pull check | Verifies that an image can be pulled from an image repository |
+Pod status check | Checks for unhealthy pod statuses in a target namespace |
+Pod restart | Checks for excessive pod restarts in any namespace |
+Resource quota check | Checks if resource quotas (CPU & memory) are available |
+
+## Configuration
+
+- Daemonset, Deployment, and DNS checks are enabled by default.
+
+- Pod Status, Pod Restart, Image Pull, and Resource Quota checks need to be manually enabled.
+
+```yaml
+check:
+ podRestarts:
+ enabled: true
+...
+```
+
+```yaml
+ podStatus:
+ enabled: true
+...
+```
+
+```yaml
+ imagePullCheck:
+ enabled: true
+...
+```
+
+```yaml
+ resourceQuota:
+ enabled: true
+...
+```
+
+### Additional configuration for image-pull check
+
+1. Pull the test image from Docker Hub
+
+```bash
+docker pull kuberhealthy/test-check
+```
+
+2. Push this image to the repository you want to test.
+
+```bash
+docker push my.repository/repo/test-check
+```
+
+- The check pod attempts to pull the test image from the remote repository (never from a local cache). If the image is unavailable, an error is reported to the API.
+
+### Additional configuration for resource quota check
+
+This check tests whether the namespace resource quotas for CPU and memory are under a specified threshold or percentage.
+
+You need to add the namespaces to be checked to the `WHITELIST` environment variable.
+
+```yaml
+ extraEnvs:
+ BLACKLIST: "default"
+ WHITELIST: "kube-system,kubviz"
+...
+```
+
diff --git a/docs/CONFIGURATION_TTL.md b/docs/CONFIGURATION_TTL.md
new file mode 100644
index 00000000..cf4a165e
--- /dev/null
+++ b/docs/CONFIGURATION_TTL.md
@@ -0,0 +1,17 @@
+# Configuring TTL: Guidelines and Instructions
+
+- **TTL_INTERVAL**: This parameter sets the numeric value for the TTL duration. For instance, if you want data to expire after 2 time units, set this value to 2. The default value is 1.
+
+- **TTL_UNIT**: This parameter specifies the time unit for the TTL duration. It accepts valid values such as SECOND, MINUTE, HOUR, DAY, MONTH, and more. For example, to set a TTL of 2 hours, you would set TTL_INTERVAL to 2 and TTL_UNIT to HOUR. The default unit is MONTH.
+
+# Usage
+
+## Setting Environment Variables
+
+To configure TTL for your application, set the desired environment variables. Here's an example of how to do this:
+
+```bash
+export TTL_INTERVAL=5
+export TTL_UNIT=MINUTE
+```
+
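+With the settings above, the migration CLI substitutes `TTL_INTERVAL` and `TTL_UNIT` into the `{{.TTLValue}}` and `{{.TTLUnit}}` placeholders of the `.up.sql` schema templates before applying them. As a rough illustration (the table and column names below are hypothetical, not the actual KubViz schema):
+
+```sql
+-- Illustrative template from a .up.sql file; {{.TTLValue}} and {{.TTLUnit}}
+-- are replaced with the configured TTL_INTERVAL and TTL_UNIT values.
+CREATE TABLE IF NOT EXISTS events (
+    EventTime DateTime('UTC'),
+    Event     String
+) ENGINE = MergeTree
+ORDER BY EventTime
+TTL EventTime + INTERVAL {{.TTLValue}} {{.TTLUnit}}
+```
+
+With `TTL_INTERVAL=5` and `TTL_UNIT=MINUTE`, the last line is rendered as `TTL EventTime + INTERVAL 5 MINUTE`.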
diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md
index 76918c21..20425d2e 100644
--- a/docs/CONTRIBUTING.md
+++ b/docs/CONTRIBUTING.md
@@ -28,7 +28,7 @@ You are more than welcome to open issues in this project to [suggest new feature
This project is written in Golang.
-You need
+You need
`Go 1.16+`
diff --git a/gitmodels/dbstatement/dbstatement.go b/gitmodels/dbstatement/dbstatement.go
index c51d2236..09f67b28 100644
--- a/gitmodels/dbstatement/dbstatement.go
+++ b/gitmodels/dbstatement/dbstatement.go
@@ -3,75 +3,76 @@ package dbstatement
type DBStatement string
const AzureDevopsTable DBStatement = `
- CREATE TABLE IF NOT EXISTS azure_devops (
- Author String,
- Provider String,
- CommitID String,
- CommitUrl String,
- EventType String,
- RepoName String,
- TimeStamp String,
- Event String
- ) engine=File(TabSeparated)
+CREATE TABLE IF NOT EXISTS azure_devops (
+ Author String,
+ Provider String,
+ CommitID String,
+ CommitUrl String,
+ EventType String,
+ RepoName String,
+ TimeStamp DateTime('UTC'),
+ Event String
+) engine=File(TabSeparated)
`
const InsertAzureDevops DBStatement = "INSERT INTO azure_devops ( Author, Provider, CommitID, CommitUrl, EventType, RepoName, TimeStamp, Event) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
const GithubTable DBStatement = `
- CREATE TABLE IF NOT EXISTS github (
- Author String,
- Provider String,
- CommitID String,
- CommitUrl String,
- EventType String,
- RepoName String,
- TimeStamp String,
- Event String
- ) engine=File(TabSeparated)
+CREATE TABLE IF NOT EXISTS github (
+ Author String,
+ Provider String,
+ CommitID String,
+ CommitUrl String,
+ EventType String,
+ RepoName String,
+ TimeStamp DateTime('UTC'),
+ Event String
+) engine=File(TabSeparated)
`
const InsertGithub DBStatement = "INSERT INTO github ( Author, Provider, CommitID, CommitUrl, EventType, RepoName, TimeStamp, Event) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
const GitlabTable DBStatement = `
- CREATE TABLE IF NOT EXISTS gitlab (
- Author String,
- Provider String,
- CommitID String,
- CommitUrl String,
- EventType String,
- RepoName String,
- TimeStamp String,
- Event String
- ) engine=File(TabSeparated)
+CREATE TABLE IF NOT EXISTS gitlab (
+ Author String,
+ Provider String,
+ CommitID String,
+ CommitUrl String,
+ EventType String,
+ RepoName String,
+ TimeStamp DateTime('UTC'),
+ Event String
+) engine=File(TabSeparated)
`
+
const InsertGitlab DBStatement = "INSERT INTO gitlab ( Author, Provider, CommitID, CommitUrl, EventType, RepoName, TimeStamp, Event) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
const BitbucketTable DBStatement = `
- CREATE TABLE IF NOT EXISTS bitbucket (
- Author String,
- Provider String,
- CommitID String,
- CommitUrl String,
- EventType String,
- RepoName String,
- TimeStamp String,
- Event String
- ) engine=File(TabSeparated)
+CREATE TABLE IF NOT EXISTS bitbucket (
+ Author String,
+ Provider String,
+ CommitID String,
+ CommitUrl String,
+ EventType String,
+ RepoName String,
+ TimeStamp DateTime('UTC'),
+ Event String
+) engine=File(TabSeparated)
`
const InsertBitbucket DBStatement = "INSERT INTO bitbucket ( Author, Provider, CommitID, CommitUrl, EventType, RepoName, TimeStamp, Event) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
const GiteaTable DBStatement = `
- CREATE TABLE IF NOT EXISTS gitea (
- Author String,
- Provider String,
- CommitID String,
- CommitUrl String,
- EventType String,
- RepoName String,
- TimeStamp String,
- Event String
- ) engine=File(TabSeparated)
+CREATE TABLE IF NOT EXISTS gitea (
+ Author String,
+ Provider String,
+ CommitID String,
+ CommitUrl String,
+ EventType String,
+ RepoName String,
+ TimeStamp DateTime('UTC'),
+ Event String
+) engine=File(TabSeparated)
`
const InsertGitea DBStatement = "INSERT INTO gitea ( Author, Provider, CommitID, CommitUrl, EventType, RepoName, TimeStamp, Event) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
diff --git a/go.mod b/go.mod
index 8188081c..cbb62157 100644
--- a/go.mod
+++ b/go.mod
@@ -3,26 +3,38 @@ module github.com/intelops/kubviz
go 1.20
require (
+ github.com/99designs/gqlgen v0.17.42
github.com/ClickHouse/clickhouse-go/v2 v2.10.1
github.com/aquasecurity/trivy v0.43.1
+ github.com/aws/aws-sdk-go v1.44.245
github.com/blang/semver v3.5.1+incompatible
github.com/corneliusweig/tabwriter v0.0.0-20190512204542-5f8a091e83b5
github.com/davecgh/go-spew v1.1.1
github.com/docker/docker v24.0.4+incompatible
github.com/genuinetools/reg v0.16.1
github.com/getkin/kin-openapi v0.118.0
- github.com/ghodss/yaml v1.0.0
github.com/gin-gonic/gin v1.9.1
github.com/go-co-op/gocron v1.30.1
github.com/go-playground/webhooks/v6 v6.2.0
- github.com/google/uuid v1.3.0
+ github.com/golang-migrate/migrate/v4 v4.16.2
+ github.com/google/uuid v1.3.1
github.com/hashicorp/go-version v1.6.0
+ github.com/intelops/go-common v1.0.19
github.com/kelseyhightower/envconfig v1.4.0
+ github.com/kuberhealthy/kuberhealthy/v2 v2.7.1
github.com/nats-io/nats.go v1.27.1
github.com/pkg/errors v0.9.1
+ github.com/robfig/cron/v3 v3.0.1
github.com/sirupsen/logrus v1.9.3
- github.com/vijeyash1/go-github-container v1.0.0
- golang.org/x/term v0.10.0
+ github.com/spf13/cobra v1.7.0
+ github.com/vektah/gqlparser/v2 v2.5.10
+ github.com/zegl/kube-score v1.17.0
+ go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.46.1
+ go.opentelemetry.io/otel v1.21.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
+ go.opentelemetry.io/otel/sdk v1.21.0
+ golang.org/x/term v0.14.0
k8s.io/api v0.27.3
k8s.io/apimachinery v0.27.3
k8s.io/cli-runtime v0.27.3
@@ -31,8 +43,12 @@ require (
)
require (
+ cloud.google.com/go v0.110.7 // indirect
+ cloud.google.com/go/iam v1.1.1 // indirect
+ cloud.google.com/go/storage v1.30.1 // indirect
github.com/ClickHouse/ch-go v0.52.1 // indirect
github.com/CycloneDX/cyclonedx-go v0.7.2-0.20230625092137-07e2f29defc3 // indirect
+ github.com/agnivade/levenshtein v1.1.1 // indirect
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect
github.com/andybalholm/brotli v1.0.5 // indirect
github.com/aquasecurity/go-dep-parser v0.0.0-20230626110909-e7ea5097483b // indirect
@@ -41,6 +57,7 @@ require (
github.com/aquasecurity/trivy-db v0.0.0-20230703082116-dc52e83376ce // indirect
github.com/aquasecurity/trivy-kubernetes v0.5.7-0.20230628140707-dae3bdb6ee81 // indirect
github.com/bytedance/sonic v1.9.1 // indirect
+ github.com/cenkalti/backoff/v4 v4.2.1 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
@@ -48,13 +65,14 @@ require (
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
- github.com/fatih/color v1.14.1 // indirect
+ github.com/fatih/color v1.15.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-faster/city v1.0.1 // indirect
github.com/go-faster/errors v0.6.1 // indirect
- github.com/go-logr/logr v1.2.4 // indirect
+ github.com/go-logr/logr v1.3.0 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.1 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
@@ -66,20 +84,26 @@ require (
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
- github.com/google/go-cmp v0.5.9 // indirect
+ github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-containerregistry v0.15.2 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+ github.com/googleapis/gax-go/v2 v2.11.0 // indirect
+ github.com/gorilla/websocket v1.5.0 // indirect
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/hashicorp/go-multierror v1.1.1 // indirect
+ github.com/hashicorp/golang-lru/v2 v2.0.3 // indirect
github.com/imdario/mergo v0.3.15 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/invopop/yaml v0.1.0 // indirect
+ github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.16.5 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/knqyf263/go-rpm-version v0.0.0-20220614171824-631e686d1075 // indirect
- github.com/kr/pretty v0.3.1 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@@ -87,6 +111,7 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
+ github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
@@ -106,12 +131,12 @@ require (
github.com/pierrec/lz4/v4 v4.1.17 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
- github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/samber/lo v1.38.1 // indirect
github.com/segmentio/asm v1.2.0 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
+ github.com/showa-93/go-mask v0.6.0 // indirect
+ github.com/sosodev/duration v1.1.0 // indirect
github.com/spdx/tools-golang v0.5.0 // indirect
- github.com/spf13/cobra v1.7.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/stretchr/testify v1.8.4 // indirect
@@ -119,23 +144,27 @@ require (
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/xlab/treeprint v1.1.0 // indirect
go.etcd.io/bbolt v1.3.7 // indirect
- go.opentelemetry.io/otel v1.14.0 // indirect
- go.opentelemetry.io/otel/trace v1.14.0 // indirect
+ go.opentelemetry.io/otel/metric v1.21.0 // indirect
+ go.opentelemetry.io/otel/trace v1.21.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
go.uber.org/atomic v1.10.0 // indirect
- go.uber.org/goleak v1.2.1 // indirect
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/arch v0.3.0 // indirect
- golang.org/x/crypto v0.10.0 // indirect
+ golang.org/x/crypto v0.15.0 // indirect
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
- golang.org/x/net v0.11.0 // indirect
- golang.org/x/oauth2 v0.7.0 // indirect
- golang.org/x/sys v0.10.0 // indirect
- golang.org/x/text v0.10.0 // indirect
+ golang.org/x/net v0.18.0 // indirect
+ golang.org/x/oauth2 v0.11.0 // indirect
+ golang.org/x/sys v0.14.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
+ google.golang.org/api v0.126.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20230913181813-007df8e322eb // indirect
+ google.golang.org/grpc v1.59.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index babc6d30..4e586f75 100644
--- a/go.sum
+++ b/go.sum
@@ -1,16 +1,67 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
-cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o=
+cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k=
-cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31jI=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y=
+cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM=
+cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/99designs/gqlgen v0.17.42 h1:BVWDOb2VVHQC5k3m6oa0XhDnxltLLrU4so7x/u39Zu4=
+github.com/99designs/gqlgen v0.17.42/go.mod h1:GQ6SyMhwFbgHR0a8r2Wn8fYgEwPxxmndLFPhU63+cJE=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClickHouse/ch-go v0.52.1 h1:nucdgfD1BDSHjbNaG3VNebonxJzD8fX8jbuBpfo5VY0=
github.com/ClickHouse/ch-go v0.52.1/go.mod h1:B9htMJ0hii/zrC2hljUKdnagRBuLqtRG/GrU3jqCwRk=
+github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0=
github.com/ClickHouse/clickhouse-go/v2 v2.10.1 h1:WCnusqEeCO/9sLFVIv57le/O1ydUb+x9+SYYhJ11fsY=
github.com/ClickHouse/clickhouse-go/v2 v2.10.1/go.mod h1:teXfZNM90iQ99Jnuht+dxQXCuhDZ8nvvMoTJOFrcmcg=
github.com/CycloneDX/cyclonedx-go v0.7.2-0.20230625092137-07e2f29defc3 h1:NqeV+ZMqpcosu0Xg2VW14Ru9ayBs/toe2oihS7sN6Xo=
@@ -24,16 +75,24 @@ github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
+github.com/Pallinder/go-randomdata v1.1.0/go.mod h1:yHmJgulpD2Nfrm0cR9tI/+oAgRqCQQixsA8HyRZfV9Y=
github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903 h1:ZK3C5DtzV2nVAQTx5S5jQvMeDqWtD1By5mOoyY/xJek=
+github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ=
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
+github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/alecthomas/chroma v0.10.0 h1:7XDcGkCQopCNKjZHfYrNLraA+M7e0fMiJ/Mfikbfjek=
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1rlcoLz8y5B2r4tTLMiVTrMtpfY0O8EScKJxaSaEc=
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/andybalholm/cascadia v1.3.1 h1:nhxRkql1kdYCc8Snf7D5/D3spOX+dBgjA6u8x004T2c=
github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU=
github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
github.com/aquasecurity/bolt-fixtures v0.0.0-20200903104109-d34e7f983986 h1:2a30xLN2sUZcMXl50hg+PJCIDdJgIvIbVcKqLJ/ZrtM=
@@ -50,8 +109,13 @@ github.com/aquasecurity/trivy-db v0.0.0-20230703082116-dc52e83376ce h1:swoQLWQoZ
github.com/aquasecurity/trivy-db v0.0.0-20230703082116-dc52e83376ce/go.mod h1:cXuqKo+FaMY0ixJNoUcyDHdfCBRPWOysI2Td8N4fRsg=
github.com/aquasecurity/trivy-kubernetes v0.5.7-0.20230628140707-dae3bdb6ee81 h1:5/tKpCr861auON/CMHSXnRzNixx1FTWAeHSwV0PtA0U=
github.com/aquasecurity/trivy-kubernetes v0.5.7-0.20230628140707-dae3bdb6ee81/go.mod h1:GCm7uq++jz7Ij8cA9mAorpKJ9/qSBCl7v6EKYA8DxJ8=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
+github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
+github.com/aws/aws-sdk-go v1.25.24/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.44.245 h1:KtY2s4q31/kn33AdV63R5t77mdxsI7rq3YT7Mgo805M=
+github.com/aws/aws-sdk-go v1.44.245/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -65,6 +129,9 @@ github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s=
github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
@@ -76,6 +143,11 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
+github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/codingsince1985/checksum v1.1.0/go.mod h1:oOS5kmF4DPKaeMAT7CCXcvakSB7s08Om5FdZTwV/CEk=
github.com/containerd/containerd v1.2.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg=
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -90,6 +162,10 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
+github.com/denverdino/aliyungo v0.0.0-20191023002520-dba750c0c223/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
+github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
+github.com/dhui/dktest v0.3.16 h1:i6gq2YQEtcrjKbeJpBkWjE8MmLZPYllcjOFbTZuPDnw=
github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E=
github.com/docker/cli v0.0.0-20190913211141-95327f4e6241/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v23.0.5+incompatible h1:ufWmAOuD3Vmr7JP2G5K3cyuNC4YZWiAsuDEvFVVDafE=
@@ -110,17 +186,28 @@ github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
-github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
-github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
+github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
+github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e/go.mod h1:2H9hjfbpSMHwY503FclkV/lZTBh2YlOmLLSda12uL8c=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/genuinetools/pkg v0.0.0-20181022210355-2fcf164d37cb/go.mod h1:XTcrCYlXPxnxL2UpnwuRn7tcaTn9HAhxFoFJucootk8=
@@ -145,16 +232,30 @@ github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4=
github.com/go-git/go-git/v5 v5.7.0 h1:t9AudWVLmqzlo+4bqdf7GY+46SUuRsx59SboFxkq2aE=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gorp/gorp/v3 v3.0.5 h1:PUjzYdYu3HBOh8LE+UUmRG2P0IRDak9XMeGNvaeq4Ow=
+github.com/go-ini/ini v1.49.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8=
github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
@@ -168,6 +269,7 @@ github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QX
github.com/go-playground/webhooks/v6 v6.2.0 h1:SV/Euz3xoTc7LQanUtXaYhVQU0rw4DaxNhNKOBZ90JI=
github.com/go-playground/webhooks/v6 v6.2.0/go.mod h1:GCocmfMtpJdkEOM1uG9p2nXzg1kY5X/LtvQgtPHUaaA=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
@@ -178,23 +280,44 @@ github.com/gogits/go-gogs-client v0.0.0-20200905025246-8bb8a50cb355/go.mod h1:cY
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-migrate/migrate/v4 v4.16.2 h1:8coYbMKUyInrFk1lfGfRovTLAW7PhWp8qQDT2iKfuoA=
+github.com/golang-migrate/migrate/v4 v4.16.2/go.mod h1:pfcJX4nPHaVdc5nmdCikFBWtm+UBpiZjRNNsyBbp0/o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
@@ -203,55 +326,117 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.15.2 h1:MMkSh+tjSdnmJZO7ljvEqV1DjfekB6VUEAZgy3a+TQE=
github.com/google/go-containerregistry v0.15.2/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/licenseclassifier/v2 v2.0.0 h1:1Y57HHILNf4m0ABuMVb6xk4vAJYEUO0gDxNpog0pyeA=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
-github.com/google/s2a-go v0.1.3 h1:FAgZmpLl/SXurPEZyCMPBIiiYeTbqfjlbdnCNTAkbGE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
-github.com/googleapis/gax-go/v2 v2.8.0 h1:UBtEZqx1bjXtOQ5BVTkuYghXrr3N4V123VKJK67vJZc=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4=
+github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.11.1/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-getter v1.7.1 h1:SWiSWN/42qdpR0MdhaOc/bLR48PLuP1ZQtYLRlM69uY=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru/v2 v2.0.3 h1:kmRrRLlInXvng0SmLxmQpQkpbYAvcXm7NPDrgxJa9mE=
+github.com/hashicorp/golang-lru/v2 v2.0.3/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl/v2 v2.14.1 h1:x0BpjfZ+CYdbiz+8yZTQ+gdLO7IXvOut7Da+XJayx34=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/influxdata/influxdb1-client v0.0.0-20190402204710-8ff2fc3824fc/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/integrii/flaggy v1.2.2/go.mod h1:tnTxHeTJbah0gQ6/K0RW0J7fMUBk9MCF5blhm43LNpI=
+github.com/intelops/go-common v1.0.19 h1:K53TIISpeTONS4g1squwGvo+1wmSbv2Kqp31mpw1090=
+github.com/intelops/go-common v1.0.19/go.mod h1:GDDr2xP2uqtjMgATC4BLDt29kC7W9R3EW+8Du2LlNt8=
github.com/invopop/yaml v0.1.0 h1:YW3WGUoJEXYfzWBjn00zIlrw7brGVD0fUKRYDPAPhrc=
github.com/invopop/yaml v0.1.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
@@ -267,16 +452,19 @@ github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8t
github.com/knqyf263/go-rpm-version v0.0.0-20220614171824-631e686d1075 h1:aC6MEAs3PE3lWD7lqrJfDxHd6hcced9R4JTZu85cJwU=
github.com/knqyf263/go-rpm-version v0.0.0-20220614171824-631e686d1075/go.mod h1:i4sF0l1fFnY1aiw08QQSwVAFxHEm311Me3WsU/X7nL0=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kuberhealthy/kuberhealthy/v2 v2.7.1 h1:N7kVL1HbO2zEu+zkcMa5oXQDhij9De38ZYL+p959bgk=
+github.com/kuberhealthy/kuberhealthy/v2 v2.7.1/go.mod h1:NRbBdxZLgIf+cy8PUau9q74nOqPLeC9HWr5faYKd9G8=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
@@ -304,19 +492,25 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/moby/buildkit v0.11.6 h1:VYNdoKk5TVxN7k4RvZgdeM4GOyRvIi4Z8MXOY7xvyUs=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
@@ -326,8 +520,10 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/nats-io/jwt/v2 v2.4.1 h1:Y35W1dgbbz2SQUYDPCaclXcuqleVmpbRa7646Jf2EX4=
github.com/nats-io/nats-server/v2 v2.9.19 h1:OF9jSKZGo425C/FcVVIvNgpd36CUe7aVTTXEZRJk6kA=
github.com/nats-io/nats-server/v2 v2.9.19/go.mod h1:aTb/xtLCGKhfTFLxP591CMWfkdgBmcUUSkiSOe5A3gw=
@@ -337,8 +533,22 @@ github.com/nats-io/nkeys v0.4.4 h1:xvBJ8d69TznjcQl9t6//Q5xXuVhyYiSos6RPtvQNTwA=
github.com/nats-io/nkeys v0.4.4/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
github.com/open-policy-agent/opa v0.45.0 h1:P5nuhVRtR+e58fk3CMMbiqr6ZFyWQPNOC3otsorGsFs=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -370,6 +580,7 @@ github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsK
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
@@ -383,9 +594,9 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
-github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rubenv/sql-migrate v1.3.1 h1:Vx+n4Du8X8VTYuXbhNxdEUoh6wiJERA0GlWocR5FrbA=
github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
@@ -395,31 +606,42 @@ github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM=
github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
-github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
+github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/showa-93/go-mask v0.6.0 h1:nNW3dgEocYB7QCGzgRx9wlYrepEg+tRw/keg7u1ftY8=
+github.com/showa-93/go-mask v0.6.0/go.mod h1:aswIj007gm0EPAzOGES9ACy1jDm3QT08/LPSClMp410=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skeema/knownhosts v1.1.1 h1:MTk78x9FPgDFVFkDLTrsnnfCJl7g1C/nnKvePgrIngE=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/sosodev/duration v1.1.0 h1:kQcaiGbJaIsRqgQy7VGlZrVw1giWO+lDoX3MCPnpVO4=
+github.com/sosodev/duration v1.1.0/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM=
github.com/spdx/tools-golang v0.5.0 h1:/fqihV2Jna7fmow65dHpgKNsilgLK7ICpd2tkCnPEyY=
github.com/spdx/tools-golang v0.5.0/go.mod h1:kkGlrSXXfHwuSzHQZJRV3aKu9ZXCq/MSf2+xyiJH1lM=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -440,8 +662,8 @@ github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
-github.com/vijeyash1/go-github-container v1.0.0 h1:SWtzxwGFFSCn8UB27IMcCbQ9xg1l6sQgk3pW2aD0fsQ=
-github.com/vijeyash1/go-github-container v1.0.0/go.mod h1:yljHpWvbjXtjy48MXoBonmrTBUYNk8iA0cACfyU0Om4=
+github.com/vektah/gqlparser/v2 v2.5.10 h1:6zSM4azXC9u4Nxy5YmdmGu4uKamfwsdKTwp5zsEealU=
+github.com/vektah/gqlparser/v2 v2.5.10/go.mod h1:1rCcfwB2ekJofmluGWXMSEnPMZgbxzwj6FaZ/4OT8Cc=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
@@ -453,25 +675,50 @@ github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk=
github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/yashtewari/glob-intersection v0.1.0 h1:6gJvMYQlTDOL3dMsPF6J0+26vwX9MB8/1q3uAdhmTrg=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zclconf/go-cty v1.10.0 h1:mp9ZXQeIcN8kAwuqorjH+Q+njbJKjLrvB2yIh4q7U+0=
github.com/zclconf/go-cty-yaml v1.0.2 h1:dNyg4QLTrv2IfJpm7Wtxi55ed5gLGOlPrZ6kMd51hY0=
+github.com/zegl/kube-score v1.17.0 h1:vedzK0pm5yOb1ocm5gybMNYsJRG8iTAatbo3LFIWbUc=
+github.com/zegl/kube-score v1.17.0/go.mod h1:0pt4Lt36uTKPiCQbXQFow29eaAbgMLI9RoESjBoGSq0=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.mongodb.org/mongo-driver v1.11.1/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
-go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM=
-go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
-go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M=
-go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
+go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.46.1 h1:mMv2jG58h6ZI5t5S9QCVGdzCmAsTakMa3oxVgpSD44g=
+go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.46.1/go.mod h1:oqRuNKG0upTaDPbLVCG8AD0G2ETrfDtmh7jViy7ox6M=
+go.opentelemetry.io/contrib/propagators/b3 v1.21.1 h1:WPYiUgmw3+b7b3sQ1bFBFAf0q+Di9dvNc3AtYfnT4RQ=
+go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
+go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
+go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
+go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
+go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
+go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
+go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
+go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
+go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
+go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
-go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
@@ -479,73 +726,217 @@ go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
-golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
+golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
+golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU=
-golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
+golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
-golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
+golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
-golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
+golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
-golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
+golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
-golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -553,40 +944,172 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-google.golang.org/api v0.121.0 h1:8Oopoo8Vavxx6gt+sgs8s8/X60WBAtKQq6JqnkF+xow=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
+google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o=
+google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230913181813-007df8e322eb h1:Isk1sSH7bovx8Rti2wZK0UZF6oraBDK74uoyLEEVFN0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230913181813-007df8e322eb/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
@@ -595,21 +1118,29 @@ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -618,34 +1149,54 @@ gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
helm.sh/helm/v3 v3.12.1 h1:lzU7etZX24A6BTMXYQF3bFq0ECfD8s+fKlNBBL8AbEc=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg=
k8s.io/api v0.27.3 h1:yR6oQXXnUEBWEWcvPWS0jQL575KoAboQPfJAuKNrw5Y=
k8s.io/api v0.27.3/go.mod h1:C4BNvZnQOF7JA/0Xed2S+aUyJSfTGkGFxLXz9MnpIpg=
k8s.io/apiextensions-apiserver v0.27.2 h1:iwhyoeS4xj9Y7v8YExhUwbVuBhMr3Q4bd/laClBV6Bo=
+k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI=
k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM=
k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=
k8s.io/apiserver v0.27.2 h1:p+tjwrcQEZDrEorCZV2/qE8osGTINPuS5ZNqWAvKm5E=
k8s.io/cli-runtime v0.27.3 h1:h592I+2eJfXj/4jVYM+tu9Rv8FEc/dyCoD80UJlMW2Y=
k8s.io/cli-runtime v0.27.3/go.mod h1:LzXud3vFFuDFXn2LIrWnscPgUiEj7gQQcYZE2UPn9Kw=
+k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU=
k8s.io/client-go v0.27.3 h1:7dnEGHZEJld3lYwxvLl7WoehK6lAq7GvgjxpA3nv1E8=
k8s.io/client-go v0.27.3/go.mod h1:2MBEKuTo6V1lbKy3z1euEGnhPfGZLKTS9tiJ2xodM48=
k8s.io/component-base v0.27.2 h1:neju+7s/r5O4x4/txeUONNTS9r1HsPbyoPBAtHsDCpo=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/kops v1.11.0/go.mod h1:Rj0HgVofTwl4lTemGYjf2uUNLoNsEZMhdmt8jG74xLI=
+k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg=
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
k8s.io/kubectl v0.27.2 h1:sSBM2j94MHBFRWfHIWtEXWCicViQzZsb177rNsKBhZg=
+k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 h1:kmDqav+P+/5e1i9tFfHq1qcF3sOrDp+YEkVDAHu7Jwk=
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go v1.2.2 h1:0E9tOHUfrNH7TCDk5KU0jVBEzCqbfdyuVfGmJ7ZeRPE=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.13.2 h1:kejWfLeJhUsTGioDoFNJET5LQe/ajzXhJGYoU+pJsiA=
sigs.k8s.io/kustomize/api v0.13.2/go.mod h1:DUp325VVMFVcQSq+ZxyDisA8wtldwHxLZbr1g94UHsw=
sigs.k8s.io/kustomize/kyaml v0.14.1 h1:c8iibius7l24G2wVAGZn/Va2wNys03GXLjYVIcFVxKA=
sigs.k8s.io/kustomize/kyaml v0.14.1/go.mod h1:AN1/IpawKilWD7V+YvQwRGUvuUOOWpjsHu6uHwonSF4=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/grafana/azure-dashboard.json b/grafana/azure-dashboard.json
new file mode 100644
index 00000000..b90566c4
--- /dev/null
+++ b/grafana/azure-dashboard.json
@@ -0,0 +1,499 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 53,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 2,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "// Check if data.series exists\nif (context.panel.data.series && context.panel.data.series.length > 0) {\n const eventTypes = context.panel.data.series[0].fields[0].values;\n const authors = context.panel.data.series[0].fields[1].values;\n const repoNames = context.panel.data.series[0].fields[2].values;\n const total = context.panel.data.series[0].fields[3].values; // Assuming you have a field named \"Total\"\n\n // Create nodes and links\n const nodes = [];\n const links = [];\n\n authors.forEach((author, index) => {\n const sourceNode = {\n name: author,\n category: 0, // Category for source nodes\n symbolSize: 60, // Size for source nodes\n };\n\n const eventTypeNode = {\n name: eventTypes[index],\n category: 1, // Category for eventType nodes\n symbolSize: 40, // Size for eventType nodes\n };\n\n const repoNode = {\n name: repoNames[index],\n category: 2, // Category for repo nodes\n symbolSize: 30, // Size for repo nodes\n };\n\n const totalNode = {\n name: `Total: ${total[index]}`, // Assuming you have an array \"total\"\n category: 3, // Category for total nodes\n symbolSize: 20, // Size for total nodes\n };\n\n // Ensure source, eventType, repo, and total nodes are unique before adding them\n if (!nodes.some((node) => node.name === sourceNode.name)) {\n nodes.push(sourceNode);\n }\n\n if (!nodes.some((node) => node.name === eventTypeNode.name)) {\n nodes.push(eventTypeNode);\n }\n\n if (!nodes.some((node) => node.name === repoNode.name)) {\n nodes.push(repoNode);\n }\n\n if (!nodes.some((node) => node.name === totalNode.name)) {\n nodes.push(totalNode);\n }\n\n // Create links between author, eventType, repo, and total nodes\n links.push({\n source: author,\n target: eventTypes[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: repoNames[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: totalNode.name,\n });\n });\n\n // Create categories for nodes\n const categories = [\n {\n name: 'Authors',\n },\n {\n name: 'Event Type',\n },\n {\n name: 'Repo Names',\n },\n {\n name: 'Total', // Add a category for \"Total\" nodes\n },\n ];\n\n // Create ECharts option\n const option = {\n tooltip: {\n trigger: 'item',\n formatter: '{b}',\n },\n legend: {\n x: 'left',\n data: ['Authors', 'Event Types', 'Repo Names', 'Total'], // Add \"Total\" to legend data\n },\n series: [\n {\n type: 'graph',\n layout: 'circular',\n roam: true,\n label: {\n show: true,\n textStyle: { // Add textStyle property to configure text style\n color: '#000', // Set the text color to a brighter color, such as white (#FFF)\n },\n },\n force: {\n repulsion: 100,\n gravity: 0.1,\n edgeLength: 150,\n },\n data: nodes,\n links: links,\n draggable: true,\n categories: categories,\n edgeSymbol: [\"none\", \"arrow\"],\n\n // Increase the size of arrow marks\n edgeSymbolSize: [12, 12], // Set the arrow size here\n lineStyle: {\n color: \"#000000\",\n curveness: 0,\n opacity: 0.3,\n },\n },\n ],\n };\n\n // Return the ECharts option\n return option;\n} else {\n // Display a custom message when data is not available\n const option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n\n return option;\n}",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"azure_devops\" \nWHERE $timeFilterByColumn(TimeStamp) AND EventType IN ($eventType) AND Author IN ($Author)\nGROUP BY EventType, Author, RepoName",
+ "rawQuery": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"azure_devops\" \nWHERE TimeStamp >= toDateTime(1694534229) AND TimeStamp <= toDateTime(1694620629) AND EventType IN ('git.push') AND Author IN ('Anila Soman')\nGROUP BY EventType, Author, RepoName",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Azure Contributions",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 10
+ },
+ "id": 5,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let options; // Initialize the options variable\n\nif (!context.panel.data || !context.panel.data.series || context.panel.data.series.length === 0 || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n options = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract Author and Push_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const pushEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Push_Events').values;\n\n // Create the ECharts options\n options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Push_Events'],\n orient: 'vertical', // Change the orientation to vertical\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: pushEvents,\n type: 'line',\n areaStyle: {\n color: 'rgba(0, 128, 255, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'blue', // Set the line color\n },\n name: 'Push_Events',\n },\n ],\n };\n}\n\nreturn options;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Push_Events\nFROM default.azure_devops\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'git.push'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Push_Events\nFROM default.azure_devops\nWHERE TimeStamp >= toDateTime(1694534188) AND TimeStamp <= toDateTime(1694620588) AND EventType = 'git.push'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of Azure push events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 10
+ },
+ "id": 6,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\n// Define a default options object\nconst defaultOptions = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n};\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = defaultOptions;\n} else {\n // Extract Author and Merge_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const mergeEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Merge_Events').values;\n\n // Create the ECharts options\n const options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Merge_Events'],\n orient: 'vertical',\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: mergeEvents,\n type: 'line',\n name: 'Merge_Events',\n areaStyle: {\n color: 'rgba(255, 0, 0, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'red', // Set the line color\n },\n },\n ],\n };\n\n option = options; // Assign the options to the outer variable\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Merge_Events\nFROM default.azure_devops \nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'git.pullrequest.merged'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Merge_Events\nFROM default.azure_devops \nWHERE TimeStamp >= toDateTime(1694534208) AND TimeStamp <= toDateTime(1694620608) AND EventType = 'git.pullrequest.merged'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of Azure Merge events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "light-red",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 18
+ },
+ "id": 3,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'git.push'",
+ "rawQuery": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE TimeStamp >= toDateTime(1694534146) AND TimeStamp <= toDateTime(1694620546) AND EventType = 'git.push'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Azure Push event counts",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "light-red",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 12,
+ "y": 18
+ },
+ "id": 4,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'git.pullrequest.merged'",
+ "rawQuery": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE TimeStamp >= toDateTime(1694534169) AND TimeStamp <= toDateTime(1694620569) AND EventType = 'git.pullrequest.merged'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Azure Merge events count",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 24
+ },
+ "id": 1,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.azure_devops\nWHERE $timeFilterByColumn(TimeStamp) AND EventType IN ($eventType) AND Author IN ($Author)",
+ "rawQuery": "SELECT * FROM default.azure_devops\nWHERE TimeStamp >= toDateTime(1694534128) AND TimeStamp <= toDateTime(1694620528) AND EventType IN ('git.push') AND Author IN ('Anila Soman')",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Azure Events",
+ "type": "table"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "definition": "SELECT EventType FROM default.azure_devops",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "eventType",
+ "options": [],
+ "query": "SELECT EventType FROM default.azure_devops",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "definition": "SELECT Author FROM default.azure_devops",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "Author",
+ "options": [],
+ "query": "SELECT Author FROM default.azure_devops",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-24h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "Azure",
+ "uid": "dd66838a-ffda-4de2-944f-1828d1671fc9",
+ "version": 2,
+ "weekStart": ""
+}
diff --git a/grafana/bitBucket-dashboard.json b/grafana/bitBucket-dashboard.json
new file mode 100644
index 00000000..63d77351
--- /dev/null
+++ b/grafana/bitBucket-dashboard.json
@@ -0,0 +1,498 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 60,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 2,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "// Check if data.series exists\nif (context.panel.data.series && context.panel.data.series.length > 0) {\n const eventTypes = context.panel.data.series[0].fields[0].values;\n const authors = context.panel.data.series[0].fields[1].values;\n const repoNames = context.panel.data.series[0].fields[2].values;\n const total = context.panel.data.series[0].fields[3].values; // Assuming you have a field named \"Total\"\n\n // Create nodes and links\n const nodes = [];\n const links = [];\n\n authors.forEach((author, index) => {\n const sourceNode = {\n name: author,\n category: 0, // Category for source nodes\n symbolSize: 60, // Size for source nodes\n };\n\n const eventTypeNode = {\n name: eventTypes[index],\n category: 1, // Category for eventType nodes\n symbolSize: 40, // Size for eventType nodes\n };\n\n const repoNode = {\n name: repoNames[index],\n category: 2, // Category for repo nodes\n symbolSize: 30, // Size for repo nodes\n };\n\n const totalNode = {\n name: `Total: ${total[index]}`, // Assuming you have an array \"total\"\n category: 3, // Category for total nodes\n symbolSize: 20, // Size for total nodes\n };\n\n // Ensure source, eventType, repo, and total nodes are unique before adding them\n if (!nodes.some((node) => node.name === sourceNode.name)) {\n nodes.push(sourceNode);\n }\n\n if (!nodes.some((node) => node.name === eventTypeNode.name)) {\n nodes.push(eventTypeNode);\n }\n\n if (!nodes.some((node) => node.name === repoNode.name)) {\n nodes.push(repoNode);\n }\n\n if (!nodes.some((node) => node.name === totalNode.name)) {\n nodes.push(totalNode);\n }\n\n // Create links between author, eventType, repo, and total nodes\n links.push({\n source: author,\n target: eventTypes[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: repoNames[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: totalNode.name,\n });\n });\n\n // Create categories for nodes\n const categories = [\n {\n name: 'Authors',\n },\n {\n name: 'Event Type',\n },\n {\n name: 'Repo Names',\n },\n {\n name: 'Total', // Add a category for \"Total\" nodes\n },\n ];\n\n // Create ECharts option\n const option = {\n tooltip: {\n trigger: 'item',\n formatter: '{b}',\n },\n legend: {\n x: 'left',\n data: ['Authors', 'Event Types', 'Repo Names', 'Total'], // Add \"Total\" to legend data\n },\n series: [\n {\n type: 'graph',\n layout: 'circular',\n roam: true,\n label: {\n show: true,\n textStyle: { // Add textStyle property to configure text style\n color: '#000', // Set the text color to a brighter color, such as white (#FFF)\n },\n },\n force: {\n repulsion: 100,\n gravity: 0.1,\n edgeLength: 150,\n },\n data: nodes,\n links: links,\n draggable: true,\n categories: categories,\n edgeSymbol: [\"none\", \"arrow\"],\n\n // Increase the size of arrow marks\n edgeSymbolSize: [12, 12], // Set the arrow size here\n lineStyle: {\n color: \"#000000\",\n curveness: 0,\n opacity: 0.3,\n },\n },\n ],\n };\n\n // Return the ECharts option\n return option;\n} else {\n // Display a custom message when data is not available\n const option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n\n return option;\n}",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"bitbucket\" \nWHERE $timeFilterByColumn(TimeStamp) AND EventType In ($eventType) AND Author IN ($Author)\nGROUP BY EventType, Author, RepoName",
+ "rawQuery": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"bitbucket\" \nWHERE TimeStamp >= toDateTime(1694536440) AND TimeStamp <= toDateTime(1694622840) AND EventType In ('repo:push','pullrequest:created','pullrequest:fulfilled') AND Author IN ('')\nGROUP BY EventType, Author, RepoName",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "BitBucket Events",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 9
+ },
+ "id": 5,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let options; // Initialize the options variable\n\nif (!context.panel.data || !context.panel.data.series || context.panel.data.series.length === 0 || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n options = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract Author and Push_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const pushEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Push_Events').values;\n\n // Create the ECharts options\n options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Push_Events'],\n orient: 'vertical', // Change the orientation to vertical\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: pushEvents,\n type: 'line',\n areaStyle: {\n color: 'rgba(0, 128, 255, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'blue', // Set the line color\n },\n name: 'Push_Events',\n },\n ],\n };\n}\n\nreturn options;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Push_Events\nFROM default.bitbucket\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'repo:push'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Push_Events\nFROM default.bitbucket\nWHERE TimeStamp >= toDateTime(1694536383) AND TimeStamp <= toDateTime(1694622783) AND EventType = 'repo:push'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of BitBucket Push events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 9
+ },
+ "id": 6,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\n// Define a default options object\nconst defaultOptions = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n};\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = defaultOptions;\n} else {\n // Extract Author and Merge_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const mergeEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Merge_Events').values;\n\n // Create the ECharts options\n const options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Merge_Events'],\n orient: 'vertical',\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: mergeEvents,\n type: 'line',\n name: 'Merge_Events',\n areaStyle: {\n color: 'rgba(255, 0, 0, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'red', // Set the line color\n },\n },\n ],\n };\n\n option = options; // Assign the options to the outer variable\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Merge_Events\nFROM default.bitbucket\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'pullrequest:fulfilled'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Merge_Events\nFROM default.bitbucket\nWHERE TimeStamp >= toDateTime(1694536421) AND TimeStamp <= toDateTime(1694622821) AND EventType = 'pullrequest:fulfilled'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of BitBucket Merge events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "light-blue",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 17
+ },
+ "id": 3,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'repo:push'",
+ "rawQuery": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE TimeStamp >= toDateTime(1694536343) AND TimeStamp <= toDateTime(1694622743) AND EventType = 'repo:push'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "BitBucket Push Events Count",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "light-blue",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 12,
+ "y": 17
+ },
+ "id": 4,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'pullrequest:fulfilled'",
+ "rawQuery": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE TimeStamp >= toDateTime(1694536364) AND TimeStamp <= toDateTime(1694622764) AND EventType = 'pullrequest:fulfilled'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "BitBucket Merge events count",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 24,
+ "x": 0,
+ "y": 23
+ },
+ "id": 1,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.bitbucket\nWHERE $timeFilterByColumn(TimeStamp) AND EventType IN ($eventType) AND Author In ($Author)",
+ "rawQuery": "SELECT * FROM default.bitbucket\nWHERE TimeStamp >= toDateTime(1694536323) AND TimeStamp <= toDateTime(1694622723) AND EventType IN ('repo:push','pullrequest:created','pullrequest:fulfilled') AND Author In ('')",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "BitBucket Events",
+ "type": "table"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "definition": "SELECT EventType FROM default.bitbucket",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "eventType",
+ "options": [],
+ "query": "SELECT EventType FROM default.bitbucket",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "definition": "SELECT Author FROM default.bitbucket",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "Author",
+ "options": [],
+ "query": "SELECT Author FROM default.bitbucket",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-24h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "BitBucket",
+ "uid": "a7772dd5-76c7-48f3-8462-b39fbc20941c",
+ "version": 2,
+ "weekStart": ""
+}
diff --git a/grafana/containerBridge-dashboard.json b/grafana/containerBridge-dashboard.json
new file mode 100644
index 00000000..7ebca535
--- /dev/null
+++ b/grafana/containerBridge-dashboard.json
@@ -0,0 +1,329 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 10,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 4,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.quaycontainerpush\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime",
+ "rawQuery": "SELECT * FROM default.quaycontainerpush\nWHERE EventTime >= toDateTime(1694165676) AND EventTime <= toDateTime(1694252076)\nORDER BY EventTime",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Quay Container Registry",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 8
+ },
+ "id": 3,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.jfrogcontainerpush\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.jfrogcontainerpush\nWHERE EventTime >= toDateTime(1694226084) AND EventTime <= toDateTime(1694247684)\nORDER BY EventTime DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "JFrog Container Registry",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 16
+ },
+ "id": 2,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.azurecontainerpush\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.azurecontainerpush\nWHERE EventTime >= toDateTime(1694165623) AND EventTime <= toDateTime(1694252023)\nORDER BY EventTime DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Azure Container Registry",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 24
+ },
+ "id": 1,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.dockerhubbuild\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC\n",
+ "rawQuery": "SELECT * FROM default.dockerhubbuild\nWHERE EventTime >= toDateTime(1694165564) AND EventTime <= toDateTime(1694251964)\nORDER BY EventTime DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "DockerHub",
+ "type": "table"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-24h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "Container-Bridge",
+ "uid": "cf8cf066-b241-48c8-9e3d-863eed33bcf3",
+ "version": 2,
+ "weekStart": ""
+}
\ No newline at end of file
diff --git a/grafana/giTea-dashboard.json b/grafana/giTea-dashboard.json
new file mode 100644
index 00000000..9dccd2af
--- /dev/null
+++ b/grafana/giTea-dashboard.json
@@ -0,0 +1,503 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 61,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 2,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "// Check if data.series exists\nif (context.panel.data.series && context.panel.data.series.length > 0) {\n const eventTypes = context.panel.data.series[0].fields[0].values;\n const authors = context.panel.data.series[0].fields[1].values;\n const repoNames = context.panel.data.series[0].fields[2].values;\n const total = context.panel.data.series[0].fields[3].values; // Assuming you have a field named \"Total\"\n\n // Create nodes and links\n const nodes = [];\n const links = [];\n\n authors.forEach((author, index) => {\n const sourceNode = {\n name: author,\n category: 0, // Category for source nodes\n symbolSize: 60, // Size for source nodes\n };\n\n const eventTypeNode = {\n name: eventTypes[index],\n category: 1, // Category for eventType nodes\n symbolSize: 40, // Size for eventType nodes\n };\n\n const repoNode = {\n name: repoNames[index],\n category: 2, // Category for repo nodes\n symbolSize: 30, // Size for repo nodes\n };\n\n const totalNode = {\n name: `Total: ${total[index]}`, // Assuming you have an array \"total\"\n category: 3, // Category for total nodes\n symbolSize: 20, // Size for total nodes\n };\n\n // Ensure source, eventType, repo, and total nodes are unique before adding them\n if (!nodes.some((node) => node.name === sourceNode.name)) {\n nodes.push(sourceNode);\n }\n\n if (!nodes.some((node) => node.name === eventTypeNode.name)) {\n nodes.push(eventTypeNode);\n }\n\n if (!nodes.some((node) => node.name === repoNode.name)) {\n nodes.push(repoNode);\n }\n\n if (!nodes.some((node) => node.name === totalNode.name)) {\n nodes.push(totalNode);\n }\n\n // Create links between author, eventType, repo, and total nodes\n links.push({\n source: author,\n target: eventTypes[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: repoNames[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: totalNode.name,\n });\n });\n\n // Create categories for nodes\n const categories = [\n {\n name: 'Authors',\n },\n {\n name: 'Event Type',\n },\n {\n name: 'Repo Names',\n },\n {\n name: 'Total', // Add a category for \"Total\" nodes\n },\n ];\n\n // Create ECharts option\n const option = {\n tooltip: {\n trigger: 'item',\n formatter: '{b}',\n },\n legend: {\n x: 'left',\n data: ['Authors', 'Event Types', 'Repo Names', 'Total'], // Add \"Total\" to legend data\n },\n series: [\n {\n type: 'graph',\n layout: 'circular',\n roam: true,\n label: {\n show: true,\n textStyle: { // Add textStyle property to configure text style\n color: '#000', // Set the text color to a brighter color, such as white (#FFF)\n },\n },\n force: {\n repulsion: 100,\n gravity: 0.1,\n edgeLength: 150,\n },\n data: nodes,\n links: links,\n draggable: true,\n categories: categories,\n edgeSymbol: [\"none\", \"arrow\"],\n\n // Increase the size of arrow marks\n edgeSymbolSize: [12, 12], // Set the arrow size here\n lineStyle: {\n color: \"#000000\",\n curveness: 0,\n opacity: 0.3,\n },\n },\n ],\n };\n\n // Return the ECharts option\n return option;\n} else {\n // Display a custom message when data is not available\n const option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n\n return option;\n}",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"gitea\" \nWHERE $timeFilterByColumn(TimeStamp) AND EventType IN ($eventType) AND Author IN ($Author)\nGROUP BY EventType, Author, RepoName",
+ "rawQuery": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"gitea\" \nWHERE TimeStamp >= toDateTime(1694535218) AND TimeStamp <= toDateTime(1694621618) AND EventType IN ('push','pull_request') AND Author IN ('')\nGROUP BY EventType, Author, RepoName",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GiTea Contributions",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 10
+ },
+ "id": 5,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let options; // Initialize the options variable\n\nif (!context.panel.data || !context.panel.data.series || context.panel.data.series.length === 0 || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n options = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract Author and Push_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const pushEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Push_Events').values;\n\n // Create the ECharts options\n options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Push_Events'],\n orient: 'vertical', // Change the orientation to vertical\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: pushEvents,\n type: 'line',\n areaStyle: {\n color: 'rgba(0, 128, 255, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'blue', // Set the line color\n },\n name: 'Push_Events',\n },\n ],\n };\n}\n\nreturn options;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Push_Events\nFROM default.gitea\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'push'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Push_Events\nFROM default.gitea\nWHERE TimeStamp >= toDateTime(1694535157) AND TimeStamp <= toDateTime(1694621557) AND EventType = 'push'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of Gitea push events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 10
+ },
+ "id": 6,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\n// Define a default options object\nconst defaultOptions = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n};\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = defaultOptions;\n} else {\n // Extract Author and Merge_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const mergeEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Merge_Events').values;\n\n // Create the ECharts options\n const options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Merge_Events'],\n orient: 'vertical',\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: mergeEvents,\n type: 'line',\n name: 'Merge_Events',\n areaStyle: {\n color: 'rgba(255, 0, 0, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'red', // Set the line color\n },\n },\n ],\n };\n\n option = options; // Assign the options to the outer variable\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Merge_Events\nFROM default.gitea\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'pull_request'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Merge_Events\nFROM default.gitea\nWHERE TimeStamp >= toDateTime(1694535179) AND TimeStamp <= toDateTime(1694621579) AND EventType = 'pull_request'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of Gitea Merge events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 0,
+ "y": 18
+ },
+ "id": 3,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS GiTea FROM default.gitea\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'push'",
+ "rawQuery": "SELECT count(*) AS GiTea FROM default.gitea\nWHERE TimeStamp >= toDateTime(1694535111) AND TimeStamp <= toDateTime(1694621511) AND EventType = 'push'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": false
+ }
+ ],
+ "title": "BitBucket Push event count",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 12,
+ "x": 12,
+ "y": 18
+ },
+ "id": 4,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS Gitea FROM default.gitea\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'pull_request'",
+ "rawQuery": "SELECT count(*) AS Gitea FROM default.gitea\nWHERE TimeStamp >= toDateTime(1694535137) AND TimeStamp <= toDateTime(1694621537) AND EventType = 'pull_request'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": false
+ }
+ ],
+ "title": "BitBucket Merge events count",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 24
+ },
+ "id": 1,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.gitea\nWHERE $timeFilterByColumn(TimeStamp) AND EventType IN ($eventType) AND Author IN ($Author)",
+ "rawQuery": "SELECT * FROM default.gitea\nWHERE TimeStamp >= toDateTime(1694535089) AND TimeStamp <= toDateTime(1694621489) AND EventType IN ('push','pull_request') AND Author IN ('')",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GiTea Events",
+ "type": "table"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": true,
+ "text": [
+ "All"
+ ],
+ "value": [
+ "$__all"
+ ]
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "definition": "SELECT EventType FROM default.gitea",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "eventType",
+ "options": [],
+ "query": "SELECT EventType FROM default.gitea",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "definition": "SELECT Author FROM default.gitea",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "Author",
+ "options": [],
+ "query": "SELECT Author FROM default.gitea",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-24h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "GiTea",
+ "uid": "a1c6d705-91b0-4718-99b2-d93b0221bca9",
+ "version": 2,
+ "weekStart": ""
+}
diff --git a/grafana/gitBridge-dashboard.json b/grafana/gitBridge-dashboard.json
deleted file mode 100644
index ed3c6d0f..00000000
--- a/grafana/gitBridge-dashboard.json
+++ /dev/null
@@ -1,2227 +0,0 @@
-{
- "annotations": {
- "list": [
- {
- "builtIn": 1,
- "datasource": "-- Grafana --",
- "enable": true,
- "hide": true,
- "iconColor": "rgba(0, 211, 255, 1)",
- "name": "Annotations & Alerts",
- "target": {
- "limit": 100,
- "matchAny": false,
- "tags": [],
- "type": "dashboard"
- },
- "type": "dashboard"
- }
- ]
- },
- "editable": true,
- "fiscalYearStartMonth": 0,
- "graphTooltip": 0,
- "id": 9,
- "links": [],
- "liveNow": false,
- "panels": [
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 5,
- "x": 0,
- "y": 0
- },
- "id": 42,
- "options": {
- "colorMode": "background",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "intervalFactor": 1,
- "query": "SELECT count(*) AS GitHub FROM default.github\nWHERE EventType = 'push'",
- "rawQuery": "SELECT count(*) AS GitHub FROM default.github\nWHERE EventType = 'push'",
- "refId": "A",
- "round": "0s",
- "skip_comments": true
- }
- ],
- "title": "GITHUB Push events",
- "transparent": true,
- "type": "stat"
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "yellow",
- "value": null
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 4,
- "x": 5,
- "y": 0
- },
- "id": 43,
- "options": {
- "colorMode": "background",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "intervalFactor": 1,
- "query": "SELECT count(*) Gitlab FROM default.gitlab\nWHERE EventType = 'Push Hook'",
- "rawQuery": "SELECT count(*) Gitlab FROM default.gitlab\nWHERE EventType = 'Push Hook'",
- "refId": "A",
- "round": "0s",
- "skip_comments": true
- }
- ],
- "title": "GITLAB Push events",
- "transparent": true,
- "type": "stat"
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "light-blue",
- "value": null
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 5,
- "x": 9,
- "y": 0
- },
- "id": 44,
- "options": {
- "colorMode": "background",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "intervalFactor": 1,
- "query": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE EventType = 'repo:push'",
- "rawQuery": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE EventType = 'repo:push'",
- "refId": "A",
- "round": "0s",
- "skip_comments": true
- }
- ],
- "title": "BITBUCKET Push events",
- "transparent": true,
- "type": "stat"
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "orange",
- "value": null
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 5,
- "x": 14,
- "y": 0
- },
- "id": 45,
- "options": {
- "colorMode": "background",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "intervalFactor": 1,
- "query": "SELECT count(*) AS GiTea FROM default.gitea\nWHERE EventType = 'push'",
- "rawQuery": "SELECT count(*) AS GiTea FROM default.gitea\nWHERE EventType = 'push'",
- "refId": "A",
- "round": "0s",
- "skip_comments": true
- }
- ],
- "title": "GITEA Push events",
- "transparent": true,
- "type": "stat"
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "light-red",
- "value": null
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 5,
- "x": 19,
- "y": 0
- },
- "id": 46,
- "options": {
- "colorMode": "background",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE EventType = 'git.push'",
- "rawQuery": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE EventType = 'git.push'",
- "refId": "A",
- "round": "0s",
- "skip_comments": true
- }
- ],
- "title": "AZURE Push events",
- "transparent": true,
- "type": "stat"
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 5,
- "x": 0,
- "y": 5
- },
- "id": 37,
- "options": {
- "colorMode": "background",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "intervalFactor": 1,
- "query": "SELECT count(*) AS GitHub FROM default.github\nWHERE EventType = 'pull_request'",
- "rawQuery": "SELECT count(*) AS GitHub FROM default.github\nWHERE EventType = 'pull_request'",
- "refId": "A",
- "round": "0s",
- "skip_comments": true
- }
- ],
- "title": "GITHUB Merge events",
- "transparent": true,
- "type": "stat"
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "yellow",
- "value": null
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 4,
- "x": 5,
- "y": 5
- },
- "id": 38,
- "options": {
- "colorMode": "background",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "intervalFactor": 1,
- "query": "SELECT count(*) AS GitLab FROM default.gitlab\nWHERE EventType = 'Merge Request Hook'",
- "rawQuery": "SELECT count(*) AS GitLab FROM default.gitlab\nWHERE EventType = 'Merge Request Hook'",
- "refId": "A",
- "round": "0s",
- "skip_comments": true
- }
- ],
- "title": "GITLAB Merge events",
- "transparent": true,
- "type": "stat"
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "light-blue",
- "value": null
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 5,
- "x": 9,
- "y": 5
- },
- "id": 39,
- "options": {
- "colorMode": "background",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "intervalFactor": 1,
- "query": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE EventType = 'pullrequest:fulfilled'",
- "rawQuery": "SELECT count(*) AS BitBucket FROM default.bitbucket\nWHERE EventType = 'pullrequest:fulfilled'",
- "refId": "A",
- "round": "0s",
- "skip_comments": true
- }
- ],
- "title": "BITBUCKET Merge events",
- "transparent": true,
- "type": "stat"
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "orange",
- "value": null
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 5,
- "x": 14,
- "y": 5
- },
- "id": 40,
- "options": {
- "colorMode": "background",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Gitea FROM default.gitea\nWHERE EventType = 'pull_request'",
- "rawQuery": "SELECT count(*) AS Gitea FROM default.gitea\nWHERE EventType = 'pull_request'",
- "refId": "A",
- "round": "0s",
- "skip_comments": true
- }
- ],
- "title": "GITEA Merge events",
- "transparent": true,
- "type": "stat"
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "light-red",
- "value": null
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 5,
- "w": 5,
- "x": 19,
- "y": 5
- },
- "id": 41,
- "options": {
- "colorMode": "background",
- "graphMode": "area",
- "justifyMode": "auto",
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
- "fields": "",
- "values": false
- },
- "textMode": "auto"
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE EventType = 'git.pullrequest.merged'",
- "rawQuery": "SELECT count(*) AS Azure FROM default.azure_devops\nWHERE EventType = 'git.pullrequest.merged'",
- "refId": "A",
- "round": "0s",
- "skip_comments": true
- }
- ],
- "title": "AZURE Merge events",
- "transparent": true,
- "type": "stat"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of Push events on Azure by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "#17bcc1",
- "mode": "fixed"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 10
- },
- "id": 36,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "fullHighlight": false,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Azure_Push_Events\nFROM default.azure_devops\nWHERE EventType = 'git.push'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "Azure Push Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of Merge events on Azure by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "#17bcc1",
- "mode": "fixed"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 10
- },
- "id": 34,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "fullHighlight": false,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Azure_Merge_Events\nFROM default.azure_devops\nWHERE EventType = 'git.pullrequest.merged'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "Azure Merge Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of Push events on BitBucket by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "#e8f808",
- "mode": "fixed"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 18
- },
- "id": 28,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "fullHighlight": false,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Push_Events\nFROM default.bitbucket\nWHERE EventType = 'repo:push'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "BitBucket Push Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of Merge events on BitBucket by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "#e8f808",
- "mode": "fixed"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 18
- },
- "id": 30,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "fullHighlight": false,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Merge_Events\nFROM default.bitbucket\nWHERE EventType = 'pullrequest:fulfilled'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "BitBucket Merge Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of Push events on GiTea by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "#f46565",
- "mode": "fixed"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 0,
- "y": 26
- },
- "id": 24,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "fullHighlight": false,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Push_Events\nFROM default.gitea\nWHERE EventType = 'push'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "GiTea Push Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of Merge events on GiTea by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "#f46565",
- "mode": "fixed"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 12,
- "x": 12,
- "y": 26
- },
- "id": 26,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "fullHighlight": false,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "builderOptions": {
- "database": "default",
- "fields": [],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [],
- "table": ""
- },
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "database": "default",
- "fields": [],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [],
- "table": ""
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Merge_Events\nFROM default.gitea\nWHERE EventType = 'pull_request'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "GiTea Merge Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of push events on GitLab by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "light-blue",
- "mode": "fixed"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 0,
- "y": 34
- },
- "id": 20,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "fullHighlight": false,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Push_Events\nFROM default.gitlab\nWHERE EventType = 'Push Hook'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "GitLab Push Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of merge events on GitLab by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "fixedColor": "light-blue",
- "mode": "fixed"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 12,
- "y": 34
- },
- "id": 22,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "fullHighlight": false,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Merge_Events\nFROM default.gitlab\nWHERE EventType = 'Merge Request Hook'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "GitLab Merge Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of push events on Github by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "left",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 0,
- "y": 44
- },
- "id": 16,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "fullHighlight": false,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "table",
- "placement": "bottom",
- "showLegend": true
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Push_Events\nFROM default.github\nWHERE EventType = 'push'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "Github Push Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "The panel displays the count of merge events on Github by each author. It provides insights into the contribution activity of different authors within the specified repository.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "palette-classic"
- },
- "custom": {
- "axisCenteredZero": false,
- "axisColorMode": "text",
- "axisLabel": "",
- "axisPlacement": "auto",
- "axisSoftMin": 0,
- "fillOpacity": 80,
- "gradientMode": "opacity",
- "hideFrom": {
- "legend": false,
- "tooltip": false,
- "viz": false
- },
- "lineWidth": 1,
- "scaleDistribution": {
- "type": "linear"
- },
- "thresholdsStyle": {
- "mode": "off"
- }
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- },
- "unit": "short"
- },
- "overrides": []
- },
- "gridPos": {
- "h": 10,
- "w": 12,
- "x": 12,
- "y": 44
- },
- "id": 18,
- "options": {
- "barRadius": 0.2,
- "barWidth": 0.3,
- "fullHighlight": false,
- "groupWidth": 0.7,
- "legend": {
- "calcs": [],
- "displayMode": "list",
- "placement": "bottom",
- "showLegend": true
- },
- "orientation": "auto",
- "showValue": "always",
- "stacking": "none",
- "tooltip": {
- "mode": "single",
- "sort": "none"
- },
- "xTickLabelRotation": 0,
- "xTickLabelSpacing": 0
- },
- "targets": [
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
- },
- "queryType": "sql",
- "rawSql": "SELECT Author, count(*) AS Merge_Events\nFROM default.github\nWHERE EventType = 'pull_request'\nGROUP BY Author",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "Github Merge Events by Author",
- "transparent": true,
- "type": "barchart"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "This panel displays all the events occured in the Azure Procider",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "custom": {
- "align": "center",
- "cellOptions": {
- "type": "color-text"
- },
- "filterable": true,
- "inspect": false
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 7,
- "w": 24,
- "x": 0,
- "y": 54
- },
- "id": 14,
- "options": {
- "cellHeight": "sm",
- "footer": {
- "countRows": false,
- "fields": "",
- "reducer": [
- "sum"
- ],
- "show": false
- },
- "showHeader": true
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "builderOptions": {
- "database": "default",
- "fields": [
- "Author",
- "CommitID",
- "CommitUrl",
- "EventType",
- "RepoName",
- "TimeStamp"
- ],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [
- {
- "dir": "DESC",
- "name": "TimeStamp"
- }
- ],
- "table": "azure_devops"
- },
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT Author, CommitID, CommitUrl, EventType, RepoName, TimeStamp FROM default.\"azure_devops\" ORDER BY TimeStamp DESC LIMIT 100",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "Azure",
- "transparent": true,
- "type": "table"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "This panel displays all the events occured in the BitBucket Providers",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "custom": {
- "align": "center",
- "cellOptions": {
- "type": "color-text"
- },
- "filterable": true,
- "inspect": false
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 61
- },
- "id": 12,
- "options": {
- "cellHeight": "sm",
- "footer": {
- "countRows": false,
- "fields": "",
- "reducer": [
- "sum"
- ],
- "show": false
- },
- "showHeader": true
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "builderOptions": {
- "database": "default",
- "fields": [
- "Author",
- "CommitID",
- "CommitUrl",
- "EventType",
- "RepoName",
- "TimeStamp"
- ],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [
- {
- "dir": "DESC",
- "name": "TimeStamp"
- }
- ],
- "table": "bitbucket"
- },
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT Author, CommitID, CommitUrl, EventType, RepoName, TimeStamp FROM default.\"bitbucket\" ORDER BY TimeStamp DESC LIMIT 100",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "BitBucket",
- "transparent": true,
- "type": "table"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "This panel displays all the events occured in the GiTea provider",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "custom": {
- "align": "center",
- "cellOptions": {
- "type": "color-text"
- },
- "filterable": true,
- "inspect": false
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 9,
- "w": 24,
- "x": 0,
- "y": 69
- },
- "id": 10,
- "options": {
- "cellHeight": "sm",
- "footer": {
- "countRows": false,
- "fields": "",
- "reducer": [
- "sum"
- ],
- "show": false
- },
- "showHeader": true
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "builderOptions": {
- "database": "default",
- "fields": [
- "Author",
- "CommitID",
- "CommitUrl",
- "EventType",
- "RepoName",
- "TimeStamp"
- ],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [
- {
- "dir": "DESC",
- "name": "TimeStamp"
- }
- ],
- "table": "gitea"
- },
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT Author, CommitID, CommitUrl, EventType, RepoName, TimeStamp FROM default.\"gitea\" ORDER BY TimeStamp DESC LIMIT 100",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "GiTea",
- "transparent": true,
- "type": "table"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "This panel displays all the events occured in the GitLab provider.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "custom": {
- "align": "center",
- "cellOptions": {
- "type": "color-text"
- },
- "filterable": true,
- "inspect": false
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 78
- },
- "id": 8,
- "options": {
- "cellHeight": "sm",
- "footer": {
- "countRows": false,
- "fields": "",
- "reducer": [
- "sum"
- ],
- "show": false
- },
- "showHeader": true
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "builderOptions": {
- "database": "default",
- "fields": [
- "Author",
- "CommitID",
- "CommitUrl",
- "EventType",
- "RepoName",
- "TimeStamp"
- ],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [
- {
- "dir": "DESC",
- "name": "TimeStamp"
- }
- ],
- "table": "gitlab"
- },
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT Author, CommitID, CommitUrl, EventType, RepoName, TimeStamp FROM default.\"gitlab\" ORDER BY TimeStamp DESC LIMIT 100",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "GitLab",
- "transparent": true,
- "type": "table"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "description": "This panel displays all the events occured in the GitHub provider.",
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
- },
- "custom": {
- "align": "center",
- "cellOptions": {
- "type": "color-text"
- },
- "filterable": true,
- "inspect": false
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 9,
- "w": 24,
- "x": 0,
- "y": 86
- },
- "id": 6,
- "options": {
- "cellHeight": "sm",
- "footer": {
- "countRows": false,
- "fields": "",
- "reducer": [
- "sum"
- ],
- "show": false
- },
- "showHeader": true
- },
- "pluginVersion": "10.0.3",
- "targets": [
- {
- "builderOptions": {
- "database": "default",
- "fields": [
- "Author",
- "CommitID",
- "CommitUrl",
- "EventType",
- "RepoName",
- "TimeStamp"
- ],
- "filters": [],
- "limit": 100,
- "mode": "list",
- "orderBy": [
- {
- "dir": "DESC",
- "name": "TimeStamp"
- }
- ],
- "table": "github"
- },
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT Author, CommitID, CommitUrl, EventType, RepoName, TimeStamp FROM default.\"github\" ORDER BY TimeStamp DESC LIMIT 100",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "GitHub",
- "transparent": true,
- "type": "table"
- }
- ],
- "refresh": "",
- "schemaVersion": 38,
- "style": "dark",
- "tags": [],
- "templating": {
- "list": []
- },
- "time": {
- "from": "now-6h",
- "to": "now"
- },
- "timepicker": {},
- "timezone": "",
- "title": "GitBridge",
- "uid": "u3EJcUqVk",
- "version": 1,
- "weekStart": ""
-}
diff --git a/grafana/gitHub-dashboard.json b/grafana/gitHub-dashboard.json
new file mode 100644
index 00000000..2c3a5786
--- /dev/null
+++ b/grafana/gitHub-dashboard.json
@@ -0,0 +1,497 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 57,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 1,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "// Check if data.series exists\nif (context.panel.data.series && context.panel.data.series.length > 0) {\n const eventTypes = context.panel.data.series[0].fields[0].values;\n const authors = context.panel.data.series[0].fields[1].values;\n const repoNames = context.panel.data.series[0].fields[2].values;\n const total = context.panel.data.series[0].fields[3].values; // Assuming you have a field named \"Total\"\n\n // Create nodes and links\n const nodes = [];\n const links = [];\n\n authors.forEach((author, index) => {\n const sourceNode = {\n name: author,\n category: 0, // Category for source nodes\n symbolSize: 60, // Size for source nodes\n };\n\n const eventTypeNode = {\n name: eventTypes[index],\n category: 1, // Category for eventType nodes\n symbolSize: 40, // Size for eventType nodes\n };\n\n const repoNode = {\n name: repoNames[index],\n category: 2, // Category for repo nodes\n symbolSize: 30, // Size for repo nodes\n };\n\n const totalNode = {\n name: `Total: ${total[index]}`, // Assuming you have an array \"total\"\n category: 3, // Category for total nodes\n symbolSize: 20, // Size for total nodes\n };\n\n // Ensure source, eventType, repo, and total nodes are unique before adding them\n if (!nodes.some((node) => node.name === sourceNode.name)) {\n nodes.push(sourceNode);\n }\n\n if (!nodes.some((node) => node.name === eventTypeNode.name)) {\n nodes.push(eventTypeNode);\n }\n\n if (!nodes.some((node) => node.name === repoNode.name)) {\n nodes.push(repoNode);\n }\n\n if (!nodes.some((node) => node.name === totalNode.name)) {\n nodes.push(totalNode);\n }\n\n // Create links between author, eventType, repo, and total nodes\n links.push({\n source: author,\n target: eventTypes[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: repoNames[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: totalNode.name,\n });\n });\n\n // Create categories for nodes\n const categories = [\n {\n name: 'Authors',\n },\n {\n name: 'Event Type',\n },\n {\n name: 'Repo Names',\n },\n {\n name: 'Total', // Add a category for \"Total\" nodes\n },\n ];\n\n // Create ECharts option\n const option = {\n tooltip: {\n trigger: 'item',\n formatter: '{b}',\n },\n legend: {\n x: 'left',\n data: ['Authors', 'Event Types', 'Repo Names', 'Total'], // Add \"Total\" to legend data\n },\n series: [\n {\n type: 'graph',\n layout: 'circular',\n roam: true,\n label: {\n show: true,\n textStyle: { // Add textStyle property to configure text style\n color: '#000', // Set the text color to a brighter color, such as white (#FFF)\n },\n },\n force: {\n repulsion: 100,\n gravity: 0.1,\n edgeLength: 150,\n },\n data: nodes,\n links: links,\n draggable: true,\n categories: categories,\n edgeSymbol: [\"none\", \"arrow\"],\n\n // Increase the size of arrow marks\n edgeSymbolSize: [12, 12], // Set the arrow size here\n lineStyle: {\n color: \"#000000\",\n curveness: 0,\n opacity: 0.3,\n },\n },\n ],\n };\n\n // Return the ECharts option\n return option;\n} else {\n // Handle the case when data.series does not exist\n return {};\n}",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"github\" \nWHERE $timeFilterByColumn(TimeStamp) AND Author IN ($Author) AND EventType IN ($eventType)\nGROUP BY EventType, Author, RepoName",
+ "rawQuery": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"github\" \nWHERE TimeStamp >= toDateTime(1694597007) AND TimeStamp <= toDateTime(1694618607) AND Author IN ('ahinvinith') AND EventType IN ('push','pull_request')\nGROUP BY EventType, Author, RepoName",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GitHub Contributors",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 10
+ },
+ "id": 5,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let options; // Initialize the options variable\n\nif (!context.panel.data || !context.panel.data.series || context.panel.data.series.length === 0 || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n options = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract Author and Push_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const pushEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Push_Events').values;\n\n // Create the ECharts options\n options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Push_Events'],\n orient: 'vertical', // Change the orientation to vertical\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: pushEvents,\n type: 'line',\n areaStyle: {\n color: 'rgba(0, 128, 255, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'blue', // Set the line color\n },\n name: 'Push_Events',\n },\n ],\n };\n}\n\nreturn options;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Push_Events\nFROM default.github\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'push'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Push_Events\nFROM default.github\nWHERE TimeStamp >= toDateTime(1694596958) AND TimeStamp <= toDateTime(1694618558) AND EventType = 'push'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of GitHub push events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 10
+ },
+ "id": 6,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\n// Define a default options object\nconst defaultOptions = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n};\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = defaultOptions;\n} else {\n // Extract Author and Merge_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const mergeEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Merge_Events').values;\n\n // Create the ECharts options\n const options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Merge_Events'],\n orient: 'vertical',\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: mergeEvents,\n type: 'line',\n name: 'Merge_Events',\n areaStyle: {\n color: 'rgba(255, 0, 0, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'red', // Set the line color\n },\n },\n ],\n };\n\n option = options; // Assign the options to the outer variable\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Merge_Events\nFROM default.github\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'pull_request'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Merge_Events\nFROM default.github\nWHERE TimeStamp >= toDateTime(1694596980) AND TimeStamp <= toDateTime(1694618580) AND EventType = 'pull_request'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of GitHub Merge events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 18
+ },
+ "id": 3,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "const values = context.panel.data.series[0].fields[0].values;\n\nreturn {\n series: [\n {\n type: 'liquidFill',\n radius: '90%',\n data: values, // Use the raw values here\n label: {\n formatter: '{a|{c}}',\n rich: {\n a: {\n color: '#000', // You can set the text color here\n },\n },\n },\n tooltip: {\n formatter: '{a}: {c}', // This formatter displays the raw values\n },\n },\n ],\n};\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS GitHub FROM default.github\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'pull_request'",
+ "rawQuery": "SELECT count(*) AS GitHub FROM default.github\nWHERE TimeStamp >= toDateTime(1694596901) AND TimeStamp <= toDateTime(1694618501) AND EventType = 'pull_request'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GitHub Merge event Count",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 18
+ },
+ "id": 4,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "const values = context.panel.data.series[0].fields[0].values;\n\nreturn {\n series: [\n {\n type: 'liquidFill',\n radius: '90%',\n data: values, // Use the raw values here\n label: {\n formatter: '{a|{c}}',\n rich: {\n a: {\n color: '#000', // You can set the text color here\n },\n },\n },\n tooltip: {\n formatter: '{a}: {c}', // This formatter displays the raw values\n },\n },\n ],\n};\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS GitHub FROM default.github\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'push'",
+ "rawQuery": "SELECT count(*) AS GitHub FROM default.github\nWHERE TimeStamp >= toDateTime(1694596932) AND TimeStamp <= toDateTime(1694618532) AND EventType = 'push'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GitHub Push events count",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 25
+ },
+ "id": 2,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.github\nWHERE $timeFilterByColumn(TimeStamp) AND EventType IN ($eventType) AND Author IN ($Author)\nORDER BY TimeStamp DESC",
+ "rawQuery": "SELECT * FROM default.github\nWHERE TimeStamp >= toDateTime(1694596869) AND TimeStamp <= toDateTime(1694618469) AND EventType IN ('push','pull_request') AND Author IN ('ahinvinith')\nORDER BY TimeStamp DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Github Events",
+ "type": "table"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": true,
+ "text": [
+ "ahinvinith"
+ ],
+ "value": [
+ "ahinvinith"
+ ]
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "definition": "select Author from default.github",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "Author",
+ "options": [],
+ "query": "select Author from default.github",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "current": {
+ "selected": true,
+ "text": [
+ "push"
+ ],
+ "value": [
+ "push"
+ ]
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "definition": "select EventType from default.github",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "eventType",
+ "options": [],
+ "query": "select EventType from default.github",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-24h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "GitHub",
+ "uid": "ef91218c-94cb-48b1-be1d-0bafe848b75c",
+ "version": 2,
+ "weekStart": ""
+}
diff --git a/grafana/gitLab-dashboard.json b/grafana/gitLab-dashboard.json
new file mode 100644
index 00000000..b6517a4c
--- /dev/null
+++ b/grafana/gitLab-dashboard.json
@@ -0,0 +1,503 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 59,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 2,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "// Check if data.series exists\nif (context.panel.data.series && context.panel.data.series.length > 0) {\n const eventTypes = context.panel.data.series[0].fields[0].values;\n const authors = context.panel.data.series[0].fields[1].values;\n const repoNames = context.panel.data.series[0].fields[2].values;\n const total = context.panel.data.series[0].fields[3].values; // Assuming you have a field named \"Total\"\n\n // Create nodes and links\n const nodes = [];\n const links = [];\n\n authors.forEach((author, index) => {\n const sourceNode = {\n name: author,\n category: 0, // Category for source nodes\n symbolSize: 60, // Size for source nodes\n };\n\n const eventTypeNode = {\n name: eventTypes[index],\n category: 1, // Category for eventType nodes\n symbolSize: 40, // Size for eventType nodes\n };\n\n const repoNode = {\n name: repoNames[index],\n category: 2, // Category for repo nodes\n symbolSize: 30, // Size for repo nodes\n };\n\n const totalNode = {\n name: `Total: ${total[index]}`, // Assuming you have an array \"total\"\n category: 3, // Category for total nodes\n symbolSize: 20, // Size for total nodes\n };\n\n // Ensure source, eventType, repo, and total nodes are unique before adding them\n if (!nodes.some((node) => node.name === sourceNode.name)) {\n nodes.push(sourceNode);\n }\n\n if (!nodes.some((node) => node.name === eventTypeNode.name)) {\n nodes.push(eventTypeNode);\n }\n\n if (!nodes.some((node) => node.name === repoNode.name)) {\n nodes.push(repoNode);\n }\n\n if (!nodes.some((node) => node.name === totalNode.name)) {\n nodes.push(totalNode);\n }\n\n // Create links between author, eventType, repo, and total nodes\n links.push({\n source: author,\n target: eventTypes[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: repoNames[index],\n });\n\n links.push({\n source: eventTypes[index],\n target: totalNode.name,\n });\n });\n\n // Create categories for nodes\n const categories = [\n {\n name: 'Authors',\n },\n {\n name: 'Event Type',\n },\n {\n name: 'Repo Names',\n },\n {\n name: 'Total', // Add a category for \"Total\" nodes\n },\n ];\n\n // Create ECharts option\n const option = {\n tooltip: {\n trigger: 'item',\n formatter: '{b}',\n },\n legend: {\n x: 'left',\n data: ['Authors', 'Event Types', 'Repo Names', 'Total'], // Add \"Total\" to legend data\n },\n series: [\n {\n type: 'graph',\n layout: 'circular',\n roam: true,\n label: {\n show: true,\n textStyle: { // Add textStyle property to configure text style\n color: '#000', // Set the text color to a brighter color, such as white (#FFF)\n },\n },\n force: {\n repulsion: 100,\n gravity: 0.1,\n edgeLength: 150,\n },\n data: nodes,\n links: links,\n draggable: true,\n categories: categories,\n edgeSymbol: [\"none\", \"arrow\"],\n\n // Increase the size of arrow marks\n edgeSymbolSize: [12, 12], // Set the arrow size here\n lineStyle: {\n color: \"#000000\",\n curveness: 0,\n opacity: 0.3,\n },\n },\n ],\n };\n\n // Return the ECharts option\n return option;\n} else {\n // Display a custom message when data is not available\n const option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n\n return option;\n}",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"gitlab\" \nWHERE $timeFilterByColumn(TimeStamp) AND EventType In ($eventType) AND Author IN ($Author)\nGROUP BY EventType, Author, RepoName",
+ "rawQuery": "SELECT EventType, Author, RepoName , count(*) AS Total\nFROM \"default\".\"gitlab\" \nWHERE TimeStamp >= toDateTime(1694533326) AND TimeStamp <= toDateTime(1694619726) AND EventType In ('Push Hook') AND Author IN ('Ahin Vinith')\nGROUP BY EventType, Author, RepoName",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GitLab Contributions",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 10
+ },
+ "id": 5,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let options; // Initialize the options variable\n\nif (!context.panel.data || !context.panel.data.series || context.panel.data.series.length === 0 || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n options = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract Author and Push_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const pushEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Push_Events').values;\n\n // Create the ECharts options\n options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Push_Events'],\n orient: 'vertical', // Change the orientation to vertical\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: pushEvents,\n type: 'line',\n areaStyle: {\n color: 'rgba(0, 128, 255, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'blue', // Set the line color\n },\n name: 'Push_Events',\n },\n ],\n };\n}\n\nreturn options;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Push_Events\nFROM default.gitlab\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'Push Hook'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Push_Events\nFROM default.gitlab\nWHERE TimeStamp >= toDateTime(1694533277) AND TimeStamp <= toDateTime(1694619677) AND EventType = 'Push Hook'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of GitLab Push events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 10
+ },
+ "id": 6,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\n// Define a default options object\nconst defaultOptions = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n};\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = defaultOptions;\n} else {\n // Extract Author and Merge_Events data from the series\n const categories = context.panel.data.series[0].fields.find((f) => f.name === 'Author').values;\n const mergeEvents = context.panel.data.series[0].fields.find((f) => f.name === 'Merge_Events').values;\n\n // Create the ECharts options\n const options = {\n grid: {\n bottom: '3%',\n containLabel: true,\n left: '3%',\n right: '4%',\n top: '4%',\n },\n toolbox: {\n right: '5%', // Adjust the right margin to position it on the top right\n top: '0%', // Adjust the top margin to position it on the top right\n feature: {\n dataZoom: {\n yAxisIndex: 'none',\n },\n restore: {},\n },\n },\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'shadow',\n },\n },\n xAxis: {\n type: 'category',\n data: categories,\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Merge_Events'],\n orient: 'vertical',\n left: '5%',\n top: '5%',\n },\n series: [\n {\n data: mergeEvents,\n type: 'line',\n name: 'Merge_Events',\n areaStyle: {\n color: 'rgba(255, 0, 0, 0.3)', // Set the area (shadow) color\n },\n lineStyle: {\n color: 'red', // Set the line color\n },\n },\n ],\n };\n\n option = options; // Assign the options to the outer variable\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Author, count(*) AS Merge_Events\nFROM default.gitlab\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'Merge Request Hook'\nGROUP BY Author",
+ "rawQuery": "SELECT Author, count(*) AS Merge_Events\nFROM default.gitlab\nWHERE TimeStamp >= toDateTime(1694533298) AND TimeStamp <= toDateTime(1694619698) AND EventType = 'Merge Request Hook'\nGROUP BY Author",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Number of GitLab Merge events grouped by author",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 18
+ },
+ "id": 3,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) Gitlab FROM default.gitlab\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'Push Hook'",
+ "rawQuery": "SELECT count(*) Gitlab FROM default.gitlab\nWHERE TimeStamp >= toDateTime(1694533230) AND TimeStamp <= toDateTime(1694619630) AND EventType = 'Push Hook'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GitLab Push Events",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "green",
+ "mode": "fixed"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 18
+ },
+ "id": 4,
+ "options": {
+ "colorMode": "background",
+ "graphMode": "area",
+ "justifyMode": "auto",
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "textMode": "auto"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT count(*) AS GitLab FROM default.gitlab\nWHERE $timeFilterByColumn(TimeStamp) AND EventType = 'Merge Request Hook'",
+ "rawQuery": "SELECT count(*) AS GitLab FROM default.gitlab\nWHERE TimeStamp >= toDateTime(1694533256) AND TimeStamp <= toDateTime(1694619656) AND EventType = 'Merge Request Hook'",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GitLab Merge events",
+ "type": "stat"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 23
+ },
+ "id": 1,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.gitlab\nWHERE $timeFilterByColumn(TimeStamp) AND EventType IN ($eventType) AND Author IN ($Author)",
+ "rawQuery": "SELECT * FROM default.gitlab\nWHERE TimeStamp >= toDateTime(1694533200) AND TimeStamp <= toDateTime(1694619600) AND EventType IN ('Push Hook') AND Author IN ('Ahin Vinith')",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "GitLab Events",
+ "type": "table"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": [
+ {
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "definition": "SELECT EventType FROM default.gitlab",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "eventType",
+ "options": [],
+ "query": "SELECT EventType FROM default.gitlab",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ },
+ {
+ "current": {
+ "selected": false,
+ "text": "All",
+ "value": "$__all"
+ },
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "definition": "SELECT Author FROM default.gitlab",
+ "hide": 0,
+ "includeAll": true,
+ "multi": true,
+ "name": "Author",
+ "options": [],
+ "query": "SELECT Author FROM default.gitlab",
+ "refresh": 2,
+ "regex": "",
+ "skipUrlSync": false,
+ "sort": 0,
+ "type": "query"
+ }
+ ]
+ },
+ "time": {
+ "from": "now-24h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "GitLab",
+ "uid": "ec8b9cb1-f9ae-4139-b270-4824b6508eff",
+ "version": 2,
+ "weekStart": ""
+}
diff --git a/grafana/kubeData-dashboard.json b/grafana/kubeData-dashboard.json
index 53d1b521..998bc514 100644
--- a/grafana/kubeData-dashboard.json
+++ b/grafana/kubeData-dashboard.json
@@ -21,10 +21,472 @@
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
- "id": 1,
+ "id": 56,
"links": [],
"liveNow": false,
"panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 11,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract data from your JSON as before\n const clusterNames = context.panel.data.series[0].fields[0].values;\n const namespaces = context.panel.data.series[0].fields[1].values;\n const reasons = context.panel.data.series[0].fields[2].values;\n const pods = context.panel.data.series[0].fields[3].values;\n\n // Create a hierarchical structure from the data without a root node\n const hierarchy = {\n name: 'root', // Use 'root' as a placeholder\n children: [],\n };\n\n const seenClusterNames = new Set();\n const seenNamespaces = new Set();\n\n for (let i = 0; i < clusterNames.length; i++) {\n const clusterName = clusterNames[i];\n const namespace = namespaces[i];\n const reason = reasons[i];\n const pod = pods[i];\n\n if (!seenClusterNames.has(clusterName)) {\n seenClusterNames.add(clusterName);\n const clusterNode = { name: clusterName, children: [] };\n hierarchy.children.push(clusterNode);\n seenNamespaces.clear(); // Reset seenNamespaces for each cluster\n }\n\n const clusterNode = hierarchy.children.find((node) => node.name === clusterName);\n\n if (!seenNamespaces.has(namespace)) {\n seenNamespaces.add(namespace);\n const namespaceNode = { name: namespace, children: [] };\n clusterNode.children.push(namespaceNode);\n }\n\n const namespaceNode = clusterNode.children.find((node) => node.name === namespace);\n const reasonNode = { name: reason, children: [{ name: `Count: ${pod}` }] };\n namespaceNode.children.push(reasonNode);\n }\n\n // Create the tree chart using ECharts\n option = {\n tooltip: {\n trigger: 'item',\n triggerOn: 'mousemove',\n formatter: function (params) {\n const node = params.data;\n let tooltip = '';\n if (node.column) {\n tooltip += `${node.column}: ${node.name}`;\n } else {\n tooltip += node.name;\n }\n return tooltip;\n },\n },\n series: [\n {\n type: 'tree',\n data: hierarchy.children, // Use the children directly\n top: '1%',\n left: '7%',\n bottom: '1%',\n right: '20%',\n symbolSize: 7,\n label: {\n position: 'left',\n verticalAlign: 'middle',\n align: 'centre',\n fontSize: 15, // Increase the text size for regular nodes\n fontWeight: 'bold', // Set the font weight to bold\n },\n leaves: {\n label: {\n position: 'right',\n verticalAlign: 'middle',\n align: 'left',\n fontSize: 15, // Increase the text size for leaves\n fontWeight: 'bold', // Set the font weight to bold\n },\n },\n emphasis: {\n focus: 'descendant',\n },\n expandAndCollapse: true,\n animationDuration: 550,\n animationDurationUpdate: 750,\n },\n ],\n };\n}\n\nreturn option;",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Namespace, Reason, count(*) AS Pods\nFROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN ('Pod') AND ClusterName IN ($clusterName) AND Namespace IN ($namespace) AND Reason IN ($reason)\nGROUP BY ClusterName, Namespace, Reason",
+ "rawQuery": "SELECT ClusterName, Namespace, Reason, count(*) AS Pods\nFROM default.events\nWHERE EventTime >= toDateTime(1702949773) AND EventTime <= toDateTime(1702992973) AND Kind IN ('Pod') AND ClusterName IN ('capten-controlplane','kubviz','dev') AND Namespace IN ('argo-cd','crossplane','default','kubescape-prometheus','kubviz','kyverno','linkerd','observability','openebs-cstor','testkube','capten','falco','tekton','crossplane-system','test5','tek','tekton-pipelines','harbor','tekton-pipelines-resolvers','quality-trace','kube-system','cert-manager','emojivoto','local-path-storage','test-linkerd','external-secrets','policy-reporter','velero','tracetest') AND Reason IN ('BackOff','FailedMount','Unhealthy','SyncPackage','SelectComposition','SyncFailed','CannotUpdateExternalResource','RenderCRD','OfferClaim','EstablishComposite','SuccessfulCreate','InjectionSkipped','Scheduled','Pulled','Created','Started','Completed','SawCompletedJob','BindClusterRole','ApplyClusterRoles','CannotInitializeManagedResource','InstallPackageRevision','CreatedUsers','CreatedSuperuser','ApplyRoles','Synced','Killing','Init','ExternalProvisioning','Provisioning','Running','FailedScheduling','ProvisioningSucceeded','ScalingReplicaSet','Pending','Updated','FailUpdate','Degraded','Pulling','Healthy','Succeeded','IssuedLeafCertificate','Failed','Offline','EvictionThresholdMet','NodeHasDiskPressure','NodeHasNoDiskPressure','FreeDiskSpaceFailed','CreateCertificate','Issuing','Generated','Requested','cert-manager.io','OrderCreated','OrderPending','Presented','DomainVerified','Complete','CertificateIssued','SuccessfulDelete','ConfigureCompositeResource','BindCompositeResource','ImageGCFailed','ClaimLost','Evicted','RecreatingFailedPod','LeaderElection','InternalError','Reused','IssuerUpdated','OperationStarted','ResourceUpdated','OperationCompleted','MultiplePodDisruptionBudgets','MissingJob','InvalidOrder','TaintManagerEviction','SystemOOM','FailedKillPod','NodeHasSufficientMemory','NodeHasSufficientPID','NodeNotReady','FailedPreStopHook','NodeReady','Pool Imported','AlreadyPresent','StartingCassandra','UpdateCompleted','LabeledPodAsSeed','StartedCassandra','ComposeResources','UpdatedExternalResource','CannotDeleteExternalResource','DeletedExternalResource','ReconcileInProgress','ReconcileCompleted','ReconcileStarted','ProgressHostsCompleted','ReconcileFailed','UnlabeledPodAsSeed','CannotObserveExternalResource','DeletingStuckPod','DeleteCompositeResource','ResourceDeleted','PublishConnectionSecret','UnpackPackage','ResolveDependencies','ExceededGracePeriod','ProvisioningFailed','CannotCreateExternalResource','FailStatusSync','FailCreate','CannotUpdateManagedResource','CannotResolveResourceReferences','CreatedExternalResource','FailedSync','RegisteredNode','OwnerRefInvalidNamespace','NoPods','DeadlineExceeded','Create','FailedGetScale','NodeAllocatableEnforced','Starting','LintPackage','SandboxChanged','WaitForFirstConsumer','FailedGetResourceMetric','FailedCreate','Injected','Resizing','ExternalExpanding','VolumeResizeFailed','VolumeResizeSuccessful','FileSystemResizeRequired','FileSystemResizeSuccessful','CreatedResource','FailedToUpdateEndpoint','UpdateCertificate','FailedUpdateStatus','UpdateFailed','FailedComputeMetricsReplicas','FailedToCreateEndpoint','FailedAttachVolume','FailedToUpdateEndpointSlices','FailedDelete','Pool Expansion','Error')\nGROUP BY ClusterName, Namespace, Reason",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Pod Scenario Counts by Cluster, Namespace",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 9
+ },
+ "id": 12,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract data from your JSON as before\n const reasons = context.panel.data.series[0].fields[0].values;\n const counts = context.panel.data.series[0].fields[1].values;\n\n // Check if reasons and counts are defined and not empty\n if (!reasons || !counts || reasons.length === 0 || counts.length === 0) {\n // Display a message when no data is available\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n } else {\n // Data is available, proceed with the chart creation\n // Create an array of data items, each containing name and value\n const seriesData = reasons.map((reason, index) => ({\n name: reason,\n value: counts[index],\n }));\n\n // Define a custom color for the bars\n const customColor = 'rgb(0, 123, 255)'; // Change this to your desired color\n\n // Apache ECharts option\n option = {\n xAxis: {\n type: 'category',\n data: reasons, // Use the reasons directly for xAxis data\n axisLabel: {\n interval: 0, // Display all labels on the xAxis\n },\n },\n yAxis: {\n type: 'value',\n },\n legend: {\n data: ['Pods'], // Legend name\n left: 'left', // Position the legend on the left side\n bottom: 'bottom', // Position the legend at the bottom\n },\n series: [\n {\n name: 'Pods', // Series name for the legend\n data: seriesData,\n type: 'bar',\n label: {\n show: true,\n position: 'top',\n formatter: '{c}',\n },\n itemStyle: {\n barBorderRadius: [5, 5, 0, 0], // Adjust the values to control the curvature\n color: customColor, // Set the custom color for the bars\n },\n },\n ],\n };\n }\n}\n\nreturn option;",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Reason, count(Reason) AS Pods\nFROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN ('Pod') AND ClusterName IN ($clusterName) AND Namespace In ($namespace)\nGROUP BY Reason",
+ "rawQuery": "SELECT Reason, count(Reason) AS Pods\nFROM default.events\nWHERE EventTime >= toDateTime(1702949951) AND EventTime <= toDateTime(1702993151) AND Kind IN ('Pod') AND ClusterName IN ('capten-controlplane','kubviz','dev') AND Namespace In ('quality-trace','crossplane-system','observability','default','testkube','openebs-cstor','kyverno','kubescape-prometheus','tekton-pipelines','capten','tek','test-linkerd','linkerd','argo-cd','tracetest','emojivoto','falco','kube-system','crossplane','kubviz','tekton','test5','harbor','tekton-pipelines-resolvers','cert-manager','local-path-storage','external-secrets','policy-reporter','velero')\nGROUP BY Reason",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Total Pod Counts by Reason",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 14,
+ "w": 24,
+ "x": 0,
+ "y": 19
+ },
+ "id": 6,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n\n // Define the data from your JSON\n const clusterNames = context.panel.data.series[0].fields[0].values;\n const kinds = context.panel.data.series[0].fields[1].values;\n const reasons = context.panel.data.series[0].fields[2].values;\n const counts = context.panel.data.series[0].fields[3].values;\n\n // Create the Sankey chart configuration\n option = {\n series: {\n type: 'sankey',\n layout: 'none',\n emphasis: {\n focus: 'adjacency',\n },\n data: [],\n links: [],\n },\n tooltip: {\n trigger: 'item',\n formatter: (params) => {\n if (params.dataType === 'node') {\n return params.name;\n }\n if (params.dataType === 'edge') {\n return `Count: ${counts[params.dataIndex]}`; // Display count values\n }\n return '';\n },\n },\n };\n\n // Create nodes for ClusterName, Kind, and Reason\n const uniqueClusterNames = Array.from(new Set(clusterNames));\n const uniqueKinds = Array.from(new Set(kinds));\n const uniqueReasons = Array.from(new Set(reasons));\n\n uniqueClusterNames.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n uniqueKinds.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n uniqueReasons.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n\n // Create links from Kind to Reason\n kinds.forEach((kind, index) => {\n const sourceIndex = uniqueClusterNames.length + uniqueKinds.indexOf(kind);\n const targetIndex = 1 * uniqueClusterNames.length + uniqueKinds.length + uniqueReasons.indexOf(reasons[index]);\n option.series.links.push({\n source: sourceIndex,\n target: targetIndex,\n value: counts[index], // Use count values\n });\n });\n\n // Create links from ClusterName to Kind\n clusterNames.forEach((clusterName, index) => {\n const sourceIndex = uniqueClusterNames.indexOf(clusterName);\n const targetIndex = uniqueClusterNames.length + uniqueKinds.indexOf(kinds[index]);\n option.series.links.push({\n source: sourceIndex,\n target: targetIndex,\n value: 1,\n });\n });\n}\n\n\nreturn option;\n// Render the chart\nmyChart.setOption(option);",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Kind, Reason, COUNT(*) as Count\nFROM default.events\nWHERE $timeFilterByColumn(EventTime) AND ClusterName IN ($clusterName) AND Kind IN ('Pod', 'Node', 'Deployment')\nGROUP BY ClusterName, Kind, Reason\nORDER BY Count DESC",
+ "rawQuery": "SELECT ClusterName, Kind, Reason, COUNT(*) as Count\nFROM default.events\nWHERE EventTime >= toDateTime(1698927439) AND EventTime <= toDateTime(1698927739) AND ClusterName IN ('beta-cluster') AND Kind IN ('Pod', 'Node', 'Deployment')\nGROUP BY ClusterName, Kind, Reason\nORDER BY Count DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "K8s metrics status",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 33
+ },
+ "id": 8,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n\n // Define the data from your JSON\n const clusterNames = context.panel.data.series[0].fields[0].values;\n const kinds = context.panel.data.series[0].fields[1].values;\n const reasons = context.panel.data.series[0].fields[2].values;\n const counts = context.panel.data.series[0].fields[3].values;\n\n // Create the Sankey chart configuration\n option = {\n series: {\n type: 'sankey',\n layout: 'none',\n emphasis: {\n focus: 'adjacency',\n },\n data: [],\n links: [],\n },\n tooltip: {\n trigger: 'item',\n formatter: (params) => {\n if (params.dataType === 'node') {\n return params.name;\n }\n if (params.dataType === 'edge') {\n return `Count: ${counts[params.dataIndex]}`; // Display count values\n }\n return '';\n },\n },\n };\n\n // Create nodes for ClusterName, Kind, and Reason\n const uniqueClusterNames = Array.from(new Set(clusterNames));\n const uniqueKinds = Array.from(new Set(kinds));\n const uniqueReasons = Array.from(new Set(reasons));\n\n uniqueClusterNames.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n uniqueKinds.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n uniqueReasons.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n\n // Create links from Kind to Reason\n kinds.forEach((kind, index) => {\n const sourceIndex = uniqueClusterNames.length + uniqueKinds.indexOf(kind);\n const targetIndex = 1 * uniqueClusterNames.length + uniqueKinds.length + uniqueReasons.indexOf(reasons[index]);\n option.series.links.push({\n source: sourceIndex,\n target: targetIndex,\n value: counts[index], // Use count values\n });\n });\n\n // Create links from ClusterName to Kind\n clusterNames.forEach((clusterName, index) => {\n const sourceIndex = uniqueClusterNames.indexOf(clusterName);\n const targetIndex = uniqueClusterNames.length + uniqueKinds.indexOf(kinds[index]);\n option.series.links.push({\n source: sourceIndex,\n target: targetIndex,\n value: 1,\n });\n });\n}\n\n\n\nreturn option;\n// Render the chart\nmyChart.setOption(option);",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Kind, Reason, COUNT(*) as Count\nFROM default.events\nWHERE $timeFilterByColumn(EventTime) AND ClusterName IN ($clusterName) AND Kind IN ('Service')\nGROUP BY ClusterName, Kind, Reason\nORDER BY Count DESC",
+ "rawQuery": "SELECT ClusterName, Kind, Reason, COUNT(*) as Count\nFROM default.events\nWHERE EventTime >= toDateTime(1698931712) AND EventTime <= toDateTime(1698932012) AND ClusterName IN ('beta-cluster') AND Kind IN ('Service')\nGROUP BY ClusterName, Kind, Reason\nORDER BY Count DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Service Events by Cluster and Reason",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 33
+ },
+ "id": 9,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n\n // Define the data from your JSON\n const clusterNames = context.panel.data.series[0].fields[0].values;\n const kinds = context.panel.data.series[0].fields[1].values;\n const reasons = context.panel.data.series[0].fields[2].values;\n const counts = context.panel.data.series[0].fields[3].values;\n\n // Create the Sankey chart configuration\n option = {\n series: {\n type: 'sankey',\n layout: 'none',\n emphasis: {\n focus: 'adjacency',\n },\n data: [],\n links: [],\n },\n tooltip: {\n trigger: 'item',\n formatter: (params) => {\n if (params.dataType === 'node') {\n return params.name;\n }\n if (params.dataType === 'edge') {\n return `Count: ${counts[params.dataIndex]}`; // Display count values\n }\n return '';\n },\n },\n };\n\n // Create nodes for ClusterName, Kind, and Reason\n const uniqueClusterNames = Array.from(new Set(clusterNames));\n const uniqueKinds = Array.from(new Set(kinds));\n const uniqueReasons = Array.from(new Set(reasons));\n\n uniqueClusterNames.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n uniqueKinds.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n uniqueReasons.forEach((name, index) => {\n option.series.data.push({\n name: name,\n });\n });\n\n\n // Create links from Kind to Reason\n kinds.forEach((kind, index) => {\n const sourceIndex = uniqueClusterNames.length + uniqueKinds.indexOf(kind);\n const targetIndex = 1 * uniqueClusterNames.length + uniqueKinds.length + uniqueReasons.indexOf(reasons[index]);\n option.series.links.push({\n source: sourceIndex,\n target: targetIndex,\n value: counts[index], // Use count values\n });\n });\n\n // Create links from ClusterName to Kind\n clusterNames.forEach((clusterName, index) => {\n const sourceIndex = uniqueClusterNames.indexOf(clusterName);\n const targetIndex = uniqueClusterNames.length + uniqueKinds.indexOf(kinds[index]);\n option.series.links.push({\n source: sourceIndex,\n target: targetIndex,\n value: 1,\n });\n });\n}\n\n\nreturn option;\n// Render the chart\nmyChart.setOption(option);\n\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Kind, Reason, COUNT(*) as Count\nFROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN ('PersistentVolume','PersistentVolumeClaim')\nGROUP BY ClusterName, Kind, Reason\nORDER BY Count DESC\n",
+ "rawQuery": "SELECT ClusterName, Kind, Reason, COUNT(*) as Count\nFROM default.events\nWHERE EventTime >= toDateTime(1698931738) AND EventTime <= toDateTime(1698932038) AND Kind IN ('PersistentVolume','PersistentVolumeClaim')\nGROUP BY ClusterName, Kind, Reason\nORDER BY Count DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "PV, PVC events by cluster and reason",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 24,
+ "x": 0,
+ "y": 41
+ },
+ "id": 7,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract data from your JSON\n const clusters = context.panel.data.series[0].fields[0].values;\n const hosts = context.panel.data.series[0].fields[1].values;\n const reasons = context.panel.data.series[0].fields[2].values;\n const eventTimes = context.panel.data.series[0].fields[3].values;\n\n // Create a hierarchical structure for the tree chart starting with ClusterName\n const hierarchy = {\n name: 'Root', // You can customize the name of the root node if needed\n children: [],\n };\n\n for (let i = 0; i < clusters.length; i++) {\n const cluster = clusters[i];\n const host = hosts[i];\n const reason = reasons[i];\n const eventTime = eventTimes[i];\n\n // Find or create the cluster node\n let clusterNode = hierarchy.children.find((node) => node.name === cluster);\n if (!clusterNode) {\n clusterNode = { name: cluster, children: [] };\n hierarchy.children.push(clusterNode);\n }\n\n // Find or create the host node under the cluster\n let hostNode = clusterNode.children.find((node) => node.name === host);\n if (!hostNode) {\n hostNode = { name: host, children: [] };\n clusterNode.children.push(hostNode);\n }\n\n // Find or create the reason node under the host\n let reasonNode = hostNode.children.find((node) => node.name === reason);\n if (!reasonNode) {\n reasonNode = { name: reason, children: [] };\n hostNode.children.push(reasonNode);\n }\n\n // Create the eventTime node under the reason\n reasonNode.children.push({ name: eventTime });\n }\n\n // Create the tree chart using ECharts\n option = {\n tooltip: {\n trigger: 'item',\n triggerOn: 'mousemove',\n },\n series: [\n {\n type: 'tree',\n data: hierarchy.children, // Use the children directly as root nodes\n top: '1%',\n left: '7%',\n bottom: '1%',\n right: '20%', // Adjust the right margin to provide more space for labels\n symbolSize: 7,\n label: {\n position: 'inside', // Position labels inside the node\n verticalAlign: 'middle',\n align: 'center', // Center-align labels\n fontSize: 15,\n fontWeight: 'bold',\n },\n leaves: {\n label: {\n position: 'right', // Position labels inside the node\n verticalAlign: 'middle',\n align: 'left', // Center-align labels\n fontSize: 15,\n fontWeight: 'bold',\n },\n },\n emphasis: {\n focus: 'descendant',\n },\n expandAndCollapse: true,\n animationDuration: 550,\n animationDurationUpdate: 750,\n },\n ],\n };\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Host, Reason, EventTime\nFROM default.events\nWHERE $timeFilterByColumn(EventTime) AND ClusterName IN ($clusterName) AND Kind = 'Node' AND Kind != ' '\nGROUP BY ClusterName, Host, Reason, EventTime",
+ "rawQuery": "SELECT ClusterName, Host, Reason, EventTime\nFROM default.events\nWHERE EventTime >= toDateTime(1698927382) AND EventTime <= toDateTime(1698927682) AND ClusterName IN ('beta-cluster') AND Kind = 'Node' AND Kind != ' '\nGROUP BY ClusterName, Host, Reason, EventTime",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Node Events by Cluster, Host, Reason, and EventTime",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 11,
+ "w": 24,
+ "x": 0,
+ "y": 54
+ },
+ "id": 5,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract data from your JSON as before\n const clusterNames = context.panel.data.series[0].fields[0].values; // New column for ClusterName\n const namespaces = context.panel.data.series[0].fields[1].values;\n const kinds = context.panel.data.series[0].fields[2].values; // Adjusted index for Kind\n const counts = context.panel.data.series[0].fields[3].values; // New column for Count\n\n // Create a hierarchical structure from the data without a root node\n const hierarchy = {\n name: 'root', // Use 'root' as a placeholder\n children: [],\n };\n\n const seenClusterNames = new Set();\n const seenNamespaces = new Set();\n\n for (let i = 0; i < clusterNames.length; i++) {\n const clusterName = clusterNames[i];\n const namespace = namespaces[i];\n const kind = kinds[i];\n const count = counts[i]; // Get the count value\n\n if (!seenClusterNames.has(clusterName)) {\n seenClusterNames.add(clusterName);\n const clusterNode = { name: clusterName, children: [] };\n hierarchy.children.push(clusterNode);\n seenNamespaces.clear(); // Reset seenNamespaces for each cluster\n }\n\n const clusterNode = hierarchy.children.find((node) => node.name === clusterName);\n\n if (!seenNamespaces.has(namespace)) {\n seenNamespaces.add(namespace);\n const namespaceNode = { name: namespace, children: [] };\n clusterNode.children.push(namespaceNode);\n }\n\n const namespaceNode = clusterNode.children.find((node) => node.name === namespace);\n const kindNode = { name: kind, children: [{ name: `Count: ${count}` }] }; // Include the count as a child node\n namespaceNode.children.push(kindNode);\n }\n\n // Create the tree chart using ECharts\n option = {\n tooltip: {\n trigger: 'item',\n triggerOn: 'mousemove',\n formatter: function (params) {\n const node = params.data;\n let tooltip = '';\n if (node.column) {\n tooltip += `${node.column}: ${node.name}`;\n } else {\n tooltip += node.name;\n }\n return tooltip;\n },\n },\n series: [\n {\n type: 'tree',\n data: hierarchy.children, // Use the children directly\n top: '1%',\n left: '7%',\n bottom: '1%',\n right: '20%',\n symbolSize: 7,\n label: {\n position: 'left',\n verticalAlign: 'middle',\n align: 'right',\n fontSize: 15, // Increase the text size for regular nodes\n fontWeight: 'bold', // Set the font weight to bold\n },\n leaves: {\n label: {\n position: 'right',\n verticalAlign: 'middle',\n align: 'left',\n fontSize: 15, // Increase the text size for leaves\n fontWeight: 'bold', // Set the font weight to bold\n },\n },\n emphasis: {\n focus: 'descendant',\n },\n expandAndCollapse: true,\n animationDuration: 550,\n animationDurationUpdate: 750,\n },\n ],\n };\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Namespace, Kind, count(*) AS count\nFROM default.events\nWHERE $timeFilterByColumn(EventTime) AND ClusterName IN ($clusterName) AND Namespace IN ($namespace)\nGROUP BY ClusterName, Namespace, Kind",
+ "rawQuery": "SELECT ClusterName, Namespace, Kind, count(*) AS count\nFROM default.events\nWHERE EventTime >= toDateTime(1694603943) AND EventTime <= toDateTime(1694604243) AND ClusterName IN ('dev') AND Namespace IN ('kubviz','argocd','observability','default','tracetestdemo','sonarqube','kube-system','tek','quality','tekton-pipelines','sample','tekton-pipelines-resolvers','tekton-chains','cert-manager','qtapp','otel-collector','mysql','traefik')\nGROUP BY ClusterName, Namespace, Kind",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Kubernetes Workload",
+ "type": "volkovlabs-echarts-panel"
+ },
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
@@ -42,8 +504,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -58,7 +519,7 @@
"h": 7,
"w": 24,
"x": 0,
- "y": 0
+ "y": 65
},
"id": 4,
"options": {
@@ -121,8 +582,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -137,7 +597,7 @@
"h": 16,
"w": 24,
"x": 0,
- "y": 7
+ "y": 72
},
"id": 2,
"options": {
@@ -172,7 +632,6 @@
}
],
"title": "Kubernetes",
- "transparent": true,
"type": "table"
}
],
@@ -284,6 +743,6 @@
"timezone": "",
"title": "Kubedata",
"uid": "Qq-FK1rVz",
- "version": 1,
+ "version": 3,
"weekStart": ""
}
diff --git a/grafana/kubeScore-dashboard.json b/grafana/kubeScore-dashboard.json
index b84ecb42..2fb3a6a0 100644
--- a/grafana/kubeScore-dashboard.json
+++ b/grafana/kubeScore-dashboard.json
@@ -21,7 +21,7 @@
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
- "id": 10,
+ "id": 5,
"links": [],
"liveNow": false,
"panels": [
@@ -38,8 +38,11 @@
},
"custom": {
"align": "center",
- "displayMode": "color-text",
- "filterable": true
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
},
"mappings": [],
"thresholds": {
@@ -77,7 +80,7 @@
},
"showHeader": true
},
- "pluginVersion": "8.4.6",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -89,33 +92,32 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.kubescore",
- "rawQuery": "SELECT * FROM default.kubescore",
+ "query": "SELECT * FROM default.kubescore\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.kubescore\nWHERE EventTime >= toDateTime(1694245574) AND EventTime <= toDateTime(1694267174)\nORDER BY EventTime DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
"title": "KubeScore",
- "transparent": true,
"type": "table"
}
],
"refresh": "",
- "schemaVersion": 35,
+ "schemaVersion": 38,
"style": "dark",
"tags": [],
"templating": {
"list": []
},
"time": {
- "from": "now-6h",
+ "from": "now-24h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "KubeScore",
"uid": "d8f0fceb-7621-45bc-9710-89e11fe57a79",
- "version": 2,
+ "version": 1,
"weekStart": ""
-}
+}
\ No newline at end of file
diff --git a/grafana/kuberhealthy-dashboard.json b/grafana/kuberhealthy-dashboard.json
new file mode 100644
index 00000000..46b8bd7d
--- /dev/null
+++ b/grafana/kuberhealthy-dashboard.json
@@ -0,0 +1,1195 @@
+{
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "fiscalYearStartMonth": 0,
+ "graphTooltip": 0,
+ "id": 41,
+ "links": [],
+ "liveNow": false,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 13,
+ "w": 18,
+ "x": 0,
+ "y": 0
+ },
+ "id": 14,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract data from your JSON as before\n const namespaces = context.panel.data.series[0].fields[0].values;\n const counts = context.panel.data.series[0].fields[1].values;\n\n // Create a hierarchical structure from the data with a default cluster node\n const hierarchy = {\n name: 'CheckName', // Default cluster node\n children: [],\n };\n\n // Create an object to store namespaces and their counts\n const namespaceCounts = {};\n\n // Populate the namespaceCounts object with namespaces and counts\n for (let i = 0; i < namespaces.length; i++) {\n const namespace = namespaces[i];\n const count = counts[i];\n\n if (!namespaceCounts[namespace]) {\n namespaceCounts[namespace] = count;\n } else {\n namespaceCounts[namespace] += count;\n }\n }\n\n // Create nodes for each namespace and add them as children of the default cluster node\n for (const namespace in namespaceCounts) {\n hierarchy.children.push({\n name: namespace,\n children: [{ name: `${namespaceCounts[namespace]}` }],\n });\n }\n\n // Create the tree chart using ECharts\n option = {\n tooltip: {\n trigger: 'item',\n triggerOn: 'mousemove',\n formatter: function (params) {\n const node = params.data;\n let tooltip = '';\n if (node.column) {\n tooltip += `${node.column}: ${node.name}`;\n } else {\n tooltip += node.name;\n }\n return tooltip;\n },\n },\n series: [\n {\n type: 'tree',\n data: [hierarchy], // Use the hierarchy object as the data\n top: '1%',\n left: '7%',\n bottom: '1%',\n right: '20%',\n symbolSize: 7,\n label: {\n position: 'left',\n verticalAlign: 'middle',\n align: 'centre',\n fontSize: 15, // Increase the text size for regular nodes\n fontWeight: 'bold', // Set the font weight to bold\n },\n leaves: {\n label: {\n position: 'right',\n verticalAlign: 'middle',\n align: 'left',\n fontSize: 15, // Increase the text size for leaves\n fontWeight: 'bold', // Set the font weight to bold\n },\n },\n emphasis: {\n focus: 'descendant',\n },\n expandAndCollapse: true,\n animationDuration: 550,\n animationDurationUpdate: 750,\n },\n ],\n };\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "console.log(context);\nreturn {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "codeHeight": 600,
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "editorMode": "builder",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT CheckName, count(OK) AS counts\nFROM default.kuberhealthy\nWHERE OK = '1'\nGROUP BY CheckName",
+ "rawQuery": "SELECT CheckName, count(OK) AS counts\nFROM default.kuberhealthy\nWHERE OK = '1'\nGROUP BY CheckName",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Running kuberhealthy checks",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 12,
+ "w": 18,
+ "x": 0,
+ "y": 13
+ },
+ "id": 15,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let option; // Initialize the option variable\n\nif (typeof context.panel.data === 'undefined' || !context.panel.data.series || !context.panel.data.series[0] || !context.panel.data.series[0].fields) {\n // Data is not available or doesn't have the expected structure\n option = {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n} else {\n // Extract data from your JSON as before\n const namespaces = context.panel.data.series[0].fields[0].values;\n const counts = context.panel.data.series[0].fields[1].values;\n\n // Create a hierarchical structure from the data with a default cluster node\n const hierarchy = {\n name: 'CheckName', // Default cluster node\n children: [],\n };\n\n // Create an object to store namespaces and their counts\n const namespaceCounts = {};\n\n // Populate the namespaceCounts object with namespaces and counts\n for (let i = 0; i < namespaces.length; i++) {\n const namespace = namespaces[i];\n const count = counts[i];\n\n if (!namespaceCounts[namespace]) {\n namespaceCounts[namespace] = count;\n } else {\n namespaceCounts[namespace] += count;\n }\n }\n\n // Create nodes for each namespace and add them as children of the default cluster node\n for (const namespace in namespaceCounts) {\n hierarchy.children.push({\n name: namespace,\n children: [{ name: `${namespaceCounts[namespace]}` }],\n });\n }\n\n // Create the tree chart using ECharts\n option = {\n tooltip: {\n trigger: 'item',\n triggerOn: 'mousemove',\n formatter: function (params) {\n const node = params.data;\n let tooltip = '';\n if (node.column) {\n tooltip += `${node.column}: ${node.name}`;\n } else {\n tooltip += node.name;\n }\n return tooltip;\n },\n },\n series: [\n {\n type: 'tree',\n data: [hierarchy], // Use the hierarchy object as the data\n top: '1%',\n left: '7%',\n bottom: '1%',\n right: '20%',\n symbolSize: 7,\n label: {\n position: 'left',\n verticalAlign: 'middle',\n align: 'centre',\n fontSize: 15, // Increase the text size for regular nodes\n fontWeight: 'bold', // Set the font weight to bold\n },\n leaves: {\n label: {\n position: 'right',\n verticalAlign: 'middle',\n align: 'left',\n fontSize: 15, // Increase the text size for leaves\n fontWeight: 'bold', // Set the font weight to bold\n },\n },\n emphasis: {\n focus: 'descendant',\n },\n expandAndCollapse: true,\n animationDuration: 550,\n animationDurationUpdate: 750,\n },\n ],\n };\n}\n\nreturn option;\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "console.log(context);\nreturn {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "codeHeight": 600,
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "editorMode": "builder",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT CheckName, count(OK) AS counts\nFROM default.kuberhealthy\nWHERE OK = '0'\nGROUP BY CheckName",
+ "rawQuery": "SELECT CheckName, count(OK) AS counts\nFROM default.kuberhealthy\nWHERE OK = '0'\nGROUP BY CheckName",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": " Faliure kuberhealthy checks",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 20,
+ "gradientMode": "scheme",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "smooth",
+ "lineWidth": 3,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 25
+ },
+ "id": 10,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "Select LastRun, OK,Errors\nfrom default.kuberhealthy\nwhere CheckName LIKE '%pod-restarts%' AND $__timeFilter(LastRun)",
+ "refId": "A"
+ }
+ ],
+ "title": "Kuberhealthy Pod Restarts Status Over Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 10,
+ "gradientMode": "hue",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "smooth",
+ "lineStyle": {
+ "fill": "solid"
+ },
+ "lineWidth": 2,
+ "pointSize": 10,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "OK"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 25
+ },
+ "id": 8,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "Select LastRun, OK\nfrom default.kuberhealthy\nwhere $__timeFilter(LastRun)",
+ "refId": "A"
+ }
+ ],
+ "title": "Kuberhealthy Status Over Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": [
+ {
+ "__systemRef": "hideSeriesFrom",
+ "matcher": {
+ "id": "byNames",
+ "options": {
+ "mode": "exclude",
+ "names": [
+ "count()"
+ ],
+ "prefix": "All except:",
+ "readOnly": true
+ }
+ },
+ "properties": [
+ {
+ "id": "custom.hideFrom",
+ "value": {
+ "legend": false,
+ "tooltip": false,
+ "viz": true
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 33
+ },
+ "id": 7,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "Select LastRun, count(*)\nfrom default.kuberhealthy\nwhere $__timeFilter(LastRun)\nGroup By LastRun",
+ "refId": "A"
+ }
+ ],
+ "title": "Kuberhealthy Counts Over Time.",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 20,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "smooth",
+ "lineWidth": 3,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 33
+ },
+ "id": 12,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "Select LastRun, OK,Errors\nfrom default.kuberhealthy\nwhere CheckName LIKE '%image-pull-check%' AND $__timeFilter(LastRun)",
+ "refId": "A"
+ }
+ ],
+ "title": "Image Pull Check",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 20,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "smooth",
+ "lineWidth": 5,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 41
+ },
+ "id": 13,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "Select LastRun, OK,Errors\nfrom default.kuberhealthy\nwhere CheckName LIKE '%resource-quota%' AND $__timeFilter(LastRun)",
+ "refId": "A"
+ }
+ ],
+ "title": "Resource-quata check",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 41
+ },
+ "id": 6,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "Select LastRun, OK,Errors\nfrom default.kuberhealthy\nwhere $__timeFilter(LastRun) AND OK='0'\nOrder BY LastRun DESC",
+ "refId": "A"
+ }
+ ],
+ "title": "Kuberhealthy Errors Over Time",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 49
+ },
+ "id": 3,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true,
+ "sortBy": []
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "SELECT * FROM \"default\".\"kuberhealthy\"\nWhere $__timeFilter(LastRun)\nOrder By LastRun DESC \n\n",
+ "refId": "A"
+ }
+ ],
+ "title": "Kuberhealthy Tables",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 57
+ },
+ "id": 16,
+ "options": {
+ "displayMode": "gradient",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "SELECT CheckName, count(OK) AS counts\nFROM default.kuberhealthy\nWHERE OK = '1'\nGROUP BY CheckName",
+ "refId": "A"
+ }
+ ],
+ "title": "Success Kuberhealthy Checks",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 11,
+ "x": 12,
+ "y": 57
+ },
+ "id": 2,
+ "options": {
+ "displayMode": "gradient",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "SELECT CheckName, count(OK) AS counts\nFROM default.kuberhealthy\nWHERE OK = '0'\nGROUP BY CheckName",
+ "refId": "A"
+ }
+ ],
+ "title": "Failed Kuberhealthy Checks",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
+ },
+ {
+ "color": "red",
+ "value": 85
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 65
+ },
+ "id": 4,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "SELECT count(OK)\nFROM default.kuberhealthy\nWHERE OK = '1'",
+ "refId": "A"
+ }
+ ],
+ "title": "Total Count Kuberhealthy Successes",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
+ },
+ {
+ "color": "red",
+ "value": 85
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 65
+ },
+ "id": 5,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "editorType": "sql",
+ "format": 1,
+ "meta": {
+ "builderOptions": {
+ "columns": [],
+ "database": "",
+ "limit": 1000,
+ "mode": "list",
+ "queryType": "table",
+ "table": ""
+ }
+ },
+ "pluginVersion": "4.0.3",
+ "queryType": "table",
+ "rawSql": "SELECT count(OK)\nFROM default.kuberhealthy\nWHERE OK = '0'",
+ "refId": "A"
+ }
+ ],
+ "title": "Total Count Kuberhealthy Failures",
+ "type": "gauge"
+ }
+ ],
+ "refresh": "",
+ "schemaVersion": 38,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-24h",
+ "to": "now"
+ },
+ "timepicker": {},
+ "timezone": "",
+ "title": "Kuberhealthy",
+ "uid": "d946c53c-8b1d-4e3c-9154-4219165342",
+ "version": 2,
+ "weekStart": ""
+}
\ No newline at end of file
diff --git a/grafana/kubvizDsahboard.json b/grafana/kubvizDsahboard.json
index 9cbff0a2..740b4baa 100644
--- a/grafana/kubvizDsahboard.json
+++ b/grafana/kubvizDsahboard.json
@@ -21,7 +21,7 @@
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
- "id": 1,
+ "id": 64,
"links": [],
"liveNow": false,
"panels": [
@@ -114,13 +114,7 @@
"color": {
"mode": "thresholds"
},
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
@@ -184,22 +178,319 @@
"type": "vertamedia-clickhouse-datasource",
"uid": "vertamedia-clickhouse-datasource"
},
- "description": "This panel displays the total count of outdated images from all cluster.",
+ "gridPos": {
+ "h": 9,
+ "w": 13,
+ "x": 0,
+ "y": 6
+ },
+ "id": 72,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "// Check if context.panel.data.series exists\nif (context.panel.data.series && context.panel.data.series.length > 0) {\n const reasons = context.panel.data.series[0].fields[0].values;\n const kinds = context.panel.data.series[0].fields[1].values;\n const eventTimes = context.panel.data.series[0].fields[2].values;\n\n // Create nodes and links\n const nodes = [];\n const links = [];\n\n reasons.forEach((reason, index) => {\n const sourceNode = {\n name: reason,\n category: 0, // Category for source nodes\n symbolSize: 60, // Size for source nodes\n };\n\n const kindNode = {\n name: kinds[index],\n category: 1, // Category for kind nodes\n symbolSize: 40, // Size for kind nodes\n };\n\n const eventTimeNode = {\n name: eventTimes[index],\n category: 2, // Category for eventTime nodes\n symbolSize: 20, // Size for eventTime nodes\n };\n\n // Ensure source, kind, and eventTime nodes are unique before adding them\n if (!nodes.some((node) => node.name === sourceNode.name)) {\n nodes.push(sourceNode);\n }\n\n if (!nodes.some((node) => node.name === kindNode.name)) {\n nodes.push(kindNode);\n }\n\n if (!nodes.some((node) => node.name === eventTimeNode.name)) {\n nodes.push(eventTimeNode);\n }\n\n // Create links between reason, kind, and eventTime nodes\n links.push({\n source: reason,\n target: kinds[index],\n });\n\n links.push({\n source: kinds[index],\n target: eventTimes[index],\n });\n });\n\n // Create categories for nodes\n const categories = [\n {\n name: 'Reasons',\n },\n {\n name: 'Nodes',\n },\n {\n name: 'Event Times',\n },\n ];\n\n // Create ECharts option\n const option = {\n tooltip: {\n trigger: 'item',\n formatter: '{b}',\n },\n legend: {\n x: 'left',\n data: ['Reasons', 'Nodes', 'Event Times'],\n },\n series: [\n {\n type: 'graph',\n layout: 'circular',\n roam: true,\n label: {\n show: true,\n textStyle: {\n color: '#000',\n },\n },\n force: {\n repulsion: 100,\n gravity: 0.1,\n edgeLength: 150,\n },\n data: nodes,\n links: links,\n draggable: true,\n categories: categories,\n edgeSymbol: [\"none\", \"arrow\"],\n edgeSymbolSize: [12, 12],\n lineStyle: {\n color: \"#000000\",\n curveness: 0,\n opacity: 0.3,\n },\n },\n ],\n };\n\n // Return the ECharts option\n return option;\n} else {\n // Display \"Data not available\" in the panel\n return {\n title: {\n text: 'Data not available',\n textStyle: {\n fontSize: 24,\n fontWeight: 'bold',\n },\n left: 'center',\n top: 'middle',\n },\n };\n}\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT Reason, Host, EventTime\nFROM default.events\nWHERE Kind IN ('Node') AND Reason IN ('NodeNotReady')",
+ "rawQuery": "SELECT Reason, Host, EventTime\nFROM default.events\nWHERE Kind IN ('Node') AND Reason IN ('NodeNotReady')",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "NodeNotReady Events for Nodes",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "gridPos": {
+ "h": 9,
+ "w": 11,
+ "x": 13,
+ "y": 6
+ },
+ "id": 70,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
+ },
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "editorMode": "code",
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let kind = [];\nlet resources = [];\n\ncontext.panel.data.series.forEach((s) => {\n const kindField = s.fields.find((f) => f.name === 'Kind');\n const resourcesField = s.fields.find((f) => f.name === 'Resources');\n if (kindField && resourcesField) {\n kind = kindField.values;\n resources = resourcesField.values;\n }\n});\n\n// Create an empty array to store doughnut chart data\nconst doughnutChartData = [];\n\n// Define colors for doughnut slices\nconst doughnutSliceColors = ['#235894', '#FFFF00', '#FF0000', '#00FF00', '#FFA500'];\n\n// Map kind and resources counts to doughnut chart data\nkind.forEach((kinddata, index) => {\n doughnutChartData.push({\n value: resources[index],\n name: kinddata,\n clusterName: context.panel.data.series[0].fields[0].values[index], // Extract cluster name\n itemStyle: {\n borderRadius: [10, 10, 10, 10], // Add rounded corners\n color: doughnutSliceColors[index % doughnutSliceColors.length],\n borderWidth: 2,\n borderColor: '#fff',\n },\n });\n});\n\nreturn {\n backgroundColor: '#FFFFFF', // Set the background color to white\n tooltip: {\n trigger: 'item',\n formatter: function (params) {\n return `Resource From ${params.data.clusterName} ${params.value}`;\n },\n },\n legend: {\n top: '5%',\n left: 'center',\n },\n series: [\n {\n name: '',\n type: 'pie',\n radius: ['40%', '70%'],\n avoidLabelOverlap: false,\n label: {\n show: false,\n position: 'center',\n },\n emphasis: {\n label: {\n show: true,\n fontSize: 40,\n fontWeight: 'bold',\n },\n },\n labelLine: {\n show: false,\n },\n data: doughnutChartData, // Use the modified doughnut chart data\n },\n ],\n};\n",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ },
+ "visualEditor": {
+ "code": "return {\n dataset: context.editor.dataset,\n series: context.editor.series,\n xAxis: {\n type: 'time',\n },\n yAxis: {\n type: 'value',\n min: 'dataMin',\n },\n}\n",
+ "dataset": [],
+ "series": []
+ }
+ },
+ "pluginVersion": "6.0.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Kind, count(Resource) AS Resources\nFROM default.getall_resources\nWHERE $timeFilterByColumn(EventTime) AND Kind In ('Deployment','Job','Secret', 'ConfigMap', 'Service')\nGROUP BY ClusterName, Kind",
+ "rawQuery": "SELECT ClusterName, Kind, count(Resource) AS Resources\nFROM default.getall_resources\nWHERE EventTime >= toDateTime(1698841487) AND EventTime <= toDateTime(1698927887) AND Kind In ('Deployment','Job','Secret', 'ConfigMap', 'Service')\nGROUP BY ClusterName, Kind",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Resource Distribution",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
"fieldConfig": {
"defaults": {
"color": {
- "mode": "thresholds"
+ "mode": "palette-classic"
},
- "links": [
- {
- "targetBlank": true,
- "title": "Outdated Images",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 30,
+ "gradientMode": "hue",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
}
- ],
+ },
"mappings": [],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 15
+ },
+ "id": 69,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "time_series",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": " SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE $timeFilterByColumn(EventTime) AND Kind = 'Pod' AND Reason = 'Created'\n GROUP BY EventTime, Kind, Reason",
+ "rawQuery": "SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE EventTime >= toDateTime(1698839664) AND EventTime <= toDateTime(1698926064) AND Kind = 'Pod' AND Reason = 'Created'\n GROUP BY EventTime, Kind, Reason",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "time_series",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "hide": false,
+ "intervalFactor": 1,
+ "query": " SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE $timeFilterByColumn(EventTime) AND Kind = 'Pod' AND Reason = 'BackOff'\n GROUP BY EventTime, Kind, Reason",
+ "rawQuery": "SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE EventTime >= toDateTime(1698839664) AND EventTime <= toDateTime(1698926064) AND Kind = 'Pod' AND Reason = 'BackOff'\n GROUP BY EventTime, Kind, Reason",
+ "refId": "B",
+ "round": "0s",
+ "skip_comments": true
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "time_series",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "hide": false,
+ "intervalFactor": 1,
+ "query": "SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE $timeFilterByColumn(EventTime) AND Kind = 'Pod' AND Reason = 'NodeNotReady'\n GROUP BY EventTime, Kind, Reason",
+ "rawQuery": "SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE EventTime >= toDateTime(1698839664) AND EventTime <= toDateTime(1698926064) AND Kind = 'Pod' AND Reason = 'NodeNotReady'\n GROUP BY EventTime, Kind, Reason",
+ "refId": "C",
+ "round": "0s",
+ "skip_comments": true
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "time_series",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "hide": false,
+ "intervalFactor": 1,
+ "query": " SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE $timeFilterByColumn(EventTime) AND Kind = 'Pod' AND Reason = 'Scheduled'\n GROUP BY EventTime, Kind, Reason",
+ "rawQuery": "SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE EventTime >= toDateTime(1698839664) AND EventTime <= toDateTime(1698926064) AND Kind = 'Pod' AND Reason = 'Scheduled'\n GROUP BY EventTime, Kind, Reason",
+ "refId": "D",
+ "round": "0s",
+ "skip_comments": true
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "time_series",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "hide": false,
+ "intervalFactor": 1,
+ "query": "SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE $timeFilterByColumn(EventTime) AND Kind = 'Pod' AND Reason = 'FailedScheduling'\n GROUP BY EventTime, Kind, Reason",
+ "rawQuery": "SELECT EventTime, Kind, Reason, count(*) as total_count\n FROM default.events\n WHERE EventTime >= toDateTime(1698839664) AND EventTime <= toDateTime(1698926064) AND Kind = 'Pod' AND Reason = 'FailedScheduling'\n GROUP BY EventTime, Kind, Reason",
+ "refId": "E",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Pod Events by Reason Over Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
"steps": [
{
"color": "green",
@@ -212,13 +503,98 @@
]
}
},
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Age"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "lcd",
+ "type": "gauge"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 15
+ },
+ "id": 71,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT ClusterName, Resource, Age\nFROM default.getall_resources\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT ClusterName, Resource, Age\nFROM default.getall_resources\nWHERE EventTime >= toDateTime(1696336208) AND EventTime <= toDateTime(1698928208)",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Resource Details by Cluster and Age",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "This panel displays the total count of outdated images from all clusters.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "links": [],
+ "mappings": [],
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
"overrides": []
},
"gridPos": {
"h": 4,
"w": 5,
"x": 0,
- "y": 6
+ "y": 22
},
"id": 18,
"options": {
@@ -245,8 +621,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.outdated_images\nWHERE VersionsBehind > 0",
- "rawQuery": "SELECT count(*) FROM default.outdated_images\nWHERE VersionsBehind > 0",
+ "query": "SELECT count(*)\n\nFROM default.outdated_images\n\nWHERE $timeFilterByColumn(EventTime) AND VersionsBehind > 0",
+ "rawQuery": "SELECT count(*)\n\nFROM default.outdated_images\n\nWHERE EventTime >= toDateTime(1695283225) AND EventTime <= toDateTime(1695369625) AND VersionsBehind > 0",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -266,20 +642,13 @@
"color": {
"mode": "thresholds"
},
- "links": [
- {
- "targetBlank": true,
- "title": "Kubedata",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1689917173495&to=1689918073495"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -294,7 +663,7 @@
"h": 4,
"w": 5,
"x": 5,
- "y": 6
+ "y": 22
},
"id": 20,
"options": {
@@ -342,20 +711,13 @@
"color": {
"mode": "thresholds"
},
- "links": [
- {
- "targetBlank": true,
- "title": "DeletedAPIs",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -370,7 +732,7 @@
"h": 4,
"w": 5,
"x": 10,
- "y": 6
+ "y": 22
},
"id": 22,
"options": {
@@ -397,8 +759,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.DeletedAPIs",
- "rawQuery": "SELECT count(*) FROM default.DeletedAPIs",
+ "query": "SELECT count(*) FROM default.DeletedAPIs\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT count(*) FROM default.DeletedAPIs\nWHERE EventTime >= toDateTime(1695367863) AND EventTime <= toDateTime(1695369663)",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -418,20 +780,13 @@
"color": {
"mode": "thresholds"
},
- "links": [
- {
- "targetBlank": true,
- "title": "DeprecatedAPIs",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -446,7 +801,7 @@
"h": 4,
"w": 5,
"x": 15,
- "y": 6
+ "y": 22
},
"id": 24,
"options": {
@@ -473,8 +828,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.DeprecatedAPIs",
- "rawQuery": "SELECT count(*) FROM default.DeprecatedAPIs",
+ "query": "SELECT count(*) FROM default.DeprecatedAPIs\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT count(*) FROM default.DeprecatedAPIs\nWHERE EventTime >= toDateTime(1695367882) AND EventTime <= toDateTime(1695369682)",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -494,20 +849,13 @@
"color": {
"mode": "thresholds"
},
- "links": [
- {
- "targetBlank": true,
- "title": "Kubernetes Resources",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -522,7 +870,7 @@
"h": 4,
"w": 4,
"x": 20,
- "y": 6
+ "y": 22
},
"id": 26,
"options": {
@@ -549,8 +897,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) FROM default.getall_resources",
- "rawQuery": "SELECT count(*) FROM default.getall_resources",
+ "query": "SELECT count(*) FROM default.getall_resources\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT count(*) FROM default.getall_resources\nWHERE EventTime >= toDateTime(1695367909) AND EventTime <= toDateTime(1695369709)",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -564,7 +912,7 @@
"type": "grafana-clickhouse-datasource",
"uid": "ClickHouse"
},
- "description": "This panel provides a time-based analysis of the occurrences of 'Pod' and 'Node'",
+ "description": "This panel provides a time-based analysis of the occurrences of 'Pod' events",
"fieldConfig": {
"defaults": {
"color": {
@@ -609,8 +957,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -623,9 +970,9 @@
},
"gridPos": {
"h": 6,
- "w": 24,
+ "w": 13,
"x": 0,
- "y": 10
+ "y": 26
},
"id": 53,
"options": {
@@ -658,14 +1005,95 @@
"rawSql": "SELECT EventTime, COUNT(*) AS Pods\nFROM default.events\nWHERE Kind = 'Pod'\nGROUP BY EventTime;\n",
"refId": "A",
"selectedFormat": 0
+ }
+ ],
+ "title": "Number of Pods over Time",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "grafana-clickhouse-datasource",
+ "uid": "ClickHouse"
+ },
+ "description": "This panel provides a time-based analysis of the occurrences of 'Node' events",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "fixedColor": "yellow",
+ "mode": "fixed"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "auto",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "off"
+ }
+ },
+ "mappings": [],
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
},
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 6,
+ "w": 11,
+ "x": 13,
+ "y": 26
+ },
+ "id": 65,
+ "options": {
+ "legend": {
+ "calcs": [],
+ "displayMode": "list",
+ "placement": "bottom",
+ "showLegend": true
+ },
+ "tooltip": {
+ "mode": "single",
+ "sort": "none"
+ }
+ },
+ "targets": [
{
"datasource": {
"type": "grafana-clickhouse-datasource",
"uid": "ClickHouse"
},
"format": 0,
- "hide": false,
"meta": {
"builderOptions": {
"fields": [],
@@ -675,11 +1103,11 @@
},
"queryType": "sql",
"rawSql": "SELECT EventTime, COUNT(*) AS Nodes\nFROM default.events\nWHERE Kind = 'Node'\nGROUP BY EventTime;\n",
- "refId": "B",
+ "refId": "A",
"selectedFormat": 0
}
],
- "title": "Number of Pods and Nodes over time",
+ "title": "Number of Nodes over Time",
"type": "timeseries"
},
{
@@ -694,20 +1122,13 @@
"fixedColor": "#249b6a",
"mode": "fixed"
},
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -722,7 +1143,7 @@
"h": 4,
"w": 6,
"x": 0,
- "y": 16
+ "y": 32
},
"id": 57,
"options": {
@@ -769,20 +1190,13 @@
"description": "This panel displays the total number of pods with Created state.",
"fieldConfig": {
"defaults": {
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "orange",
@@ -801,7 +1215,7 @@
"h": 4,
"w": 6,
"x": 6,
- "y": 16
+ "y": 32
},
"id": 63,
"options": {
@@ -846,20 +1260,13 @@
"description": "This panel displays the total number of pods with backOff state.",
"fieldConfig": {
"defaults": {
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "orange",
@@ -878,7 +1285,7 @@
"h": 4,
"w": 6,
"x": 12,
- "y": 16
+ "y": 32
},
"id": 61,
"options": {
@@ -923,20 +1330,13 @@
"description": "This panel displays the total number of pods with Unhealthy state.",
"fieldConfig": {
"defaults": {
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "orange",
@@ -955,7 +1355,7 @@
"h": 4,
"w": 6,
"x": 18,
- "y": 16
+ "y": 32
},
"id": 62,
"options": {
@@ -1004,20 +1404,13 @@
"fixedColor": "#249b6a",
"mode": "fixed"
},
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1032,7 +1425,7 @@
"h": 4,
"w": 6,
"x": 0,
- "y": 20
+ "y": 36
},
"id": 56,
"options": {
@@ -1079,20 +1472,13 @@
"description": "This panel displays the total number of nodes which is in not ready state",
"fieldConfig": {
"defaults": {
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "orange",
@@ -1111,7 +1497,7 @@
"h": 4,
"w": 6,
"x": 6,
- "y": 20
+ "y": 36
},
"id": 58,
"options": {
@@ -1156,20 +1542,13 @@
"description": "This panel displays the total number of nodes which is in ready state",
"fieldConfig": {
"defaults": {
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "orange",
@@ -1188,7 +1567,7 @@
"h": 4,
"w": 6,
"x": 12,
- "y": 20
+ "y": 36
},
"id": 59,
"options": {
@@ -1233,20 +1612,13 @@
"description": "This panel displays the total number of nodes which is in NodeHasNoDiskPressure state",
"fieldConfig": {
"defaults": {
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "percentage",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "orange",
@@ -1265,7 +1637,7 @@
"h": 4,
"w": 6,
"x": 18,
- "y": 20
+ "y": 36
},
"id": 60,
"options": {
@@ -1321,20 +1693,13 @@
"filterable": true,
"inspect": false
},
- "links": [
- {
- "targetBlank": true,
- "title": "Kubedata",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1689917173495&to=1689918073495"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "light-yellow",
@@ -1378,7 +1743,7 @@
"h": 8,
"w": 24,
"x": 0,
- "y": 24
+ "y": 40
},
"id": 44,
"options": {
@@ -1421,24 +1786,26 @@
"type": "vertamedia-clickhouse-datasource",
"uid": "vertamedia-clickhouse-datasource"
},
- "description": "This panel displays the total number of clusters containing activity uniquely.",
+ "description": "This panel displays the total number of clusters containing DeletedAPIs activity.",
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
- "links": [],
"mappings": [],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
},
{
"color": "red",
- "value": 80
+ "value": 85
}
]
}
@@ -1447,11 +1814,11 @@
},
"gridPos": {
"h": 5,
- "w": 24,
+ "w": 8,
"x": 0,
- "y": 32
+ "y": 48
},
- "id": 40,
+ "id": 66,
"options": {
"orientation": "auto",
"reduceOptions": {
@@ -1476,29 +1843,65 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT COUNT(DISTINCT ClusterName) AS DeletedAPIs\nFROM default.DeletedAPIs",
- "rawQuery": "SELECT COUNT(DISTINCT ClusterName) AS DeletedAPIs\nFROM default.DeletedAPIs",
+ "query": "SELECT COUNT(DISTINCT ClusterName) AS DeletedAPIs\nFROM default.DeletedAPIs\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT COUNT(DISTINCT ClusterName) AS DeletedAPIs\nFROM default.DeletedAPIs\nWHERE EventTime >= toDateTime(1695369491) AND EventTime <= toDateTime(1695369791)",
"refId": "A",
"round": "0s",
"skip_comments": true
+ }
+ ],
+ "title": "Number of Clusters Containing DeletedAPIs Activity",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "This panel displays the total number of clusters containing Outdated Images activity.",
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
+ },
+ {
+ "color": "red",
+ "value": 85
+ }
+ ]
+ }
},
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT COUNT(DISTINCT ClusterName) AS DeprecatedAPIs\nFROM default.DeprecatedAPIs",
- "rawQuery": "SELECT COUNT(DISTINCT ClusterName) AS DeprecatedAPIs\nFROM default.DeprecatedAPIs",
- "refId": "B",
- "round": "0s",
- "skip_comments": true
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 8,
+ "y": 48
+ },
+ "id": 68,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
},
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
@@ -1508,14 +1911,66 @@
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
"intervalFactor": 1,
- "query": "SELECT count(DISTINCT(ClusterName)) AS Events\nFROM default.events",
- "rawQuery": "SELECT count(DISTINCT(ClusterName)) AS Events\nFROM default.events",
- "refId": "C",
+ "query": "SELECT count(DISTINCT(ClusterName)) AS OutdatedImages\nFROM default.outdated_images\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT count(DISTINCT(ClusterName)) AS OutdatedImages\nFROM default.outdated_images\nWHERE EventTime >= toDateTime(1695369518) AND EventTime <= toDateTime(1695369818)",
+ "refId": "A",
"round": "0s",
"skip_comments": true
+ }
+ ],
+ "title": "Number of Clusters Containing Outdated Images Activity",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "This panel displays the total number of clusters containing DeprecatedAPIs activity.",
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
+ },
+ {
+ "color": "red",
+ "value": 85
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 8,
+ "x": 16,
+ "y": 48
+ },
+ "id": 67,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [
+ "lastNotNull"
+ ],
+ "fields": "",
+ "values": false
},
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
@@ -1525,16 +1980,15 @@
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
"intervalFactor": 1,
- "query": "SELECT count(DISTINCT(ClusterName)) AS OutdatedImages\nFROM default.outdated_images",
- "rawQuery": "SELECT count(DISTINCT(ClusterName)) AS OutdatedImages\nFROM default.outdated_images",
- "refId": "D",
+ "query": "SELECT COUNT(DISTINCT ClusterName) AS DeprecatedAPIs\nFROM default.DeprecatedAPIs\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT COUNT(DISTINCT ClusterName) AS DeprecatedAPIs\nFROM default.DeprecatedAPIs\nWHERE EventTime >= toDateTime(1695369535) AND EventTime <= toDateTime(1695369835)",
+ "refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Number of Clusters Containing Activity",
+ "title": "Number of Clusters Containing DeprecatedAPIs Activity",
"type": "gauge"
},
{
@@ -1556,13 +2010,7 @@
"filterable": true,
"inspect": false
},
- "links": [
- {
- "targetBlank": true,
- "title": "KubeData",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1690551079516&to=1690551979516"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
@@ -1600,7 +2048,7 @@
"h": 8,
"w": 24,
"x": 0,
- "y": 37
+ "y": 53
},
"id": 64,
"options": {
@@ -1627,8 +2075,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT ClusterName, Reason, count(Kind) AS Pods FROM default.events\nWHERE Kind IN 'Pod' \nGROUP BY ClusterName,Reason",
- "rawQuery": "SELECT ClusterName, Reason, count(Kind) AS Pods FROM default.events\nWHERE Kind IN 'Pod' \nGROUP BY ClusterName,Reason",
+ "query": "SELECT ClusterName, Reason, count(Kind) AS Pods FROM default.events\nWHERE $timeFilterByColumn(EventTime) AND Kind IN 'Pod' \nGROUP BY ClusterName,Reason",
+ "rawQuery": "SELECT ClusterName, Reason, count(Kind) AS Pods FROM default.events\nWHERE EventTime >= toDateTime(1695369559) AND EventTime <= toDateTime(1695369859) AND Kind IN 'Pod' \nGROUP BY ClusterName,Reason",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -1670,13 +2118,7 @@
"mode": "off"
}
},
- "links": [
- {
- "targetBlank": true,
- "title": "DeprecatedAPIs",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
@@ -1697,7 +2139,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 45
+ "y": 61
},
"id": 34,
"options": {
@@ -1732,8 +2174,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT ClusterName, count(Deprecated) AS DeprecatedAPIs FROM default.DeprecatedAPIs\nGROUP BY ClusterName",
- "rawQuery": "SELECT ClusterName, count(Deprecated) AS DeprecatedAPIs FROM default.DeprecatedAPIs\nGROUP BY ClusterName",
+ "query": "SELECT ClusterName, count(Deprecated) AS DeprecatedAPIs FROM default.DeprecatedAPIs\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY ClusterName",
+ "rawQuery": "SELECT ClusterName, count(Deprecated) AS DeprecatedAPIs FROM default.DeprecatedAPIs\nWHERE EventTime >= toDateTime(1695369580) AND EventTime <= toDateTime(1695369880)\nGROUP BY ClusterName",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -1775,13 +2217,7 @@
"mode": "off"
}
},
- "links": [
- {
- "targetBlank": true,
- "title": "DeletedAPIs",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
@@ -1802,7 +2238,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 45
+ "y": 61
},
"id": 36,
"options": {
@@ -1837,8 +2273,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT ClusterName, count(Deleted) AS DeletedAPIs FROM default.DeletedAPIs\nGROUP BY ClusterName",
- "rawQuery": "SELECT ClusterName, count(Deleted) AS DeletedAPIs FROM default.DeletedAPIs\nGROUP BY ClusterName",
+ "query": "SELECT ClusterName, count(Deleted) AS DeletedAPIs FROM default.DeletedAPIs\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY ClusterName",
+ "rawQuery": "SELECT ClusterName, count(Deleted) AS DeletedAPIs FROM default.DeletedAPIs\nWHERE EventTime >= toDateTime(1695369607) AND EventTime <= toDateTime(1695369907)\nGROUP BY ClusterName",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -1880,13 +2316,7 @@
"mode": "off"
}
},
- "links": [
- {
- "targetBlank": true,
- "title": "Outdated Images",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
@@ -1907,7 +2337,7 @@
"h": 8,
"w": 8,
"x": 0,
- "y": 54
+ "y": 70
},
"id": 28,
"options": {
@@ -1942,8 +2372,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT ClusterName, count(CurrentImage) AS Outdated_Images FROM default.outdated_images\nGROUP BY ClusterName",
- "rawQuery": "SELECT ClusterName, count(CurrentImage) AS Outdated_Images FROM default.outdated_images\nGROUP BY ClusterName",
+ "query": "SELECT ClusterName, count(CurrentImage) AS Outdated_Images FROM default.outdated_images\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY ClusterName",
+ "rawQuery": "SELECT ClusterName, count(CurrentImage) AS Outdated_Images FROM default.outdated_images\nWHERE EventTime >= toDateTime(1695369627) AND EventTime <= toDateTime(1695369927)\nGROUP BY ClusterName",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -1984,13 +2414,7 @@
"mode": "off"
}
},
- "links": [
- {
- "targetBlank": true,
- "title": "Kubedata",
- "url": "https://grafana.alpha.optimizor.app/d/Qq-FK1rVz/kubedata?orgId=1&from=1689917173495&to=1689918073495"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
@@ -2011,7 +2435,7 @@
"h": 8,
"w": 8,
"x": 8,
- "y": 54
+ "y": 70
},
"id": 32,
"options": {
@@ -2089,13 +2513,7 @@
"mode": "off"
}
},
- "links": [
- {
- "targetBlank": true,
- "title": "Kubernetes Resources",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
@@ -2116,7 +2534,7 @@
"h": 8,
"w": 8,
"x": 16,
- "y": 54
+ "y": 70
},
"id": 30,
"options": {
@@ -2151,8 +2569,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT ClusterName, count(Resource) AS Resources FROM default.getall_resources\nGROUP BY ClusterName",
- "rawQuery": "SELECT ClusterName, count(Resource) AS Resources FROM default.getall_resources\nGROUP BY ClusterName",
+ "query": "SELECT ClusterName, count(Resource) AS Resources FROM default.getall_resources\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY ClusterName",
+ "rawQuery": "SELECT ClusterName, count(Resource) AS Resources FROM default.getall_resources\nWHERE EventTime >= toDateTime(1695369653) AND EventTime <= toDateTime(1695369953)\nGROUP BY ClusterName",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -2180,13 +2598,7 @@
"filterable": true,
"inspect": false
},
- "links": [
- {
- "targetBlank": true,
- "title": "Kubernetes Resources",
- "url": "https://grafana.alpha.optimizor.app/d/o2M7hbrVk/kubviz-features?orgId=1&from=1689896094681&to=1689917694681"
- }
- ],
+ "links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
@@ -2228,7 +2640,7 @@
"h": 7,
"w": 24,
"x": 0,
- "y": 62
+ "y": 78
},
"id": 42,
"options": {
@@ -2255,8 +2667,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT ClusterName, Kind, count(Resource) AS Resources FROM default.getall_resources\nGROUP BY ClusterName,Kind\nORDER BY Resources DESC",
- "rawQuery": "SELECT ClusterName, Kind, count(Resource) AS Resources FROM default.getall_resources\nGROUP BY ClusterName,Kind\nORDER BY Resources DESC",
+ "query": "SELECT ClusterName, Kind, count(Resource) AS Resources FROM default.getall_resources\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY ClusterName,Kind\nORDER BY Resources DESC",
+ "rawQuery": "SELECT ClusterName, Kind, count(Resource) AS Resources FROM default.getall_resources\nWHERE EventTime >= toDateTime(1695369674) AND EventTime <= toDateTime(1695369974)\nGROUP BY ClusterName,Kind\nORDER BY Resources DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -2269,13 +2681,13 @@
"collapsed": true,
"datasource": {
"type": "vertamedia-clickhouse-datasource",
- "uid": "e06865c2-5bcc-4533-8de7-880298c555af"
+ "uid": "vertamedia-clickhouse-datasource"
},
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 69
+ "y": 85
},
"id": 16,
"panels": [
@@ -2318,7 +2730,7 @@
"h": 16,
"w": 24,
"x": 0,
- "y": 70
+ "y": 110
},
"id": 14,
"options": {
@@ -2345,8 +2757,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.getall_resources",
- "rawQuery": "SELECT * FROM default.getall_resources",
+ "query": "SELECT * FROM default.getall_resources\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT * FROM default.getall_resources\nWHERE EventTime >= toDateTime(1695369699) AND EventTime <= toDateTime(1695369999)",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -2361,7 +2773,7 @@
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
- "uid": "e06865c2-5bcc-4533-8de7-880298c555af"
+ "uid": "vertamedia-clickhouse-datasource"
},
"refId": "A"
}
@@ -2373,13 +2785,13 @@
"collapsed": true,
"datasource": {
"type": "vertamedia-clickhouse-datasource",
- "uid": "e06865c2-5bcc-4533-8de7-880298c555af"
+ "uid": "vertamedia-clickhouse-datasource"
},
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 70
+ "y": 86
},
"id": 12,
"panels": [
@@ -2421,11 +2833,13 @@
"h": 16,
"w": 24,
"x": 0,
- "y": 218
+ "y": 127
},
"id": 10,
"options": {
+ "cellHeight": "sm",
"footer": {
+ "countRows": false,
"fields": "",
"reducer": [
"sum"
@@ -2434,7 +2848,7 @@
},
"showHeader": true
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -2446,8 +2860,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.outdated_images\nWHERE VersionsBehind > 0",
- "rawQuery": "SELECT * FROM default.outdated_images\nWHERE VersionsBehind > 0",
+ "query": "SELECT * FROM default.outdated_images\nWHERE $timeFilterByColumn(EventTime) AND VersionsBehind > 0",
+ "rawQuery": "SELECT * FROM default.outdated_images\nWHERE EventTime >= toDateTime(1695369722) AND EventTime <= toDateTime(1695370022) AND VersionsBehind > 0",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -2462,7 +2876,7 @@
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
- "uid": "e06865c2-5bcc-4533-8de7-880298c555af"
+ "uid": "vertamedia-clickhouse-datasource"
},
"refId": "A"
}
@@ -2474,13 +2888,13 @@
"collapsed": true,
"datasource": {
"type": "vertamedia-clickhouse-datasource",
- "uid": "e06865c2-5bcc-4533-8de7-880298c555af"
+ "uid": "vertamedia-clickhouse-datasource"
},
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 71
+ "y": 87
},
"id": 8,
"panels": [
@@ -2523,11 +2937,13 @@
"h": 11,
"w": 24,
"x": 0,
- "y": 219
+ "y": 112
},
"id": 6,
"options": {
+ "cellHeight": "sm",
"footer": {
+ "countRows": false,
"fields": "",
"reducer": [
"sum"
@@ -2536,7 +2952,7 @@
},
"showHeader": true
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -2548,8 +2964,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.DeletedAPIs",
- "rawQuery": "SELECT * FROM default.DeletedAPIs",
+ "query": "SELECT * FROM default.DeletedAPIs\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT * FROM default.DeletedAPIs\nWHERE EventTime >= toDateTime(1695369749) AND EventTime <= toDateTime(1695370049)",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -2564,7 +2980,7 @@
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
- "uid": "e06865c2-5bcc-4533-8de7-880298c555af"
+ "uid": "vertamedia-clickhouse-datasource"
},
"refId": "A"
}
@@ -2576,13 +2992,13 @@
"collapsed": true,
"datasource": {
"type": "vertamedia-clickhouse-datasource",
- "uid": "e06865c2-5bcc-4533-8de7-880298c555af"
+ "uid": "vertamedia-clickhouse-datasource"
},
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
- "y": 72
+ "y": 88
},
"id": 4,
"panels": [
@@ -2625,11 +3041,13 @@
"h": 8,
"w": 24,
"x": 0,
- "y": 220
+ "y": 113
},
"id": 2,
"options": {
+ "cellHeight": "sm",
"footer": {
+ "countRows": false,
"fields": "",
"reducer": [
"sum"
@@ -2638,7 +3056,7 @@
},
"showHeader": true
},
- "pluginVersion": "9.3.2",
+ "pluginVersion": "10.0.3",
"targets": [
{
"datasource": {
@@ -2650,8 +3068,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.DeprecatedAPIs",
- "rawQuery": "SELECT * FROM default.DeprecatedAPIs",
+ "query": "SELECT * FROM default.DeprecatedAPIs\nWHERE $timeFilterByColumn(EventTime)",
+ "rawQuery": "SELECT * FROM default.DeprecatedAPIs\nWHERE EventTime >= toDateTime(1695369773) AND EventTime <= toDateTime(1695370073)",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -2666,7 +3084,7 @@
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
- "uid": "e06865c2-5bcc-4533-8de7-880298c555af"
+ "uid": "vertamedia-clickhouse-datasource"
},
"refId": "A"
}
@@ -2683,7 +3101,7 @@
"list": []
},
"time": {
- "from": "now-30m",
+ "from": "now-24h",
"to": "now"
},
"timepicker": {},
diff --git a/grafana/kubvizFeatures-dashboard.json b/grafana/kubvizFeatures-dashboard.json
index 79287253..90582557 100644
--- a/grafana/kubvizFeatures-dashboard.json
+++ b/grafana/kubvizFeatures-dashboard.json
@@ -21,7 +21,7 @@
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
- "id": 5,
+ "id": 2,
"links": [],
"liveNow": false,
"panels": [
@@ -92,8 +92,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.getall_resources",
- "rawQuery": "SELECT * FROM default.getall_resources",
+ "query": "SELECT * FROM default.getall_resources\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.getall_resources\nWHERE EventTime >= toDateTime(1694219529) AND EventTime <= toDateTime(1694241129)\nORDER BY EventTime DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -169,8 +169,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.outdated_images\nWHERE VersionsBehind > 0",
- "rawQuery": "SELECT * FROM default.outdated_images\nWHERE VersionsBehind > 0",
+ "query": "SELECT * FROM default.outdated_images\nWHERE $timeFilterByColumn(EventTime) AND VersionsBehind > 0\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.outdated_images\nWHERE EventTime >= toDateTime(1694219614) AND EventTime <= toDateTime(1694241214) AND VersionsBehind > 0\nORDER BY EventTime DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -246,8 +246,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.DeletedAPIs",
- "rawQuery": "SELECT * FROM default.DeletedAPIs",
+ "query": "SELECT * FROM default.DeletedAPIs\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.DeletedAPIs\nWHERE EventTime >= toDateTime(1694219743) AND EventTime <= toDateTime(1694241343)\nORDER BY EventTime DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -323,8 +323,8 @@
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT * FROM default.DeprecatedAPIs",
- "rawQuery": "SELECT * FROM default.DeprecatedAPIs",
+ "query": "SELECT * FROM default.DeprecatedAPIs\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT * FROM default.DeprecatedAPIs\nWHERE EventTime >= toDateTime(1694219780) AND EventTime <= toDateTime(1694241380)\nORDER BY EventTime DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
@@ -342,7 +342,7 @@
"list": []
},
"time": {
- "from": "now-6h",
+ "from": "now-24h",
"to": "now"
},
"timepicker": {},
@@ -351,4 +351,4 @@
"uid": "o2M7hbrVk",
"version": 1,
"weekStart": ""
-}
+}
\ No newline at end of file
diff --git a/grafana/trivy-dashboard.json b/grafana/trivy-dashboard.json
index da050cba..7241ee58 100644
--- a/grafana/trivy-dashboard.json
+++ b/grafana/trivy-dashboard.json
@@ -21,20 +21,37 @@
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
- "id": 4,
+ "id": 71,
"links": [],
"liveNow": false,
"panels": [
+ {
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 49,
+ "title": "Image Vulnerability and SBOM",
+ "type": "row"
+ },
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "vertamedia-clickhouse-datasource"
},
- "description": "This panel illustrates the distribution of vulnerability severities across different clusters. It provides an overview of the count of vulnerabilities categorized by severity levels within each cluster.",
"fieldConfig": {
"defaults": {
"color": {
- "mode": "continuous-GrYlRd"
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "inspect": false
},
"mappings": [],
"thresholds": {
@@ -44,34 +61,65 @@
"color": "green",
"value": null
},
+ {
+ "color": "semi-dark-yellow",
+ "value": 10
+ },
+ {
+ "color": "orange",
+ "value": 25
+ },
{
"color": "red",
- "value": 80
+ "value": 50
+ },
+ {
+ "color": "semi-dark-red",
+ "value": 100
+ },
+ {
+ "color": "dark-red",
+ "value": 1000
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Counts"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "lcd",
+ "type": "gauge"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
- "y": 0
+ "y": 1
},
- "id": 20,
+ "id": 47,
"options": {
- "displayMode": "gradient",
- "minVizHeight": 10,
- "minVizWidth": 0,
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showUnfilled": true,
- "valueMode": "color"
+ "showHeader": true
},
"pluginVersion": "10.0.3",
"targets": [
@@ -81,30 +129,37 @@
"uid": "vertamedia-clickhouse-datasource"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT cluster_name, vul_severity, count(*) \nFROM default.trivy_vul\nGROUP BY cluster_name, vul_severity",
- "rawQuery": "SELECT cluster_name, vul_severity, count(*) \nFROM default.trivy_vul\nGROUP BY cluster_name, vul_severity",
+ "query": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'LOW'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
+ "rawQuery": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162544) AND EventTime <= toDateTime(1713248944) AND vul_severity = 'LOW'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Vulnerability Severity counts grouped by Cluster",
- "type": "bargauge"
+ "title": "Highest Vulnerability Images with Low Severity",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "vertamedia-clickhouse-datasource"
},
- "description": "This panel illustrates the distribution of misconfiguration severities across different clusters. It provides an overview of the count of misconfigurations categorized by severity levels within each cluster. ",
"fieldConfig": {
"defaults": {
"color": {
- "mode": "continuous-GrYlRd"
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "inspect": false
},
"mappings": [],
"thresholds": {
@@ -114,34 +169,65 @@
"color": "green",
"value": null
},
+ {
+ "color": "semi-dark-yellow",
+ "value": 10
+ },
+ {
+ "color": "orange",
+ "value": 25
+ },
{
"color": "red",
- "value": 80
+ "value": 50
+ },
+ {
+ "color": "semi-dark-red",
+ "value": 100
+ },
+ {
+ "color": "dark-red",
+ "value": 1000
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Counts"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "lcd",
+ "type": "gauge"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 0
+ "y": 1
},
- "id": 22,
+ "id": 48,
"options": {
- "displayMode": "gradient",
- "minVizHeight": 10,
- "minVizWidth": 0,
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showUnfilled": true,
- "valueMode": "color"
+ "showHeader": true
},
"pluginVersion": "10.0.3",
"targets": [
@@ -151,65 +237,105 @@
"uid": "vertamedia-clickhouse-datasource"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT cluster_name, misconfig_severity, count(*)\nFROM default.trivy_misconfig\nGROUP BY cluster_name, misconfig_severity",
- "rawQuery": "SELECT cluster_name, misconfig_severity, count(*)\nFROM default.trivy_misconfig\nGROUP BY cluster_name, misconfig_severity",
+ "query": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'HIGH'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
+ "rawQuery": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162563) AND EventTime <= toDateTime(1713248963) AND vul_severity = 'HIGH'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Misconfiguration Severity counts grouped by Cluster",
- "type": "bargauge"
+ "title": "Highest Vulnerability Images with High Severity",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "vertamedia-clickhouse-datasource"
},
- "description": "This panel displays the total of misconfigurations from each clusters.",
"fieldConfig": {
"defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "inspect": false
+ },
"mappings": [],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
+ {
+ "color": "semi-dark-yellow",
+ "value": 10
+ },
{
"color": "orange",
- "value": 70
+ "value": 25
},
{
"color": "red",
- "value": 85
+ "value": 50
+ },
+ {
+ "color": "semi-dark-red",
+ "value": 100
+ },
+ {
+ "color": "dark-red",
+ "value": 1000
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Counts"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "lcd",
+ "type": "gauge"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
- "h": 5,
+ "h": 8,
"w": 12,
"x": 0,
- "y": 8
+ "y": 9
},
- "id": 16,
+ "id": 45,
"options": {
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showThresholdLabels": false,
- "showThresholdMarkers": true
+ "showHeader": true
},
"pluginVersion": "10.0.3",
"targets": [
@@ -219,65 +345,105 @@
"uid": "vertamedia-clickhouse-datasource"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT cluster_name, count(*) FROM default.trivy_misconfig\nGROUP BY cluster_name",
- "rawQuery": "SELECT cluster_name, count(*) FROM default.trivy_misconfig\nGROUP BY cluster_name",
+ "query": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'MEDIUM'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
+ "rawQuery": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162505) AND EventTime <= toDateTime(1713248905) AND vul_severity = 'MEDIUM'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Misconfiguration Count by Cluster",
- "type": "gauge"
+ "title": "Highest Vulnerability Images with Medium Severity",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "vertamedia-clickhouse-datasource"
},
- "description": "This panel displays the total number Vulnerabilities under each namespace",
"fieldConfig": {
"defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "auto",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "inspect": false
+ },
"mappings": [],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
+ {
+ "color": "semi-dark-yellow",
+ "value": 10
+ },
{
"color": "orange",
- "value": 70
+ "value": 25
},
{
"color": "red",
- "value": 85
+ "value": 50
+ },
+ {
+ "color": "semi-dark-red",
+ "value": 100
+ },
+ {
+ "color": "dark-red",
+ "value": 1000
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "Counts"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "lcd",
+ "type": "gauge"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
- "h": 5,
+ "h": 8,
"w": 12,
"x": 12,
- "y": 8
+ "y": 9
},
- "id": 18,
+ "id": 46,
"options": {
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showThresholdLabels": false,
- "showThresholdMarkers": true
+ "showHeader": true
},
"pluginVersion": "10.0.3",
"targets": [
@@ -287,67 +453,86 @@
"uid": "vertamedia-clickhouse-datasource"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT cluster_name, count(*) FROM default.trivy_vul\nGROUP BY cluster_name",
- "rawQuery": "SELECT cluster_name, count(*) FROM default.trivy_vul\nGROUP BY cluster_name",
+ "query": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'CRITICAL'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
+ "rawQuery": "SELECT cluster_name, artifact_name, vul_pkg_name, count(vul_severity) AS Counts\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162524) AND EventTime <= toDateTime(1713248924) AND vul_severity = 'CRITICAL'\nGROUP BY cluster_name, artifact_name, vul_pkg_name\nHAVING count(vul_severity) > 1\nORDER BY count(vul_severity) DESC",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Vulnerability Count by Cluster",
- "type": "gauge"
+ "title": "Highest Vulnerability Images with Critical Severity",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "vertamedia-clickhouse-datasource"
},
- "description": "This panel provides a count of critical vulnerabilities categorized by namespace. It helps to monitor and prioritize critical security issues across different namespaces.",
"fieldConfig": {
"defaults": {
"color": {
- "mode": "continuous-GrYlRd"
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "filterable": true,
+ "inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green",
+ "color": "light-blue",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "vul_severity"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "basic",
+ "type": "color-background"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
- "h": 7,
+ "h": 8,
"w": 12,
"x": 0,
- "y": 13
+ "y": 17
},
- "id": 12,
+ "id": 43,
"options": {
- "displayMode": "basic",
- "minVizHeight": 10,
- "minVizWidth": 0,
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showUnfilled": true,
- "valueMode": "color"
+ "showHeader": true
},
"pluginVersion": "10.0.3",
"targets": [
@@ -357,30 +542,38 @@
"uid": "vertamedia-clickhouse-datasource"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT cluster_name, namespace, count(vul_severity) AS Critical_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace\n",
- "rawQuery": "SELECT cluster_name, namespace, count(vul_severity) AS Critical_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace",
+ "query": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'LOW'",
+ "rawQuery": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162449) AND EventTime <= toDateTime(1713248849) AND vul_severity = 'LOW'",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Critical Vulnerability Count by Namespace and ClusterName",
- "type": "bargauge"
+ "title": "Low Vulnerability Images",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "vertamedia-clickhouse-datasource"
},
- "description": "This panel provides a count of critical misconfigurations categorized by namespace. It helps to monitor and prioritize critical security issues across different namespaces.",
"fieldConfig": {
"defaults": {
"color": {
- "mode": "continuous-GrYlRd"
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "filterable": true,
+ "inspect": false
},
"mappings": [],
"thresholds": {
@@ -389,35 +582,46 @@
{
"color": "green",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "vul_severity"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "basic",
+ "type": "color-background"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
- "h": 7,
+ "h": 8,
"w": 12,
"x": 12,
- "y": 13
+ "y": 17
},
- "id": 14,
+ "id": 44,
"options": {
- "displayMode": "basic",
- "minVizHeight": 10,
- "minVizWidth": 0,
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showUnfilled": true,
- "valueMode": "color"
+ "showHeader": true
},
"pluginVersion": "10.0.3",
"targets": [
@@ -427,68 +631,86 @@
"uid": "vertamedia-clickhouse-datasource"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "interval": "",
"intervalFactor": 1,
- "query": "SELECT cluster_name, namespace, count(misconfig_severity) AS Critical_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace\n",
- "rawQuery": "SELECT cluster_name, namespace, count(misconfig_severity) AS Critical_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace",
+ "query": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'MEDIUM'",
+ "rawQuery": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162478) AND EventTime <= toDateTime(1713248878) AND vul_severity = 'MEDIUM'",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Critical Misconfiguration Count by Namespace and ClusterName",
- "type": "bargauge"
+ "title": "Medium Vulnerability Images",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "vertamedia-clickhouse-datasource"
},
- "description": "This panel displays the total count of Misconfiguration severity for each level",
"fieldConfig": {
"defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "filterable": true,
+ "inspect": false
+ },
"mappings": [],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
"steps": [
{
- "color": "green",
+ "color": "super-light-orange",
"value": null
- },
- {
- "color": "orange",
- "value": 70
- },
- {
- "color": "red",
- "value": 85
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "vul_severity"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "basic",
+ "type": "color-background"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
- "h": 6,
+ "h": 8,
"w": 12,
"x": 0,
- "y": 20
+ "y": 25
},
- "id": 8,
+ "id": 41,
"options": {
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": false
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showThresholdLabels": false,
- "showThresholdMarkers": true
+ "showHeader": true
},
"pluginVersion": "10.0.3",
"targets": [
@@ -498,118 +720,86 @@
"uid": "vertamedia-clickhouse-datasource"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) AS High_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'HIGH'",
- "rawQuery": "SELECT count(*) AS High_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'HIGH'",
+ "query": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'HIGH'",
+ "rawQuery": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162400) AND EventTime <= toDateTime(1713248800) AND vul_severity = 'HIGH'",
"refId": "A",
"round": "0s",
"skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Medium_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'MEDIUM'",
- "rawQuery": "SELECT count(*) AS Medium_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'MEDIUM'",
- "refId": "B",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
+ }
+ ],
+ "title": "High Vulnerability Images",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
},
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Low_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'LOW'",
- "rawQuery": "SELECT count(*) AS Low_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'LOW'",
- "refId": "C",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "auto"
+ },
+ "filterable": true,
+ "inspect": false
},
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Critical_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'CRITICAL'",
- "rawQuery": "SELECT count(*) AS Critical_Severity\nFROM default.trivy_misconfig\nWHERE misconfig_severity = 'CRITICAL'",
- "refId": "D",
- "round": "0s",
- "skip_comments": true
- }
- ],
- "title": "Count of Misconfiguration Severity Level",
- "type": "gauge"
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "description": "This panel displays the total count of Vulnerability severity for each level",
- "fieldConfig": {
- "defaults": {
"mappings": [],
"thresholds": {
- "mode": "percentage",
+ "mode": "absolute",
"steps": [
- {
- "color": "green",
- "value": null
- },
- {
- "color": "orange",
- "value": 70
- },
{
"color": "red",
- "value": 85
+ "value": null
}
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "vul_severity"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "basic",
+ "type": "color-background"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
- "h": 6,
+ "h": 8,
"w": 12,
"x": 12,
- "y": 20
+ "y": 25
},
- "id": 10,
+ "id": 42,
"options": {
- "orientation": "auto",
- "reduceOptions": {
- "calcs": [
- "lastNotNull"
- ],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": false
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showThresholdLabels": false,
- "showThresholdMarkers": true
+ "showHeader": true
},
"pluginVersion": "10.0.3",
"targets": [
@@ -619,81 +809,38 @@
"uid": "vertamedia-clickhouse-datasource"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT count(*) AS High_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'HIGH'",
- "rawQuery": "SELECT count(*) AS High_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'HIGH'",
+ "query": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime) AND vul_severity = 'CRITICAL'",
+ "rawQuery": "SELECT cluster_name, artifact_name AS image_name, vul_id, vul_pkg_name, vul_severity\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162429) AND EventTime <= toDateTime(1713248829) AND vul_severity = 'CRITICAL'",
"refId": "A",
"round": "0s",
"skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Medium_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'MEDIUM'",
- "rawQuery": "SELECT count(*) AS Medium_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'MEDIUM'",
- "refId": "B",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Low_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'LOW'",
- "rawQuery": "SELECT count(*) AS Low_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'LOW'",
- "refId": "C",
- "round": "0s",
- "skip_comments": true
- },
- {
- "datasource": {
- "type": "vertamedia-clickhouse-datasource",
- "uid": "vertamedia-clickhouse-datasource"
- },
- "dateTimeType": "DATETIME",
- "extrapolate": true,
- "format": "table",
- "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
- "hide": false,
- "intervalFactor": 1,
- "query": "SELECT count(*) AS Critical_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'CRITICAL'",
- "rawQuery": "SELECT count(*) AS Critical_Severity\nFROM default.trivy_vul\nWHERE vul_severity = 'CRITICAL'",
- "refId": "D",
- "round": "0s",
- "skip_comments": true
}
],
- "title": "Count of Vulnereability Severity level",
- "type": "gauge"
+ "title": "Critical Vulnerability Images",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "vertamedia-clickhouse-datasource"
},
- "description": "This panel displays the count of Misconfigurations in different clusters and namespaces.",
"fieldConfig": {
"defaults": {
"color": {
- "mode": "continuous-GrYlRd"
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
},
"mappings": [],
"thresholds": {
@@ -716,21 +863,20 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 26
+ "y": 33
},
- "id": 4,
+ "id": 39,
"options": {
- "displayMode": "lcd",
- "minVizHeight": 10,
- "minVizWidth": 0,
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showUnfilled": true,
- "valueMode": "color"
+ "showHeader": true
},
"pluginVersion": "10.0.3",
"targets": [
@@ -740,30 +886,35 @@
"uid": "vertamedia-clickhouse-datasource"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT cluster_name, namespace, count(*) AS Misconfigurations\nFROM default.trivy_misconfig\nGROUP BY cluster_name, namespace",
- "rawQuery": "SELECT cluster_name, namespace, count(*) AS Misconfigurations\nFROM default.trivy_misconfig\nGROUP BY cluster_name, namespace",
+ "query": "SELECT image_name, package_url, count(*) AS duplicates\nFROM default.trivysbom\nWHERE $timeFilterByColumn(event_time)\nGROUP BY image_name,package_url\nHAVING count(*) > 1\n",
+ "rawQuery": "SELECT image_name, package_url, count(*) AS duplicates\nFROM default.trivysbom\nWHERE event_time >= toDateTime(1713162318) AND event_time <= toDateTime(1713248718)\nGROUP BY image_name,package_url\nHAVING count(*) > 1",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Misconfiguration Count by Cluster and Namespace",
- "type": "bargauge"
+ "title": "duplicate package for sbom images",
+ "type": "table"
},
{
"datasource": {
"type": "vertamedia-clickhouse-datasource",
"uid": "vertamedia-clickhouse-datasource"
},
- "description": "This panel displays the count of vulnerabilities in different clusters and namespaces.",
"fieldConfig": {
"defaults": {
- "color": {
- "mode": "continuous-GrYlRd"
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
},
"mappings": [],
"thresholds": {
@@ -780,27 +931,42 @@
]
}
},
- "overrides": []
+ "overrides": [
+ {
+ "matcher": {
+ "id": "byName",
+ "options": "images"
+ },
+ "properties": [
+ {
+ "id": "custom.cellOptions",
+ "value": {
+ "mode": "lcd",
+ "type": "gauge"
+ }
+ }
+ ]
+ }
+ ]
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
- "y": 26
+ "y": 33
},
- "id": 6,
+ "id": 40,
"options": {
- "displayMode": "lcd",
- "minVizHeight": 10,
- "minVizWidth": 0,
- "orientation": "horizontal",
- "reduceOptions": {
- "calcs": [],
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
"fields": "",
- "values": true
+ "reducer": [
+ "sum"
+ ],
+ "show": false
},
- "showUnfilled": true,
- "valueMode": "color"
+ "showHeader": true
},
"pluginVersion": "10.0.3",
"targets": [
@@ -810,24 +976,25 @@
"uid": "vertamedia-clickhouse-datasource"
},
"dateTimeType": "DATETIME",
+ "editorMode": "builder",
"extrapolate": true,
"format": "table",
"formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
"intervalFactor": 1,
- "query": "SELECT cluster_name, namespace, count(*) AS Vulnerabilities\nFROM default.trivy_vul\nGROUP BY cluster_name, namespace",
- "rawQuery": "SELECT cluster_name, namespace, count(*) AS Vulnerabilities\nFROM default.trivy_vul\nGROUP BY cluster_name, namespace",
+ "query": "SELECT vul_id, count(artifact_name) AS images\nFROM default.trivyimage\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY vul_id",
+ "rawQuery": "SELECT vul_id, count(artifact_name) AS images\nFROM default.trivyimage\nWHERE EventTime >= toDateTime(1713162358) AND EventTime <= toDateTime(1713248758)\nGROUP BY vul_id",
"refId": "A",
"round": "0s",
"skip_comments": true
}
],
- "title": "Vulnerability Count by Cluster and Namespace",
- "type": "bargauge"
+ "title": "Count of images across Vulnerability Id",
+ "type": "table"
},
{
"datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
},
"fieldConfig": {
"defaults": {
@@ -843,16 +1010,13 @@
"inspect": false
},
"mappings": [],
+ "noValue": "Trivy Image not available",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
}
@@ -863,9 +1027,9 @@
"h": 8,
"w": 24,
"x": 0,
- "y": 34
+ "y": 41
},
- "id": 2,
+ "id": 34,
"options": {
"cellHeight": "sm",
"footer": {
@@ -881,51 +1045,30 @@
"pluginVersion": "10.0.3",
"targets": [
{
- "builderOptions": {
- "database": "default",
- "fields": [
- "cluster_name",
- "namespace",
- "kind",
- "name",
- "vul_id",
- "vul_vendor_ids",
- "vul_pkg_id",
- "vul_pkg_name",
- "vul_pkg_path",
- "vul_installed_version",
- "vul_fixed_version",
- "vul_title",
- "vul_severity",
- "vul_published_date",
- "vul_last_modified_date"
- ],
- "filters": [],
- "limit": null,
- "mode": "list",
- "orderBy": [],
- "table": "trivy_vul"
- },
"datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
},
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT \"cluster_name\", \"namespace\", \"kind\", \"name\", \"vul_id\", \"vul_vendor_ids\", \"vul_pkg_id\", \"vul_pkg_name\", \"vul_pkg_path\", \"vul_installed_version\", \"vul_fixed_version\", \"vul_title\", \"vul_severity\", \"vul_published_date\", \"vul_last_modified_date\" FROM \"default\".\"trivy_vul\"",
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT \"cluster_name\", \"EventTime\", \"artifact_name\", \"vul_id\", \"vul_pkg_id\", \"vul_pkg_name\", \"vul_installed_version\", \"vul_fixed_version\", \"vul_title\", \"vul_severity\", \"vul_published_date\", \"vul_last_modified_date\" \nFROM \"default\".\"trivyimage\"\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT \"cluster_name\", \"EventTime\", \"artifact_name\", \"vul_id\", \"vul_pkg_id\", \"vul_pkg_name\", \"vul_installed_version\", \"vul_fixed_version\", \"vul_title\", \"vul_severity\", \"vul_published_date\", \"vul_last_modified_date\" \nFROM \"default\".\"trivyimage\"\nWHERE EventTime >= toDateTime(1693581675) AND EventTime <= toDateTime(1694186475)\nORDER BY EventTime DESC",
"refId": "A",
- "selectedFormat": 1
+ "round": "0s",
+ "skip_comments": true
}
],
- "title": "Trivy Vulnerabilities",
+ "title": "Trivy Image",
"type": "table"
},
{
"datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
},
- "description": "",
"fieldConfig": {
"defaults": {
"color": {
@@ -940,16 +1083,13 @@
"inspect": false
},
"mappings": [],
+ "noValue": "Trivy SBOM not available",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
- },
- {
- "color": "red",
- "value": 80
}
]
}
@@ -957,12 +1097,12 @@
"overrides": []
},
"gridPos": {
- "h": 10,
+ "h": 8,
"w": 24,
"x": 0,
- "y": 42
+ "y": 49
},
- "id": 1,
+ "id": 35,
"options": {
"cellHeight": "sm",
"footer": {
@@ -978,256 +1118,1278 @@
"pluginVersion": "10.0.3",
"targets": [
{
- "builderOptions": {
- "database": "default",
- "fields": [
- "cluster_name",
- "namespace",
- "kind",
- "name",
- "misconfig_id",
- "misconfig_avdid",
- "misconfig_type",
- "misconfig_title",
- "misconfig_desc",
- "misconfig_msg",
- "misconfig_query",
- "misconfig_resolution",
- "misconfig_severity",
- "misconfig_status"
- ],
- "filters": [],
- "limit": null,
- "mode": "list",
- "orderBy": [],
- "table": "trivy_misconfig"
- },
"datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
},
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT \"cluster_name\", \"namespace\", \"kind\", \"name\", \"misconfig_id\", \"misconfig_avdid\", \"misconfig_type\", \"misconfig_title\", \"misconfig_desc\", \"misconfig_msg\", \"misconfig_query\", \"misconfig_resolution\", \"misconfig_severity\", \"misconfig_status\" FROM \"default\".\"trivy_misconfig\"",
+ "dateTimeType": "DATETIME",
+ "editorMode": "builder",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT * FROM default.trivysbom\nWHERE $timeFilterByColumn(event_time)",
+ "rawQuery": "SELECT * FROM default.trivysbom\nWHERE event_time >= toDateTime(1713162248) AND event_time <= toDateTime(1713248648)",
"refId": "A",
- "selectedFormat": 1
+ "round": "0s",
+ "skip_comments": true
}
],
- "title": "Trivy Misconfiguration",
+ "title": "Trivy_SBOM",
"type": "table"
},
{
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
+ "collapsed": true,
+ "gridPos": {
+ "h": 1,
+ "w": 24,
+ "x": 0,
+ "y": 57
},
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
+ "id": 38,
+ "panels": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
},
- "custom": {
- "align": "center",
- "cellOptions": {
- "type": "color-text"
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 97
+ },
+ "id": 36,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
},
- "filterable": true,
- "inspect": false
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let severity = [];\nlet counts = [];\n\ndata.series.map((s) => {\n severity = s.fields.find((f) => f.name === 'vul_severity').values;\n counts = s.fields.find((f) => f.name === 'total_count').values;\n});\n\n// Create an empty array to store pie chart data\nconst pieChartData = [];\n\n// Define colors for pie slices\nconst pieSliceColors = ['#235894', '#FF0000', '#00FF00', '#FFFF00', '#FFA500'];\n\n// Map severity and counts to pie chart data\nseverity.forEach((sev, index) => {\n pieChartData.push({\n value: counts[index],\n name: sev,\n itemStyle: {\n opacity: 0.7,\n color: pieSliceColors[index % pieSliceColors.length],\n borderWidth: 3,\n borderColor: '#FFFFFF', // Set the border color to white\n },\n });\n});\n\nreturn {\n backgroundColor: '#FFFFFF', // Set the background color to white\n tooltip: {},\n series: [\n {\n name: 'pie',\n type: 'pie',\n selectedMode: 'single',\n selectedOffset: 30,\n clockwise: true,\n label: {\n fontSize: 18,\n color: '#235894',\n },\n labelLine: {\n lineStyle: {\n color: '#235894',\n },\n },\n data: pieChartData, // Use the modified pie chart data\n itemStyle: {\n opacity: 0.7,\n borderWidth: 3,\n borderColor: '#FFFFFF', // Set the border color to white\n },\n },\n ],\n};",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ }
},
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
},
- {
- "color": "red",
- "value": 80
- }
- ]
- }
- },
- "overrides": []
- },
- "gridPos": {
- "h": 10,
- "w": 24,
- "x": 0,
- "y": 52
- },
- "id": 24,
- "options": {
- "cellHeight": "sm",
- "footer": {
- "countRows": false,
- "fields": "",
- "reducer": [
- "sum"
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT vul_severity, count(*) AS total_count\nFROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date)\nGROUP BY vul_severity",
+ "rawQuery": "SELECT vul_severity, count(*) AS total_count\nFROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694438766) AND vul_last_modified_date <= toDateTime(1694611566)\nGROUP BY vul_severity",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
],
- "show": false
+ "title": "Vulnerability Severity Distribution",
+ "type": "volkovlabs-echarts-panel"
},
- "showHeader": true
- },
- "pluginVersion": "10.0.3",
- "targets": [
{
- "builderOptions": {
- "0": "T",
- "1": "h",
- "2": "e",
- "3": " ",
- "4": "q",
- "5": "u",
- "6": "e",
- "7": "r",
- "8": "y",
- "9": " ",
- "10": "i",
- "11": "s",
- "12": " ",
- "13": "n",
- "14": "o",
- "15": "t",
- "16": " ",
- "17": "a",
- "18": " ",
- "19": "s",
- "20": "e",
- "21": "l",
- "22": "e",
- "23": "c",
- "24": "t",
- "25": " ",
- "26": "s",
- "27": "t",
- "28": "a",
- "29": "t",
- "30": "e",
- "31": "m",
- "32": "e",
- "33": "n",
- "34": "t",
- "35": ".",
- "database": "default",
- "fields": [
- "cluster_name",
- "artifact_name",
- "vul_id",
- "vul_pkg_id",
- "vul_pkg_name",
- "vul_installed_version",
- "vul_fixed_version",
- "vul_title",
- "vul_severity",
- "vul_published_date",
- "vul_last_modified_date"
- ],
- "filters": [],
- "limit": null,
- "mode": "list",
- "orderBy": [],
- "table": "trivyimage"
- },
"datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "format": 1,
- "meta": {
- "builderOptions": {
- "fields": [],
- "limit": 100,
- "mode": "list"
- }
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
},
- "queryType": "builder",
- "rawSql": "SELECT \"cluster_name\", \"artifact_name\", \"vul_id\", \"vul_pkg_id\", \"vul_pkg_name\", \"vul_installed_version\", \"vul_fixed_version\", \"vul_title\", \"vul_severity\", \"vul_published_date\", \"vul_last_modified_date\" FROM \"default\".\"trivyimage\"",
- "refId": "A",
- "selectedFormat": 1
- }
- ],
- "title": "Trivy Image",
- "type": "table"
- },
- {
- "datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
- },
- "fieldConfig": {
- "defaults": {
- "color": {
- "mode": "thresholds"
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 97
},
- "custom": {
- "align": "center",
- "cellOptions": {
- "type": "color-text"
+ "id": 37,
+ "options": {
+ "baidu": {
+ "callback": "bmapReady",
+ "key": ""
},
- "filterable": true,
- "inspect": false
- },
- "mappings": [],
- "thresholds": {
- "mode": "absolute",
- "steps": [
- {
- "color": "green",
- "value": null
+ "editor": {
+ "format": "auto",
+ "height": 600
+ },
+ "gaode": {
+ "key": "",
+ "plugin": "AMap.Scale,AMap.ToolBar"
+ },
+ "getOption": "let severity = [];\nlet counts = [];\n\ndata.series.map((s) => {\n severity = s.fields.find((f) => f.name === 'misconfig_severity').values;\n counts = s.fields.find((f) => f.name === 'total_count').values;\n});\n\n// Create an empty array to store pie chart data\nconst pieChartData = [];\n\n// Define colors for pie slices\nconst pieSliceColors = ['#235894', '#FF0000', '#00FF00', '#FFFF00', '#FFA500'];\n\n// Map severity and counts to pie chart data\nseverity.forEach((sev, index) => {\n pieChartData.push({\n value: counts[index],\n name: sev,\n itemStyle: {\n opacity: 0.7,\n color: pieSliceColors[index % pieSliceColors.length],\n borderWidth: 3,\n borderColor: '#FFFFFF', // Set the border color to white\n },\n });\n});\n\nreturn {\n backgroundColor: '#FFFFFF', // Set the background color to white\n tooltip: {},\n series: [\n {\n name: 'pie',\n type: 'pie',\n selectedMode: 'single',\n selectedOffset: 30,\n clockwise: true,\n label: {\n fontSize: 18,\n color: '#235894',\n },\n labelLine: {\n lineStyle: {\n color: '#235894',\n },\n },\n data: pieChartData, // Use the modified pie chart data\n itemStyle: {\n opacity: 0.7,\n borderWidth: 3,\n borderColor: '#FFFFFF', // Set the border color to white\n },\n },\n ],\n};",
+ "google": {
+ "callback": "gmapReady",
+ "key": ""
+ },
+ "map": "none",
+ "renderer": "canvas",
+ "themeEditor": {
+ "config": "{}",
+ "height": 400,
+ "name": "default"
+ }
+ },
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT misconfig_severity, count(*) AS total_count\nFROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY misconfig_severity",
+ "rawQuery": "SELECT misconfig_severity, count(*) AS total_count\nFROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694438912) AND EventTime <= toDateTime(1694611712)\nGROUP BY misconfig_severity",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Misconfiguration Severity Distribution",
+ "type": "volkovlabs-echarts-panel"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "This panel illustrates the distribution of vulnerability severities across different clusters. It provides an overview of the count of vulnerabilities categorized by severity levels within each cluster.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
}
- ]
- }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 105
+ },
+ "id": 20,
+ "options": {
+ "displayMode": "gradient",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, vul_severity, count(*) \nFROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date)\nGROUP BY cluster_name, vul_severity",
+ "rawQuery": "SELECT cluster_name, vul_severity, count(*) \nFROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694155614) AND vul_last_modified_date <= toDateTime(1694242014)\nGROUP BY cluster_name, vul_severity",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Vulnerability Severity counts grouped by Cluster",
+ "type": "bargauge"
},
- "overrides": []
- },
- "gridPos": {
- "h": 8,
- "w": 24,
- "x": 0,
- "y": 62
- },
- "id": 25,
- "options": {
- "cellHeight": "sm",
- "footer": {
- "countRows": false,
- "fields": "",
- "reducer": [
- "sum"
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "This panel illustrates the distribution of misconfiguration severities across different clusters. It provides an overview of the count of misconfigurations categorized by severity levels within each cluster. ",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 105
+ },
+ "id": 22,
+ "options": {
+ "displayMode": "gradient",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, misconfig_severity, count(*)\nFROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY cluster_name, misconfig_severity",
+ "rawQuery": "SELECT cluster_name, misconfig_severity, count(*)\nFROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694156239) AND EventTime <= toDateTime(1694242639)\nGROUP BY cluster_name, misconfig_severity",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
],
- "show": false
+ "title": "Misconfiguration Severity counts grouped by Cluster",
+ "type": "bargauge"
},
- "showHeader": true
- },
- "pluginVersion": "10.0.3",
- "targets": [
{
- "builderOptions": {
- "database": "default",
- "fields": [
- "*"
- ],
- "filters": [],
- "limit": null,
- "mode": "list",
- "orderBy": [],
- "table": "trivysbom"
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
},
+ "description": "This panel displays the total number Vulnerabilities under each namespace",
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
+ },
+ {
+ "color": "red",
+ "value": 85
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 0,
+ "y": 113
+ },
+ "id": 18,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, count(*) FROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date)\nGROUP BY cluster_name",
+ "rawQuery": "SELECT cluster_name, count(*) FROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694155665) AND vul_last_modified_date <= toDateTime(1694242065)\nGROUP BY cluster_name",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Vulnerability Count by Cluster",
+ "type": "gauge"
+ },
+ {
"datasource": {
- "type": "grafana-clickhouse-datasource",
- "uid": "ClickHouse"
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
},
- "format": 1,
- "queryType": "builder",
- "rawSql": "SELECT * FROM \"default\".\"trivysbom\"",
- "refId": "A",
- "selectedFormat": 1
+ "description": "This panel displays the total of misconfigurations from each clusters.",
+ "fieldConfig": {
+ "defaults": {
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "percentage",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "orange",
+ "value": 70
+ },
+ {
+ "color": "red",
+ "value": 85
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 5,
+ "w": 12,
+ "x": 12,
+ "y": 113
+ },
+ "id": 16,
+ "options": {
+ "orientation": "auto",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showThresholdLabels": false,
+ "showThresholdMarkers": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, count(*) FROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY cluster_name",
+ "rawQuery": "SELECT cluster_name, count(*) FROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694156266) AND EventTime <= toDateTime(1694242666)\nGROUP BY cluster_name",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Misconfiguration Count by Cluster",
+ "type": "gauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "This panel provides a count of high vulnerabilities categorized by namespace. It helps to monitor and prioritize high security issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 118
+ },
+ "id": 29,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(vul_severity) AS High_Severity\nFROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date) AND vul_severity = 'HIGH'\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(vul_severity) AS High_Severity\nFROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694155706) AND vul_last_modified_date <= toDateTime(1694242106) AND vul_severity = 'HIGH'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "High Vulnerability Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "This panel provides a count of high misconfigurations categorized by namespace. It helps to monitor and prioritize high security issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 118
+ },
+ "id": 30,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(misconfig_severity) AS High_Severity\nFROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime) AND misconfig_severity = 'HIGH'\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(misconfig_severity) AS High_Severity\nFROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694156303) AND EventTime <= toDateTime(1694242703) AND misconfig_severity = 'HIGH'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "High Misconfiguration Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "This panel provides a count of Low vulnerabilities categorized by namespace. It helps to monitor and prioritize Low security issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 125
+ },
+ "id": 27,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(vul_severity) AS Low_Severity\nFROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date) AND vul_severity = 'LOW'\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(vul_severity) AS Low_Severity\nFROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694155745) AND vul_last_modified_date <= toDateTime(1694242145) AND vul_severity = 'LOW'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Low Vulnerability Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "This panel provides a count of low misconfigurations categorized by namespace. It helps to monitor and prioritize low security issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 125
+ },
+ "id": 28,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(misconfig_severity) AS Low_Severity\nFROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime) AND misconfig_severity = 'LOW'\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(misconfig_severity) AS Low_Severity\nFROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694156331) AND EventTime <= toDateTime(1694242731) AND misconfig_severity = 'LOW'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Low Misconfiguration Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "This panel provides a count of Medium vulnerabilities categorized by namespace. It helps to monitor and prioritize Medium security issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 132
+ },
+ "id": 25,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(vul_severity) AS Medium_Severity\nFROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date) AND vul_severity = 'MEDIUM'\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(vul_severity) AS Medium_Severity\nFROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694155776) AND vul_last_modified_date <= toDateTime(1694242176) AND vul_severity = 'MEDIUM'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Medium Vulnerability Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "This panel provides a count of medium misconfigurations categorized by namespace. It helps to monitor and prioritize medium security issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 132
+ },
+ "id": 26,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(misconfig_severity) AS Medium_Severity\nFROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime) AND misconfig_severity = 'MEDIUM'\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(misconfig_severity) AS Medium_Severity\nFROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694156357) AND EventTime <= toDateTime(1694242757) AND misconfig_severity = 'MEDIUM'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Medium Misconfiguration Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "This panel provides a count of critical vulnerabilities categorized by namespace. It helps to monitor and prioritize critical security issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 0,
+ "y": 139
+ },
+ "id": 12,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(vul_severity) AS Critical_Severity\nFROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date) AND vul_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace\n",
+ "rawQuery": "SELECT cluster_name, namespace, count(vul_severity) AS Critical_Severity\nFROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694156016) AND vul_last_modified_date <= toDateTime(1694242416) AND vul_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Critical Vulnerability Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "This panel provides a count of critical misconfigurations categorized by namespace. It helps to monitor and prioritize critical security issues across different namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 7,
+ "w": 12,
+ "x": 12,
+ "y": 139
+ },
+ "id": 14,
+ "options": {
+ "displayMode": "basic",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "interval": "",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(misconfig_severity) AS Critical_Severity\nFROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime) AND misconfig_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace\n",
+ "rawQuery": "SELECT cluster_name, namespace, count(misconfig_severity) AS Critical_Severity\nFROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694156383) AND EventTime <= toDateTime(1694242783) AND misconfig_severity = 'CRITICAL'\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Critical Misconfiguration Count by Namespace and ClusterName",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "description": "This panel displays the count of vulnerabilities in different clusters and namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 146
+ },
+ "id": 6,
+ "options": {
+ "displayMode": "lcd",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(*) AS Vulnerabilities\nFROM default.trivy_vul\nWHERE $timeFilterByColumn(vul_last_modified_date)\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(*) AS Vulnerabilities\nFROM default.trivy_vul\nWHERE vul_last_modified_date >= toDateTime(1694156175) AND vul_last_modified_date <= toDateTime(1694242575)\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Vulnerability Count by Cluster and Namespace",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+      "description": "This panel displays the count of misconfigurations in different clusters and namespaces.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "continuous-GrYlRd"
+ },
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ },
+ {
+ "color": "red",
+ "value": 80
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 146
+ },
+ "id": 4,
+ "options": {
+ "displayMode": "lcd",
+ "minVizHeight": 10,
+ "minVizWidth": 0,
+ "orientation": "horizontal",
+ "reduceOptions": {
+ "calcs": [],
+ "fields": "",
+ "values": true
+ },
+ "showUnfilled": true,
+ "valueMode": "color"
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT cluster_name, namespace, count(*) AS Misconfigurations\nFROM default.trivy_misconfig\nWHERE $timeFilterByColumn(EventTime)\nGROUP BY cluster_name, namespace",
+ "rawQuery": "SELECT cluster_name, namespace, count(*) AS Misconfigurations\nFROM default.trivy_misconfig\nWHERE EventTime >= toDateTime(1694156545) AND EventTime <= toDateTime(1694242945)\nGROUP BY cluster_name, namespace",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Misconfiguration Count by Cluster and Namespace",
+ "type": "bargauge"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "noValue": "Trivy Vulnerabilities Not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 154
+ },
+ "id": 32,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT \"cluster_name\", \"namespace\", \"kind\", \"name\", \"vul_id\", \"vul_vendor_ids\", \"vul_pkg_id\", \"vul_pkg_name\", \"vul_pkg_path\", \"vul_installed_version\", \"vul_fixed_version\", \"vul_title\", \"vul_severity\", \"vul_published_date\", \"vul_last_modified_date\" \nFROM \"default\".\"trivy_vul\"\nWHERE $timeFilterByColumn(vul_last_modified_date)\nORDER BY vul_last_modified_date DESC",
+ "rawQuery": "SELECT \"cluster_name\", \"namespace\", \"kind\", \"name\", \"vul_id\", \"vul_vendor_ids\", \"vul_pkg_id\", \"vul_pkg_name\", \"vul_pkg_path\", \"vul_installed_version\", \"vul_fixed_version\", \"vul_title\", \"vul_severity\", \"vul_published_date\", \"vul_last_modified_date\" \nFROM \"default\".\"trivy_vul\"\nWHERE vul_last_modified_date >= toDateTime(1694099993) AND vul_last_modified_date <= toDateTime(1694186393)\nORDER BY vul_last_modified_date DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+ "title": "Trivy Vulnerabilities",
+ "type": "table"
+ },
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "thresholds"
+ },
+ "custom": {
+ "align": "center",
+ "cellOptions": {
+ "type": "color-text"
+ },
+ "filterable": true,
+ "inspect": false
+ },
+ "mappings": [],
+ "noValue": "Trivy Misconfigurations not available",
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green"
+ }
+ ]
+ }
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 24,
+ "x": 0,
+ "y": 162
+ },
+ "id": 33,
+ "options": {
+ "cellHeight": "sm",
+ "footer": {
+ "countRows": false,
+ "fields": "",
+ "reducer": [
+ "sum"
+ ],
+ "show": false
+ },
+ "showHeader": true
+ },
+ "pluginVersion": "10.0.3",
+ "targets": [
+ {
+ "datasource": {
+ "type": "vertamedia-clickhouse-datasource",
+ "uid": "vertamedia-clickhouse-datasource"
+ },
+ "dateTimeType": "DATETIME",
+ "extrapolate": true,
+ "format": "table",
+ "formattedQuery": "SELECT $timeSeries as t, count() FROM $table WHERE $timeFilter GROUP BY t ORDER BY t",
+ "intervalFactor": 1,
+ "query": "SELECT \"cluster_name\", \"EventTime\", \"namespace\", \"kind\", \"name\", \"misconfig_id\", \"misconfig_avdid\", \"misconfig_type\", \"misconfig_title\", \"misconfig_desc\", \"misconfig_msg\", \"misconfig_query\", \"misconfig_resolution\", \"misconfig_severity\", \"misconfig_status\" \nFROM \"default\".\"trivy_misconfig\"\nWHERE $timeFilterByColumn(EventTime)\nORDER BY EventTime DESC",
+ "rawQuery": "SELECT \"cluster_name\", \"EventTime\", \"namespace\", \"kind\", \"name\", \"misconfig_id\", \"misconfig_avdid\", \"misconfig_type\", \"misconfig_title\", \"misconfig_desc\", \"misconfig_msg\", \"misconfig_query\", \"misconfig_resolution\", \"misconfig_severity\", \"misconfig_status\" \nFROM \"default\".\"trivy_misconfig\"\nWHERE EventTime >= toDateTime(1694966455) AND EventTime <= toDateTime(1695052855)\nORDER BY EventTime DESC",
+ "refId": "A",
+ "round": "0s",
+ "skip_comments": true
+ }
+ ],
+      "title": "Trivy Misconfigurations",
+ "type": "table"
}
],
- "title": "Trivy_SBOM",
- "type": "table"
+ "title": "Trivy Vulnerability and Misconfiguration",
+ "type": "row"
}
],
"refresh": "",
@@ -1238,13 +2400,13 @@
"list": []
},
"time": {
- "from": "now-6h",
+ "from": "now-24h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Trivy",
- "uid": "f9b0a865-f419-410a-b7d9-9a3f79a70d47",
- "version": 1,
+ "uid": "f9b0a865-f419-410a-b7d9-9a3f79a70d48",
+ "version": 2,
"weekStart": ""
-}
+}
\ No newline at end of file
diff --git a/graphqlserver/gqlgen.yml b/graphqlserver/gqlgen.yml
new file mode 100644
index 00000000..fcbd0dc5
--- /dev/null
+++ b/graphqlserver/gqlgen.yml
@@ -0,0 +1,87 @@
+# Where are all the schema files located? Globs are supported, e.g. src/**/*.graphqls
+schema:
+ - graph/*.graphqls
+
+# Where should the generated server code go?
+exec:
+ filename: graph/generated.go
+ package: graph
+
+# Uncomment to enable federation
+# federation:
+# filename: graph/federation.go
+# package: graph
+
+# Where should any generated models go?
+model:
+ filename: graph/model/models_gen.go
+ package: model
+
+# Where should the resolver implementations go?
+resolver:
+ layout: follow-schema
+ dir: graph
+ package: graph
+ filename_template: "{name}.resolvers.go"
+ # Optional: turn on to not generate template comments above resolvers
+ # omit_template_comment: false
+
+# Optional: turn on to use `gqlgen:"fieldName"` tags in your models
+# struct_tag: json
+
+# Optional: turn on to use []Thing instead of []*Thing
+# omit_slice_element_pointers: false
+
+# Optional: turn on to omit Is() methods to interface and unions
+# omit_interface_checks : true
+
+# Optional: turn on to skip generation of ComplexityRoot struct content and Complexity function
+# omit_complexity: false
+
+# Optional: turn on to not generate any file notice comments in generated files
+# omit_gqlgen_file_notice: false
+
+# Optional: turn on to exclude the gqlgen version in the generated file notice. No effect if `omit_gqlgen_file_notice` is true.
+# omit_gqlgen_version_in_file_notice: false
+
+# Optional: turn off to make struct-type struct fields not use pointers
+# e.g. type Thing struct { FieldA OtherThing } instead of { FieldA *OtherThing }
+# struct_fields_always_pointers: true
+
+# Optional: turn off to make resolvers return values instead of pointers for structs
+# resolvers_always_return_pointers: true
+
+# Optional: turn on to return pointers instead of values in unmarshalInput
+# return_pointers_in_unmarshalinput: false
+
+# Optional: wrap nullable input fields with Omittable
+# nullable_input_omittable: true
+
+# Optional: set to speed up generation time by not performing a final validation pass.
+# skip_validation: true
+
+# Optional: set to skip running `go mod tidy` when generating server code
+# skip_mod_tidy: true
+
+# gqlgen will search for any type names in the schema in these Go packages;
+# if they match, it will use them; otherwise it will generate them.
+autobind:
+# - "github.com/intelops/kubviz/graphqlserver/graph/model"
+
+# This section declares type mappings between the GraphQL and Go type systems.
+#
+# The first line in each type is used as the default for resolver arguments and
+# modelgen; the others are allowed when binding to fields. Configure them to
+# your liking.
+models:
+ ID:
+ model:
+ - github.com/99designs/gqlgen/graphql.ID
+ - github.com/99designs/gqlgen/graphql.Int
+ - github.com/99designs/gqlgen/graphql.Int64
+ - github.com/99designs/gqlgen/graphql.Int32
+ Int:
+ model:
+ - github.com/99designs/gqlgen/graphql.Int
+ - github.com/99designs/gqlgen/graphql.Int64
+ - github.com/99designs/gqlgen/graphql.Int32
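+
+# Illustrative usage note (an editorial addition, not part of the generated template):
+# with this layout, regenerating the GraphQL server after editing graph/*.graphqls is
+# typically done with the standard gqlgen command:
+#
+#   go run github.com/99designs/gqlgen generate
+#
+# This rewrites graph/generated.go and graph/model/models_gen.go, and adds resolver
+# stubs for new schema fields according to the resolver settings above.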
diff --git a/graphqlserver/graph/generated.go b/graphqlserver/graph/generated.go
new file mode 100644
index 00000000..7a1d76b0
--- /dev/null
+++ b/graphqlserver/graph/generated.go
@@ -0,0 +1,19952 @@
+// Code generated by github.com/99designs/gqlgen, DO NOT EDIT.
+
+package graph
+
+import (
+ "bytes"
+ "context"
+ "embed"
+ "errors"
+ "fmt"
+ "strconv"
+ "sync"
+ "sync/atomic"
+
+ "github.com/99designs/gqlgen/graphql"
+ "github.com/99designs/gqlgen/graphql/introspection"
+ "github.com/intelops/kubviz/graphqlserver/graph/model"
+ gqlparser "github.com/vektah/gqlparser/v2"
+ "github.com/vektah/gqlparser/v2/ast"
+)
+
+// region ************************** generated!.gotpl **************************
+
+// NewExecutableSchema creates an ExecutableSchema from the ResolverRoot interface.
+func NewExecutableSchema(cfg Config) graphql.ExecutableSchema {
+ return &executableSchema{
+ schema: cfg.Schema,
+ resolvers: cfg.Resolvers,
+ directives: cfg.Directives,
+ complexity: cfg.Complexity,
+ }
+}
+
+type Config struct {
+ Schema *ast.Schema
+ Resolvers ResolverRoot
+ Directives DirectiveRoot
+ Complexity ComplexityRoot
+}
+
+type ResolverRoot interface {
+ Query() QueryResolver
+}
+
+type DirectiveRoot struct {
+}
+
+type ComplexityRoot struct {
+ Cluster struct {
+ Name func(childComplexity int) int
+ }
+
+ ClusterAPIsCount struct {
+ ClusterName func(childComplexity int) int
+ Count func(childComplexity int) int
+ }
+
+ ClusterDeletedAPICount struct {
+ ClusterName func(childComplexity int) int
+ DeletedAPICount func(childComplexity int) int
+ }
+
+ ClusterDeprecatedAPICount struct {
+ ClusterName func(childComplexity int) int
+ DeprecatedAPICount func(childComplexity int) int
+ }
+
+ ClusterNamespaceMisconfigCount struct {
+ ClusterName func(childComplexity int) int
+ MisconfigCount func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ }
+
+ ClusterNamespaceOutdatedCount struct {
+ ClusterName func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ OutdatedCount func(childComplexity int) int
+ }
+
+ ClusterNamespaceResourceCount struct {
+ ClusterName func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ ResourceCount func(childComplexity int) int
+ }
+
+ ClusterNamespaceVulCount struct {
+ ClusterName func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ VulCount func(childComplexity int) int
+ }
+
+ DeletedAPI struct {
+ ClusterName func(childComplexity int) int
+ Deleted func(childComplexity int) int
+ EventTime func(childComplexity int) int
+ ExpiryDate func(childComplexity int) int
+ Group func(childComplexity int) int
+ Kind func(childComplexity int) int
+ Name func(childComplexity int) int
+ ObjectName func(childComplexity int) int
+ Scope func(childComplexity int) int
+ Version func(childComplexity int) int
+ }
+
+ DeprecatedAPI struct {
+ ClusterName func(childComplexity int) int
+ Deprecated func(childComplexity int) int
+ Description func(childComplexity int) int
+ EventTime func(childComplexity int) int
+ ExpiryDate func(childComplexity int) int
+ Kind func(childComplexity int) int
+ ObjectName func(childComplexity int) int
+ Scope func(childComplexity int) int
+ }
+
+ Event struct {
+ ClusterName func(childComplexity int) int
+ Event func(childComplexity int) int
+ EventTime func(childComplexity int) int
+ ExpiryDate func(childComplexity int) int
+ FirstTime func(childComplexity int) int
+ Host func(childComplexity int) int
+ ID func(childComplexity int) int
+ ImageName func(childComplexity int) int
+ Kind func(childComplexity int) int
+ LastTime func(childComplexity int) int
+ Message func(childComplexity int) int
+ Name func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ OpType func(childComplexity int) int
+ Reason func(childComplexity int) int
+ }
+
+ GetAllResource struct {
+ Age func(childComplexity int) int
+ ClusterName func(childComplexity int) int
+ EventTime func(childComplexity int) int
+ ExpiryDate func(childComplexity int) int
+ Kind func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ Resource func(childComplexity int) int
+ }
+
+ KubeScore struct {
+ APIVersion func(childComplexity int) int
+ ClusterName func(childComplexity int) int
+ Description func(childComplexity int) int
+ EventTime func(childComplexity int) int
+ FileName func(childComplexity int) int
+ FileRow func(childComplexity int) int
+ ID func(childComplexity int) int
+ Kind func(childComplexity int) int
+ Name func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ ObjectName func(childComplexity int) int
+ Path func(childComplexity int) int
+ Summary func(childComplexity int) int
+ TargetType func(childComplexity int) int
+ }
+
+ Kubescore struct {
+ APIVersion func(childComplexity int) int
+ ClusterName func(childComplexity int) int
+ Description func(childComplexity int) int
+ EventTime func(childComplexity int) int
+ FileName func(childComplexity int) int
+ FileRow func(childComplexity int) int
+ ID func(childComplexity int) int
+ Kind func(childComplexity int) int
+ Name func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ ObjectName func(childComplexity int) int
+ Path func(childComplexity int) int
+ Summary func(childComplexity int) int
+ TargetType func(childComplexity int) int
+ }
+
+ Misconfiguration struct {
+ ClusterName func(childComplexity int) int
+ EventTime func(childComplexity int) int
+ ExpiryDate func(childComplexity int) int
+ ExportedAt func(childComplexity int) int
+ ID func(childComplexity int) int
+ Kind func(childComplexity int) int
+ MisconfigAvdid func(childComplexity int) int
+ MisconfigDesc func(childComplexity int) int
+ MisconfigID func(childComplexity int) int
+ MisconfigMsg func(childComplexity int) int
+ MisconfigQuery func(childComplexity int) int
+ MisconfigResolution func(childComplexity int) int
+ MisconfigSeverity func(childComplexity int) int
+ MisconfigStatus func(childComplexity int) int
+ MisconfigTitle func(childComplexity int) int
+ MisconfigType func(childComplexity int) int
+ Name func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ }
+
+ Namespace struct {
+ Name func(childComplexity int) int
+ }
+
+ NamespaceData struct {
+ KubeScores func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ OutdatedImages func(childComplexity int) int
+ Resources func(childComplexity int) int
+ }
+
+ OutdatedImage struct {
+ ClusterName func(childComplexity int) int
+ CurrentImage func(childComplexity int) int
+ CurrentTag func(childComplexity int) int
+ EventTime func(childComplexity int) int
+ LatestVersion func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ Pod func(childComplexity int) int
+ VersionsBehind func(childComplexity int) int
+ }
+
+ Query struct {
+ AllClusterDeletedAPIsCounts func(childComplexity int) int
+ AllClusterDeprecatedAPIsCounts func(childComplexity int) int
+ AllClusterNamespaceOutdatedCounts func(childComplexity int) int
+ AllClusterNamespaceResourceCounts func(childComplexity int) int
+ AllDeletedAPIs func(childComplexity int) int
+ AllDeprecatedAPIs func(childComplexity int) int
+ AllEvents func(childComplexity int) int
+ AllGetAllResources func(childComplexity int) int
+ AllKubeScores func(childComplexity int) int
+ AllRakkess func(childComplexity int) int
+ AllTrivyImages func(childComplexity int) int
+ AllTrivyMisconfigs func(childComplexity int) int
+ AllTrivySBOMs func(childComplexity int) int
+ AllTrivyVuls func(childComplexity int) int
+ DeletedAPICount func(childComplexity int, clusterName string) int
+ DeletedAPIs func(childComplexity int, clusterName string) int
+ DeprecatedAPICount func(childComplexity int, clusterName string) int
+ DeprecatedAPIs func(childComplexity int, clusterName string) int
+ EventsByClusterAndNamespace func(childComplexity int, clusterName string, namespace string) int
+ GetAllResources func(childComplexity int, clusterName string, namespace string) int
+ Kubescores func(childComplexity int, clustername string, namespace string) int
+ Misconfigurations func(childComplexity int, clusterName string, namespace string) int
+ OutdatedImagesByClusterAndNamespace func(childComplexity int, clusterName string, namespace string) int
+ OutdatedImagesCount func(childComplexity int, clusterName string, namespace string) int
+ TrivyImageCount func(childComplexity int, clusterName string) int
+ TrivyImages func(childComplexity int, clusterName string) int
+ TrivyMisconfigCount func(childComplexity int, clusterName string, namespace string) int
+ TrivySBOMs func(childComplexity int, clusterName string) int
+ TrivyVulCount func(childComplexity int, clusterName string, namespace string) int
+ UniqueClusters func(childComplexity int) int
+ UniqueNamespaces func(childComplexity int, clusterName string) int
+ Vulnerabilities func(childComplexity int, clusterName string, namespace string) int
+ }
+
+ Rakkess struct {
+ ClusterName func(childComplexity int) int
+ Create func(childComplexity int) int
+ Delete func(childComplexity int) int
+ EventTime func(childComplexity int) int
+ ExpiryDate func(childComplexity int) int
+ List func(childComplexity int) int
+ Name func(childComplexity int) int
+ Update func(childComplexity int) int
+ }
+
+ Resource struct {
+ Age func(childComplexity int) int
+ ClusterName func(childComplexity int) int
+ EventTime func(childComplexity int) int
+ Kind func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ Resource func(childComplexity int) int
+ }
+
+ TrivyImage struct {
+ ArtifactName func(childComplexity int) int
+ ClusterName func(childComplexity int) int
+ ExpiryDate func(childComplexity int) int
+ ID func(childComplexity int) int
+ VulFixedVersion func(childComplexity int) int
+ VulID func(childComplexity int) int
+ VulInstalledVersion func(childComplexity int) int
+ VulLastModifiedDate func(childComplexity int) int
+ VulPkgID func(childComplexity int) int
+ VulPkgName func(childComplexity int) int
+ VulPublishedDate func(childComplexity int) int
+ VulSeverity func(childComplexity int) int
+ VulTitle func(childComplexity int) int
+ }
+
+ TrivyImageCount struct {
+ ClusterName func(childComplexity int) int
+ ImageCount func(childComplexity int) int
+ }
+
+ TrivyMisconfig struct {
+ ClusterName func(childComplexity int) int
+ EventTime func(childComplexity int) int
+ ExpiryDate func(childComplexity int) int
+ ID func(childComplexity int) int
+ Kind func(childComplexity int) int
+ MisconfigAvdid func(childComplexity int) int
+ MisconfigDesc func(childComplexity int) int
+ MisconfigID func(childComplexity int) int
+ MisconfigMsg func(childComplexity int) int
+ MisconfigQuery func(childComplexity int) int
+ MisconfigResolution func(childComplexity int) int
+ MisconfigSeverity func(childComplexity int) int
+ MisconfigStatus func(childComplexity int) int
+ MisconfigTitle func(childComplexity int) int
+ MisconfigType func(childComplexity int) int
+ Name func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ }
+
+ TrivySBOM struct {
+ BomFormat func(childComplexity int) int
+ BomRef func(childComplexity int) int
+ ClusterName func(childComplexity int) int
+ ExpiryDate func(childComplexity int) int
+ ID func(childComplexity int) int
+ ImageName func(childComplexity int) int
+ PackageName func(childComplexity int) int
+ PackageURL func(childComplexity int) int
+ SerialNumber func(childComplexity int) int
+ Version func(childComplexity int) int
+ }
+
+ TrivyVul struct {
+ ClusterName func(childComplexity int) int
+ ExpiryDate func(childComplexity int) int
+ ID func(childComplexity int) int
+ Kind func(childComplexity int) int
+ Name func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ VulFixedVersion func(childComplexity int) int
+ VulID func(childComplexity int) int
+ VulInstalledVersion func(childComplexity int) int
+ VulLastModifiedDate func(childComplexity int) int
+ VulPkgID func(childComplexity int) int
+ VulPkgName func(childComplexity int) int
+ VulPkgPath func(childComplexity int) int
+ VulPublishedDate func(childComplexity int) int
+ VulSeverity func(childComplexity int) int
+ VulTitle func(childComplexity int) int
+ VulVendorIds func(childComplexity int) int
+ }
+
+ Vulnerability struct {
+ ClusterName func(childComplexity int) int
+ ExpiryDate func(childComplexity int) int
+ ExportedAt func(childComplexity int) int
+ ID func(childComplexity int) int
+ Kind func(childComplexity int) int
+ Name func(childComplexity int) int
+ Namespace func(childComplexity int) int
+ VulFixedVersion func(childComplexity int) int
+ VulID func(childComplexity int) int
+ VulInstalledVersion func(childComplexity int) int
+ VulLastModifiedDate func(childComplexity int) int
+ VulPkgID func(childComplexity int) int
+ VulPkgName func(childComplexity int) int
+ VulPkgPath func(childComplexity int) int
+ VulPublishedDate func(childComplexity int) int
+ VulSeverity func(childComplexity int) int
+ VulTitle func(childComplexity int) int
+ VulVendorIds func(childComplexity int) int
+ }
+}
+
+type QueryResolver interface {
+ AllEvents(ctx context.Context) ([]*model.Event, error)
+ AllRakkess(ctx context.Context) ([]*model.Rakkess, error)
+ AllDeprecatedAPIs(ctx context.Context) ([]*model.DeprecatedAPI, error)
+ AllDeletedAPIs(ctx context.Context) ([]*model.DeletedAPI, error)
+ AllGetAllResources(ctx context.Context) ([]*model.GetAllResource, error)
+ AllTrivySBOMs(ctx context.Context) ([]*model.TrivySbom, error)
+ AllTrivyImages(ctx context.Context) ([]*model.TrivyImage, error)
+ AllKubeScores(ctx context.Context) ([]*model.Kubescore, error)
+ AllTrivyVuls(ctx context.Context) ([]*model.TrivyVul, error)
+ AllTrivyMisconfigs(ctx context.Context) ([]*model.TrivyMisconfig, error)
+ UniqueNamespaces(ctx context.Context, clusterName string) ([]*model.Namespace, error)
+ UniqueClusters(ctx context.Context) ([]*model.Cluster, error)
+ OutdatedImagesByClusterAndNamespace(ctx context.Context, clusterName string, namespace string) ([]*model.OutdatedImage, error)
+ OutdatedImagesCount(ctx context.Context, clusterName string, namespace string) (int, error)
+ AllClusterNamespaceOutdatedCounts(ctx context.Context) ([]*model.ClusterNamespaceOutdatedCount, error)
+ AllClusterDeprecatedAPIsCounts(ctx context.Context) ([]*model.ClusterAPIsCount, error)
+ AllClusterDeletedAPIsCounts(ctx context.Context) ([]*model.ClusterAPIsCount, error)
+ AllClusterNamespaceResourceCounts(ctx context.Context) ([]*model.ClusterNamespaceResourceCount, error)
+ EventsByClusterAndNamespace(ctx context.Context, clusterName string, namespace string) ([]*model.Event, error)
+ Vulnerabilities(ctx context.Context, clusterName string, namespace string) ([]*model.Vulnerability, error)
+ Misconfigurations(ctx context.Context, clusterName string, namespace string) ([]*model.Misconfiguration, error)
+ Kubescores(ctx context.Context, clustername string, namespace string) ([]*model.KubeScore, error)
+ GetAllResources(ctx context.Context, clusterName string, namespace string) ([]*model.GetAllResource, error)
+ TrivyImages(ctx context.Context, clusterName string) ([]*model.TrivyImage, error)
+ DeprecatedAPIs(ctx context.Context, clusterName string) ([]*model.DeprecatedAPI, error)
+ DeletedAPIs(ctx context.Context, clusterName string) ([]*model.DeletedAPI, error)
+ TrivySBOMs(ctx context.Context, clusterName string) ([]*model.TrivySbom, error)
+ TrivyVulCount(ctx context.Context, clusterName string, namespace string) (*model.ClusterNamespaceVulCount, error)
+ TrivyMisconfigCount(ctx context.Context, clusterName string, namespace string) (*model.ClusterNamespaceMisconfigCount, error)
+ DeletedAPICount(ctx context.Context, clusterName string) (*model.ClusterDeletedAPICount, error)
+ TrivyImageCount(ctx context.Context, clusterName string) (*model.TrivyImageCount, error)
+ DeprecatedAPICount(ctx context.Context, clusterName string) (*model.ClusterDeprecatedAPICount, error)
+}
+
+type executableSchema struct {
+ schema *ast.Schema
+ resolvers ResolverRoot
+ directives DirectiveRoot
+ complexity ComplexityRoot
+}
+
+func (e *executableSchema) Schema() *ast.Schema {
+ if e.schema != nil {
+ return e.schema
+ }
+ return parsedSchema
+}
+
+func (e *executableSchema) Complexity(typeName, field string, childComplexity int, rawArgs map[string]interface{}) (int, bool) {
+ ec := executionContext{nil, e, 0, 0, nil}
+ _ = ec
+ switch typeName + "." + field {
+
+ case "Cluster.name":
+ if e.complexity.Cluster.Name == nil {
+ break
+ }
+
+ return e.complexity.Cluster.Name(childComplexity), true
+
+ case "ClusterAPIsCount.clusterName":
+ if e.complexity.ClusterAPIsCount.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.ClusterAPIsCount.ClusterName(childComplexity), true
+
+ case "ClusterAPIsCount.count":
+ if e.complexity.ClusterAPIsCount.Count == nil {
+ break
+ }
+
+ return e.complexity.ClusterAPIsCount.Count(childComplexity), true
+
+ case "ClusterDeletedAPICount.clusterName":
+ if e.complexity.ClusterDeletedAPICount.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.ClusterDeletedAPICount.ClusterName(childComplexity), true
+
+ case "ClusterDeletedAPICount.deletedAPICount":
+ if e.complexity.ClusterDeletedAPICount.DeletedAPICount == nil {
+ break
+ }
+
+ return e.complexity.ClusterDeletedAPICount.DeletedAPICount(childComplexity), true
+
+ case "ClusterDeprecatedAPICount.clusterName":
+ if e.complexity.ClusterDeprecatedAPICount.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.ClusterDeprecatedAPICount.ClusterName(childComplexity), true
+
+ case "ClusterDeprecatedAPICount.deprecatedAPICount":
+ if e.complexity.ClusterDeprecatedAPICount.DeprecatedAPICount == nil {
+ break
+ }
+
+ return e.complexity.ClusterDeprecatedAPICount.DeprecatedAPICount(childComplexity), true
+
+ case "ClusterNamespaceMisconfigCount.clusterName":
+ if e.complexity.ClusterNamespaceMisconfigCount.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.ClusterNamespaceMisconfigCount.ClusterName(childComplexity), true
+
+ case "ClusterNamespaceMisconfigCount.misconfigCount":
+ if e.complexity.ClusterNamespaceMisconfigCount.MisconfigCount == nil {
+ break
+ }
+
+ return e.complexity.ClusterNamespaceMisconfigCount.MisconfigCount(childComplexity), true
+
+ case "ClusterNamespaceMisconfigCount.namespace":
+ if e.complexity.ClusterNamespaceMisconfigCount.Namespace == nil {
+ break
+ }
+
+ return e.complexity.ClusterNamespaceMisconfigCount.Namespace(childComplexity), true
+
+ case "ClusterNamespaceOutdatedCount.clusterName":
+ if e.complexity.ClusterNamespaceOutdatedCount.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.ClusterNamespaceOutdatedCount.ClusterName(childComplexity), true
+
+ case "ClusterNamespaceOutdatedCount.namespace":
+ if e.complexity.ClusterNamespaceOutdatedCount.Namespace == nil {
+ break
+ }
+
+ return e.complexity.ClusterNamespaceOutdatedCount.Namespace(childComplexity), true
+
+ case "ClusterNamespaceOutdatedCount.outdatedCount":
+ if e.complexity.ClusterNamespaceOutdatedCount.OutdatedCount == nil {
+ break
+ }
+
+ return e.complexity.ClusterNamespaceOutdatedCount.OutdatedCount(childComplexity), true
+
+ case "ClusterNamespaceResourceCount.clusterName":
+ if e.complexity.ClusterNamespaceResourceCount.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.ClusterNamespaceResourceCount.ClusterName(childComplexity), true
+
+ case "ClusterNamespaceResourceCount.namespace":
+ if e.complexity.ClusterNamespaceResourceCount.Namespace == nil {
+ break
+ }
+
+ return e.complexity.ClusterNamespaceResourceCount.Namespace(childComplexity), true
+
+ case "ClusterNamespaceResourceCount.resourceCount":
+ if e.complexity.ClusterNamespaceResourceCount.ResourceCount == nil {
+ break
+ }
+
+ return e.complexity.ClusterNamespaceResourceCount.ResourceCount(childComplexity), true
+
+ case "ClusterNamespaceVulCount.clusterName":
+ if e.complexity.ClusterNamespaceVulCount.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.ClusterNamespaceVulCount.ClusterName(childComplexity), true
+
+ case "ClusterNamespaceVulCount.namespace":
+ if e.complexity.ClusterNamespaceVulCount.Namespace == nil {
+ break
+ }
+
+ return e.complexity.ClusterNamespaceVulCount.Namespace(childComplexity), true
+
+ case "ClusterNamespaceVulCount.vulCount":
+ if e.complexity.ClusterNamespaceVulCount.VulCount == nil {
+ break
+ }
+
+ return e.complexity.ClusterNamespaceVulCount.VulCount(childComplexity), true
+
+ case "DeletedAPI.ClusterName":
+ if e.complexity.DeletedAPI.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.DeletedAPI.ClusterName(childComplexity), true
+
+ case "DeletedAPI.Deleted":
+ if e.complexity.DeletedAPI.Deleted == nil {
+ break
+ }
+
+ return e.complexity.DeletedAPI.Deleted(childComplexity), true
+
+ case "DeletedAPI.EventTime":
+ if e.complexity.DeletedAPI.EventTime == nil {
+ break
+ }
+
+ return e.complexity.DeletedAPI.EventTime(childComplexity), true
+
+ case "DeletedAPI.ExpiryDate":
+ if e.complexity.DeletedAPI.ExpiryDate == nil {
+ break
+ }
+
+ return e.complexity.DeletedAPI.ExpiryDate(childComplexity), true
+
+ case "DeletedAPI.Group":
+ if e.complexity.DeletedAPI.Group == nil {
+ break
+ }
+
+ return e.complexity.DeletedAPI.Group(childComplexity), true
+
+ case "DeletedAPI.Kind":
+ if e.complexity.DeletedAPI.Kind == nil {
+ break
+ }
+
+ return e.complexity.DeletedAPI.Kind(childComplexity), true
+
+ case "DeletedAPI.Name":
+ if e.complexity.DeletedAPI.Name == nil {
+ break
+ }
+
+ return e.complexity.DeletedAPI.Name(childComplexity), true
+
+ case "DeletedAPI.ObjectName":
+ if e.complexity.DeletedAPI.ObjectName == nil {
+ break
+ }
+
+ return e.complexity.DeletedAPI.ObjectName(childComplexity), true
+
+ case "DeletedAPI.Scope":
+ if e.complexity.DeletedAPI.Scope == nil {
+ break
+ }
+
+ return e.complexity.DeletedAPI.Scope(childComplexity), true
+
+ case "DeletedAPI.Version":
+ if e.complexity.DeletedAPI.Version == nil {
+ break
+ }
+
+ return e.complexity.DeletedAPI.Version(childComplexity), true
+
+ case "DeprecatedAPI.ClusterName":
+ if e.complexity.DeprecatedAPI.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.DeprecatedAPI.ClusterName(childComplexity), true
+
+ case "DeprecatedAPI.Deprecated":
+ if e.complexity.DeprecatedAPI.Deprecated == nil {
+ break
+ }
+
+ return e.complexity.DeprecatedAPI.Deprecated(childComplexity), true
+
+ case "DeprecatedAPI.Description":
+ if e.complexity.DeprecatedAPI.Description == nil {
+ break
+ }
+
+ return e.complexity.DeprecatedAPI.Description(childComplexity), true
+
+ case "DeprecatedAPI.EventTime":
+ if e.complexity.DeprecatedAPI.EventTime == nil {
+ break
+ }
+
+ return e.complexity.DeprecatedAPI.EventTime(childComplexity), true
+
+ case "DeprecatedAPI.ExpiryDate":
+ if e.complexity.DeprecatedAPI.ExpiryDate == nil {
+ break
+ }
+
+ return e.complexity.DeprecatedAPI.ExpiryDate(childComplexity), true
+
+ case "DeprecatedAPI.Kind":
+ if e.complexity.DeprecatedAPI.Kind == nil {
+ break
+ }
+
+ return e.complexity.DeprecatedAPI.Kind(childComplexity), true
+
+ case "DeprecatedAPI.ObjectName":
+ if e.complexity.DeprecatedAPI.ObjectName == nil {
+ break
+ }
+
+ return e.complexity.DeprecatedAPI.ObjectName(childComplexity), true
+
+ case "DeprecatedAPI.Scope":
+ if e.complexity.DeprecatedAPI.Scope == nil {
+ break
+ }
+
+ return e.complexity.DeprecatedAPI.Scope(childComplexity), true
+
+ case "Event.ClusterName":
+ if e.complexity.Event.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.Event.ClusterName(childComplexity), true
+
+ case "Event.Event":
+ if e.complexity.Event.Event == nil {
+ break
+ }
+
+ return e.complexity.Event.Event(childComplexity), true
+
+ case "Event.EventTime":
+ if e.complexity.Event.EventTime == nil {
+ break
+ }
+
+ return e.complexity.Event.EventTime(childComplexity), true
+
+ case "Event.ExpiryDate":
+ if e.complexity.Event.ExpiryDate == nil {
+ break
+ }
+
+ return e.complexity.Event.ExpiryDate(childComplexity), true
+
+ case "Event.FirstTime":
+ if e.complexity.Event.FirstTime == nil {
+ break
+ }
+
+ return e.complexity.Event.FirstTime(childComplexity), true
+
+ case "Event.Host":
+ if e.complexity.Event.Host == nil {
+ break
+ }
+
+ return e.complexity.Event.Host(childComplexity), true
+
+ case "Event.Id":
+ if e.complexity.Event.ID == nil {
+ break
+ }
+
+ return e.complexity.Event.ID(childComplexity), true
+
+ case "Event.ImageName":
+ if e.complexity.Event.ImageName == nil {
+ break
+ }
+
+ return e.complexity.Event.ImageName(childComplexity), true
+
+ case "Event.Kind":
+ if e.complexity.Event.Kind == nil {
+ break
+ }
+
+ return e.complexity.Event.Kind(childComplexity), true
+
+ case "Event.LastTime":
+ if e.complexity.Event.LastTime == nil {
+ break
+ }
+
+ return e.complexity.Event.LastTime(childComplexity), true
+
+ case "Event.Message":
+ if e.complexity.Event.Message == nil {
+ break
+ }
+
+ return e.complexity.Event.Message(childComplexity), true
+
+ case "Event.Name":
+ if e.complexity.Event.Name == nil {
+ break
+ }
+
+ return e.complexity.Event.Name(childComplexity), true
+
+ case "Event.Namespace":
+ if e.complexity.Event.Namespace == nil {
+ break
+ }
+
+ return e.complexity.Event.Namespace(childComplexity), true
+
+ case "Event.OpType":
+ if e.complexity.Event.OpType == nil {
+ break
+ }
+
+ return e.complexity.Event.OpType(childComplexity), true
+
+ case "Event.Reason":
+ if e.complexity.Event.Reason == nil {
+ break
+ }
+
+ return e.complexity.Event.Reason(childComplexity), true
+
+ case "GetAllResource.Age":
+ if e.complexity.GetAllResource.Age == nil {
+ break
+ }
+
+ return e.complexity.GetAllResource.Age(childComplexity), true
+
+ case "GetAllResource.ClusterName":
+ if e.complexity.GetAllResource.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.GetAllResource.ClusterName(childComplexity), true
+
+ case "GetAllResource.EventTime":
+ if e.complexity.GetAllResource.EventTime == nil {
+ break
+ }
+
+ return e.complexity.GetAllResource.EventTime(childComplexity), true
+
+ case "GetAllResource.ExpiryDate":
+ if e.complexity.GetAllResource.ExpiryDate == nil {
+ break
+ }
+
+ return e.complexity.GetAllResource.ExpiryDate(childComplexity), true
+
+ case "GetAllResource.Kind":
+ if e.complexity.GetAllResource.Kind == nil {
+ break
+ }
+
+ return e.complexity.GetAllResource.Kind(childComplexity), true
+
+ case "GetAllResource.Namespace":
+ if e.complexity.GetAllResource.Namespace == nil {
+ break
+ }
+
+ return e.complexity.GetAllResource.Namespace(childComplexity), true
+
+ case "GetAllResource.Resource":
+ if e.complexity.GetAllResource.Resource == nil {
+ break
+ }
+
+ return e.complexity.GetAllResource.Resource(childComplexity), true
+
+ case "KubeScore.apiVersion":
+ if e.complexity.KubeScore.APIVersion == nil {
+ break
+ }
+
+ return e.complexity.KubeScore.APIVersion(childComplexity), true
+
+ case "KubeScore.clusterName":
+ if e.complexity.KubeScore.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.KubeScore.ClusterName(childComplexity), true
+
+ case "KubeScore.description":
+ if e.complexity.KubeScore.Description == nil {
+ break
+ }
+
+ return e.complexity.KubeScore.Description(childComplexity), true
+
+ case "KubeScore.eventTime":
+ if e.complexity.KubeScore.EventTime == nil {
+ break
+ }
+
+ return e.complexity.KubeScore.EventTime(childComplexity), true
+
+ case "KubeScore.fileName":
+ if e.complexity.KubeScore.FileName == nil {
+ break
+ }
+
+ return e.complexity.KubeScore.FileName(childComplexity), true
+
+ case "KubeScore.fileRow":
+ if e.complexity.KubeScore.FileRow == nil {
+ break
+ }
+
+ return e.complexity.KubeScore.FileRow(childComplexity), true
+
+ case "KubeScore.id":
+ if e.complexity.KubeScore.ID == nil {
+ break
+ }
+
+ return e.complexity.KubeScore.ID(childComplexity), true
+
+ case "KubeScore.kind":
+ if e.complexity.KubeScore.Kind == nil {
+ break
+ }
+
+ return e.complexity.KubeScore.Kind(childComplexity), true
+
+ case "KubeScore.name":
+ if e.complexity.KubeScore.Name == nil {
+ break
+ }
+
+ return e.complexity.KubeScore.Name(childComplexity), true
+
+ case "KubeScore.namespace":
+ if e.complexity.KubeScore.Namespace == nil {
+ break
+ }
+
+ return e.complexity.KubeScore.Namespace(childComplexity), true
+
+ case "KubeScore.objectName":
+ if e.complexity.KubeScore.ObjectName == nil {
+ break
+ }
+
+ return e.complexity.KubeScore.ObjectName(childComplexity), true
+
+ case "KubeScore.path":
+ if e.complexity.KubeScore.Path == nil {
+ break
+ }
+
+ return e.complexity.KubeScore.Path(childComplexity), true
+
+ case "KubeScore.summary":
+ if e.complexity.KubeScore.Summary == nil {
+ break
+ }
+
+ return e.complexity.KubeScore.Summary(childComplexity), true
+
+ case "KubeScore.targetType":
+ if e.complexity.KubeScore.TargetType == nil {
+ break
+ }
+
+ return e.complexity.KubeScore.TargetType(childComplexity), true
+
+ case "Kubescore.apiVersion":
+ if e.complexity.Kubescore.APIVersion == nil {
+ break
+ }
+
+ return e.complexity.Kubescore.APIVersion(childComplexity), true
+
+ case "Kubescore.clusterName":
+ if e.complexity.Kubescore.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.Kubescore.ClusterName(childComplexity), true
+
+ case "Kubescore.description":
+ if e.complexity.Kubescore.Description == nil {
+ break
+ }
+
+ return e.complexity.Kubescore.Description(childComplexity), true
+
+ case "Kubescore.eventTime":
+ if e.complexity.Kubescore.EventTime == nil {
+ break
+ }
+
+ return e.complexity.Kubescore.EventTime(childComplexity), true
+
+ case "Kubescore.fileName":
+ if e.complexity.Kubescore.FileName == nil {
+ break
+ }
+
+ return e.complexity.Kubescore.FileName(childComplexity), true
+
+ case "Kubescore.fileRow":
+ if e.complexity.Kubescore.FileRow == nil {
+ break
+ }
+
+ return e.complexity.Kubescore.FileRow(childComplexity), true
+
+ case "Kubescore.id":
+ if e.complexity.Kubescore.ID == nil {
+ break
+ }
+
+ return e.complexity.Kubescore.ID(childComplexity), true
+
+ case "Kubescore.kind":
+ if e.complexity.Kubescore.Kind == nil {
+ break
+ }
+
+ return e.complexity.Kubescore.Kind(childComplexity), true
+
+ case "Kubescore.name":
+ if e.complexity.Kubescore.Name == nil {
+ break
+ }
+
+ return e.complexity.Kubescore.Name(childComplexity), true
+
+ case "Kubescore.namespace":
+ if e.complexity.Kubescore.Namespace == nil {
+ break
+ }
+
+ return e.complexity.Kubescore.Namespace(childComplexity), true
+
+ case "Kubescore.objectName":
+ if e.complexity.Kubescore.ObjectName == nil {
+ break
+ }
+
+ return e.complexity.Kubescore.ObjectName(childComplexity), true
+
+ case "Kubescore.path":
+ if e.complexity.Kubescore.Path == nil {
+ break
+ }
+
+ return e.complexity.Kubescore.Path(childComplexity), true
+
+ case "Kubescore.summary":
+ if e.complexity.Kubescore.Summary == nil {
+ break
+ }
+
+ return e.complexity.Kubescore.Summary(childComplexity), true
+
+ case "Kubescore.targetType":
+ if e.complexity.Kubescore.TargetType == nil {
+ break
+ }
+
+ return e.complexity.Kubescore.TargetType(childComplexity), true
+
+ case "Misconfiguration.clusterName":
+ if e.complexity.Misconfiguration.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.ClusterName(childComplexity), true
+
+ case "Misconfiguration.eventTime":
+ if e.complexity.Misconfiguration.EventTime == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.EventTime(childComplexity), true
+
+ case "Misconfiguration.expiryDate":
+ if e.complexity.Misconfiguration.ExpiryDate == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.ExpiryDate(childComplexity), true
+
+ case "Misconfiguration.exportedAt":
+ if e.complexity.Misconfiguration.ExportedAt == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.ExportedAt(childComplexity), true
+
+ case "Misconfiguration.id":
+ if e.complexity.Misconfiguration.ID == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.ID(childComplexity), true
+
+ case "Misconfiguration.kind":
+ if e.complexity.Misconfiguration.Kind == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.Kind(childComplexity), true
+
+ case "Misconfiguration.misconfigAvdid":
+ if e.complexity.Misconfiguration.MisconfigAvdid == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.MisconfigAvdid(childComplexity), true
+
+ case "Misconfiguration.misconfigDesc":
+ if e.complexity.Misconfiguration.MisconfigDesc == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.MisconfigDesc(childComplexity), true
+
+ case "Misconfiguration.misconfigId":
+ if e.complexity.Misconfiguration.MisconfigID == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.MisconfigID(childComplexity), true
+
+ case "Misconfiguration.misconfigMsg":
+ if e.complexity.Misconfiguration.MisconfigMsg == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.MisconfigMsg(childComplexity), true
+
+ case "Misconfiguration.misconfigQuery":
+ if e.complexity.Misconfiguration.MisconfigQuery == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.MisconfigQuery(childComplexity), true
+
+ case "Misconfiguration.misconfigResolution":
+ if e.complexity.Misconfiguration.MisconfigResolution == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.MisconfigResolution(childComplexity), true
+
+ case "Misconfiguration.misconfigSeverity":
+ if e.complexity.Misconfiguration.MisconfigSeverity == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.MisconfigSeverity(childComplexity), true
+
+ case "Misconfiguration.misconfigStatus":
+ if e.complexity.Misconfiguration.MisconfigStatus == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.MisconfigStatus(childComplexity), true
+
+ case "Misconfiguration.misconfigTitle":
+ if e.complexity.Misconfiguration.MisconfigTitle == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.MisconfigTitle(childComplexity), true
+
+ case "Misconfiguration.misconfigType":
+ if e.complexity.Misconfiguration.MisconfigType == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.MisconfigType(childComplexity), true
+
+ case "Misconfiguration.name":
+ if e.complexity.Misconfiguration.Name == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.Name(childComplexity), true
+
+ case "Misconfiguration.namespace":
+ if e.complexity.Misconfiguration.Namespace == nil {
+ break
+ }
+
+ return e.complexity.Misconfiguration.Namespace(childComplexity), true
+
+ case "Namespace.name":
+ if e.complexity.Namespace.Name == nil {
+ break
+ }
+
+ return e.complexity.Namespace.Name(childComplexity), true
+
+ case "NamespaceData.kubeScores":
+ if e.complexity.NamespaceData.KubeScores == nil {
+ break
+ }
+
+ return e.complexity.NamespaceData.KubeScores(childComplexity), true
+
+ case "NamespaceData.namespace":
+ if e.complexity.NamespaceData.Namespace == nil {
+ break
+ }
+
+ return e.complexity.NamespaceData.Namespace(childComplexity), true
+
+ case "NamespaceData.outdatedImages":
+ if e.complexity.NamespaceData.OutdatedImages == nil {
+ break
+ }
+
+ return e.complexity.NamespaceData.OutdatedImages(childComplexity), true
+
+ case "NamespaceData.resources":
+ if e.complexity.NamespaceData.Resources == nil {
+ break
+ }
+
+ return e.complexity.NamespaceData.Resources(childComplexity), true
+
+ case "OutdatedImage.clusterName":
+ if e.complexity.OutdatedImage.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.OutdatedImage.ClusterName(childComplexity), true
+
+ case "OutdatedImage.currentImage":
+ if e.complexity.OutdatedImage.CurrentImage == nil {
+ break
+ }
+
+ return e.complexity.OutdatedImage.CurrentImage(childComplexity), true
+
+ case "OutdatedImage.currentTag":
+ if e.complexity.OutdatedImage.CurrentTag == nil {
+ break
+ }
+
+ return e.complexity.OutdatedImage.CurrentTag(childComplexity), true
+
+ case "OutdatedImage.eventTime":
+ if e.complexity.OutdatedImage.EventTime == nil {
+ break
+ }
+
+ return e.complexity.OutdatedImage.EventTime(childComplexity), true
+
+ case "OutdatedImage.latestVersion":
+ if e.complexity.OutdatedImage.LatestVersion == nil {
+ break
+ }
+
+ return e.complexity.OutdatedImage.LatestVersion(childComplexity), true
+
+ case "OutdatedImage.namespace":
+ if e.complexity.OutdatedImage.Namespace == nil {
+ break
+ }
+
+ return e.complexity.OutdatedImage.Namespace(childComplexity), true
+
+ case "OutdatedImage.pod":
+ if e.complexity.OutdatedImage.Pod == nil {
+ break
+ }
+
+ return e.complexity.OutdatedImage.Pod(childComplexity), true
+
+ case "OutdatedImage.versionsBehind":
+ if e.complexity.OutdatedImage.VersionsBehind == nil {
+ break
+ }
+
+ return e.complexity.OutdatedImage.VersionsBehind(childComplexity), true
+
+ case "Query.allClusterDeletedAPIsCounts":
+ if e.complexity.Query.AllClusterDeletedAPIsCounts == nil {
+ break
+ }
+
+ return e.complexity.Query.AllClusterDeletedAPIsCounts(childComplexity), true
+
+ case "Query.allClusterDeprecatedAPIsCounts":
+ if e.complexity.Query.AllClusterDeprecatedAPIsCounts == nil {
+ break
+ }
+
+ return e.complexity.Query.AllClusterDeprecatedAPIsCounts(childComplexity), true
+
+ case "Query.allClusterNamespaceOutdatedCounts":
+ if e.complexity.Query.AllClusterNamespaceOutdatedCounts == nil {
+ break
+ }
+
+ return e.complexity.Query.AllClusterNamespaceOutdatedCounts(childComplexity), true
+
+ case "Query.allClusterNamespaceResourceCounts":
+ if e.complexity.Query.AllClusterNamespaceResourceCounts == nil {
+ break
+ }
+
+ return e.complexity.Query.AllClusterNamespaceResourceCounts(childComplexity), true
+
+ case "Query.allDeletedAPIs":
+ if e.complexity.Query.AllDeletedAPIs == nil {
+ break
+ }
+
+ return e.complexity.Query.AllDeletedAPIs(childComplexity), true
+
+ case "Query.allDeprecatedAPIs":
+ if e.complexity.Query.AllDeprecatedAPIs == nil {
+ break
+ }
+
+ return e.complexity.Query.AllDeprecatedAPIs(childComplexity), true
+
+ case "Query.allEvents":
+ if e.complexity.Query.AllEvents == nil {
+ break
+ }
+
+ return e.complexity.Query.AllEvents(childComplexity), true
+
+ case "Query.allGetAllResources":
+ if e.complexity.Query.AllGetAllResources == nil {
+ break
+ }
+
+ return e.complexity.Query.AllGetAllResources(childComplexity), true
+
+ case "Query.allKubeScores":
+ if e.complexity.Query.AllKubeScores == nil {
+ break
+ }
+
+ return e.complexity.Query.AllKubeScores(childComplexity), true
+
+ case "Query.allRakkess":
+ if e.complexity.Query.AllRakkess == nil {
+ break
+ }
+
+ return e.complexity.Query.AllRakkess(childComplexity), true
+
+ case "Query.allTrivyImages":
+ if e.complexity.Query.AllTrivyImages == nil {
+ break
+ }
+
+ return e.complexity.Query.AllTrivyImages(childComplexity), true
+
+ case "Query.allTrivyMisconfigs":
+ if e.complexity.Query.AllTrivyMisconfigs == nil {
+ break
+ }
+
+ return e.complexity.Query.AllTrivyMisconfigs(childComplexity), true
+
+ case "Query.allTrivySBOMs":
+ if e.complexity.Query.AllTrivySBOMs == nil {
+ break
+ }
+
+ return e.complexity.Query.AllTrivySBOMs(childComplexity), true
+
+ case "Query.allTrivyVuls":
+ if e.complexity.Query.AllTrivyVuls == nil {
+ break
+ }
+
+ return e.complexity.Query.AllTrivyVuls(childComplexity), true
+
+ case "Query.deletedAPICount":
+ if e.complexity.Query.DeletedAPICount == nil {
+ break
+ }
+
+ args, err := ec.field_Query_deletedAPICount_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.DeletedAPICount(childComplexity, args["clusterName"].(string)), true
+
+ case "Query.deletedAPIs":
+ if e.complexity.Query.DeletedAPIs == nil {
+ break
+ }
+
+ args, err := ec.field_Query_deletedAPIs_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.DeletedAPIs(childComplexity, args["clusterName"].(string)), true
+
+ case "Query.deprecatedAPICount":
+ if e.complexity.Query.DeprecatedAPICount == nil {
+ break
+ }
+
+ args, err := ec.field_Query_deprecatedAPICount_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.DeprecatedAPICount(childComplexity, args["clusterName"].(string)), true
+
+ case "Query.deprecatedAPIs":
+ if e.complexity.Query.DeprecatedAPIs == nil {
+ break
+ }
+
+ args, err := ec.field_Query_deprecatedAPIs_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.DeprecatedAPIs(childComplexity, args["clusterName"].(string)), true
+
+ case "Query.eventsByClusterAndNamespace":
+ if e.complexity.Query.EventsByClusterAndNamespace == nil {
+ break
+ }
+
+ args, err := ec.field_Query_eventsByClusterAndNamespace_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.EventsByClusterAndNamespace(childComplexity, args["clusterName"].(string), args["namespace"].(string)), true
+
+ case "Query.getAllResources":
+ if e.complexity.Query.GetAllResources == nil {
+ break
+ }
+
+ args, err := ec.field_Query_getAllResources_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.GetAllResources(childComplexity, args["clusterName"].(string), args["namespace"].(string)), true
+
+ case "Query.kubescores":
+ if e.complexity.Query.Kubescores == nil {
+ break
+ }
+
+ args, err := ec.field_Query_kubescores_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.Kubescores(childComplexity, args["clustername"].(string), args["namespace"].(string)), true
+
+ case "Query.misconfigurations":
+ if e.complexity.Query.Misconfigurations == nil {
+ break
+ }
+
+ args, err := ec.field_Query_misconfigurations_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.Misconfigurations(childComplexity, args["clusterName"].(string), args["namespace"].(string)), true
+
+ case "Query.outdatedImagesByClusterAndNamespace":
+ if e.complexity.Query.OutdatedImagesByClusterAndNamespace == nil {
+ break
+ }
+
+ args, err := ec.field_Query_outdatedImagesByClusterAndNamespace_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.OutdatedImagesByClusterAndNamespace(childComplexity, args["clusterName"].(string), args["namespace"].(string)), true
+
+ case "Query.outdatedImagesCount":
+ if e.complexity.Query.OutdatedImagesCount == nil {
+ break
+ }
+
+ args, err := ec.field_Query_outdatedImagesCount_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.OutdatedImagesCount(childComplexity, args["clusterName"].(string), args["namespace"].(string)), true
+
+ case "Query.trivyImageCount":
+ if e.complexity.Query.TrivyImageCount == nil {
+ break
+ }
+
+ args, err := ec.field_Query_trivyImageCount_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.TrivyImageCount(childComplexity, args["clusterName"].(string)), true
+
+ case "Query.trivyImages":
+ if e.complexity.Query.TrivyImages == nil {
+ break
+ }
+
+ args, err := ec.field_Query_trivyImages_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.TrivyImages(childComplexity, args["clusterName"].(string)), true
+
+ case "Query.trivyMisconfigCount":
+ if e.complexity.Query.TrivyMisconfigCount == nil {
+ break
+ }
+
+ args, err := ec.field_Query_trivyMisconfigCount_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.TrivyMisconfigCount(childComplexity, args["clusterName"].(string), args["namespace"].(string)), true
+
+ case "Query.trivySBOMs":
+ if e.complexity.Query.TrivySBOMs == nil {
+ break
+ }
+
+ args, err := ec.field_Query_trivySBOMs_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.TrivySBOMs(childComplexity, args["clusterName"].(string)), true
+
+ case "Query.trivyVulCount":
+ if e.complexity.Query.TrivyVulCount == nil {
+ break
+ }
+
+ args, err := ec.field_Query_trivyVulCount_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.TrivyVulCount(childComplexity, args["clusterName"].(string), args["namespace"].(string)), true
+
+ case "Query.uniqueClusters":
+ if e.complexity.Query.UniqueClusters == nil {
+ break
+ }
+
+ return e.complexity.Query.UniqueClusters(childComplexity), true
+
+ case "Query.uniqueNamespaces":
+ if e.complexity.Query.UniqueNamespaces == nil {
+ break
+ }
+
+ args, err := ec.field_Query_uniqueNamespaces_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.UniqueNamespaces(childComplexity, args["clusterName"].(string)), true
+
+ case "Query.vulnerabilities":
+ if e.complexity.Query.Vulnerabilities == nil {
+ break
+ }
+
+ args, err := ec.field_Query_vulnerabilities_args(context.TODO(), rawArgs)
+ if err != nil {
+ return 0, false
+ }
+
+ return e.complexity.Query.Vulnerabilities(childComplexity, args["clusterName"].(string), args["namespace"].(string)), true
+
+ case "Rakkess.ClusterName":
+ if e.complexity.Rakkess.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.Rakkess.ClusterName(childComplexity), true
+
+ case "Rakkess.Create":
+ if e.complexity.Rakkess.Create == nil {
+ break
+ }
+
+ return e.complexity.Rakkess.Create(childComplexity), true
+
+ case "Rakkess.Delete":
+ if e.complexity.Rakkess.Delete == nil {
+ break
+ }
+
+ return e.complexity.Rakkess.Delete(childComplexity), true
+
+ case "Rakkess.EventTime":
+ if e.complexity.Rakkess.EventTime == nil {
+ break
+ }
+
+ return e.complexity.Rakkess.EventTime(childComplexity), true
+
+ case "Rakkess.ExpiryDate":
+ if e.complexity.Rakkess.ExpiryDate == nil {
+ break
+ }
+
+ return e.complexity.Rakkess.ExpiryDate(childComplexity), true
+
+ case "Rakkess.List":
+ if e.complexity.Rakkess.List == nil {
+ break
+ }
+
+ return e.complexity.Rakkess.List(childComplexity), true
+
+ case "Rakkess.Name":
+ if e.complexity.Rakkess.Name == nil {
+ break
+ }
+
+ return e.complexity.Rakkess.Name(childComplexity), true
+
+ case "Rakkess.Update":
+ if e.complexity.Rakkess.Update == nil {
+ break
+ }
+
+ return e.complexity.Rakkess.Update(childComplexity), true
+
+ case "Resource.age":
+ if e.complexity.Resource.Age == nil {
+ break
+ }
+
+ return e.complexity.Resource.Age(childComplexity), true
+
+ case "Resource.clusterName":
+ if e.complexity.Resource.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.Resource.ClusterName(childComplexity), true
+
+ case "Resource.eventTime":
+ if e.complexity.Resource.EventTime == nil {
+ break
+ }
+
+ return e.complexity.Resource.EventTime(childComplexity), true
+
+ case "Resource.kind":
+ if e.complexity.Resource.Kind == nil {
+ break
+ }
+
+ return e.complexity.Resource.Kind(childComplexity), true
+
+ case "Resource.namespace":
+ if e.complexity.Resource.Namespace == nil {
+ break
+ }
+
+ return e.complexity.Resource.Namespace(childComplexity), true
+
+ case "Resource.resource":
+ if e.complexity.Resource.Resource == nil {
+ break
+ }
+
+ return e.complexity.Resource.Resource(childComplexity), true
+
+ case "TrivyImage.artifactName":
+ if e.complexity.TrivyImage.ArtifactName == nil {
+ break
+ }
+
+ return e.complexity.TrivyImage.ArtifactName(childComplexity), true
+
+ case "TrivyImage.clusterName":
+ if e.complexity.TrivyImage.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.TrivyImage.ClusterName(childComplexity), true
+
+ case "TrivyImage.expiryDate":
+ if e.complexity.TrivyImage.ExpiryDate == nil {
+ break
+ }
+
+ return e.complexity.TrivyImage.ExpiryDate(childComplexity), true
+
+ case "TrivyImage.id":
+ if e.complexity.TrivyImage.ID == nil {
+ break
+ }
+
+ return e.complexity.TrivyImage.ID(childComplexity), true
+
+ case "TrivyImage.vulFixedVersion":
+ if e.complexity.TrivyImage.VulFixedVersion == nil {
+ break
+ }
+
+ return e.complexity.TrivyImage.VulFixedVersion(childComplexity), true
+
+ case "TrivyImage.vulId":
+ if e.complexity.TrivyImage.VulID == nil {
+ break
+ }
+
+ return e.complexity.TrivyImage.VulID(childComplexity), true
+
+ case "TrivyImage.vulInstalledVersion":
+ if e.complexity.TrivyImage.VulInstalledVersion == nil {
+ break
+ }
+
+ return e.complexity.TrivyImage.VulInstalledVersion(childComplexity), true
+
+ case "TrivyImage.vulLastModifiedDate":
+ if e.complexity.TrivyImage.VulLastModifiedDate == nil {
+ break
+ }
+
+ return e.complexity.TrivyImage.VulLastModifiedDate(childComplexity), true
+
+ case "TrivyImage.vulPkgId":
+ if e.complexity.TrivyImage.VulPkgID == nil {
+ break
+ }
+
+ return e.complexity.TrivyImage.VulPkgID(childComplexity), true
+
+ case "TrivyImage.vulPkgName":
+ if e.complexity.TrivyImage.VulPkgName == nil {
+ break
+ }
+
+ return e.complexity.TrivyImage.VulPkgName(childComplexity), true
+
+ case "TrivyImage.vulPublishedDate":
+ if e.complexity.TrivyImage.VulPublishedDate == nil {
+ break
+ }
+
+ return e.complexity.TrivyImage.VulPublishedDate(childComplexity), true
+
+ case "TrivyImage.vulSeverity":
+ if e.complexity.TrivyImage.VulSeverity == nil {
+ break
+ }
+
+ return e.complexity.TrivyImage.VulSeverity(childComplexity), true
+
+ case "TrivyImage.vulTitle":
+ if e.complexity.TrivyImage.VulTitle == nil {
+ break
+ }
+
+ return e.complexity.TrivyImage.VulTitle(childComplexity), true
+
+ case "TrivyImageCount.clusterName":
+ if e.complexity.TrivyImageCount.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.TrivyImageCount.ClusterName(childComplexity), true
+
+ case "TrivyImageCount.ImageCount":
+ if e.complexity.TrivyImageCount.ImageCount == nil {
+ break
+ }
+
+ return e.complexity.TrivyImageCount.ImageCount(childComplexity), true
+
+ case "TrivyMisconfig.clusterName":
+ if e.complexity.TrivyMisconfig.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.ClusterName(childComplexity), true
+
+ case "TrivyMisconfig.eventTime":
+ if e.complexity.TrivyMisconfig.EventTime == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.EventTime(childComplexity), true
+
+ case "TrivyMisconfig.expiryDate":
+ if e.complexity.TrivyMisconfig.ExpiryDate == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.ExpiryDate(childComplexity), true
+
+ case "TrivyMisconfig.id":
+ if e.complexity.TrivyMisconfig.ID == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.ID(childComplexity), true
+
+ case "TrivyMisconfig.kind":
+ if e.complexity.TrivyMisconfig.Kind == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.Kind(childComplexity), true
+
+ case "TrivyMisconfig.misconfigAvdid":
+ if e.complexity.TrivyMisconfig.MisconfigAvdid == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.MisconfigAvdid(childComplexity), true
+
+ case "TrivyMisconfig.misconfigDesc":
+ if e.complexity.TrivyMisconfig.MisconfigDesc == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.MisconfigDesc(childComplexity), true
+
+ case "TrivyMisconfig.misconfigId":
+ if e.complexity.TrivyMisconfig.MisconfigID == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.MisconfigID(childComplexity), true
+
+ case "TrivyMisconfig.misconfigMsg":
+ if e.complexity.TrivyMisconfig.MisconfigMsg == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.MisconfigMsg(childComplexity), true
+
+ case "TrivyMisconfig.misconfigQuery":
+ if e.complexity.TrivyMisconfig.MisconfigQuery == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.MisconfigQuery(childComplexity), true
+
+ case "TrivyMisconfig.misconfigResolution":
+ if e.complexity.TrivyMisconfig.MisconfigResolution == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.MisconfigResolution(childComplexity), true
+
+ case "TrivyMisconfig.misconfigSeverity":
+ if e.complexity.TrivyMisconfig.MisconfigSeverity == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.MisconfigSeverity(childComplexity), true
+
+ case "TrivyMisconfig.misconfigStatus":
+ if e.complexity.TrivyMisconfig.MisconfigStatus == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.MisconfigStatus(childComplexity), true
+
+ case "TrivyMisconfig.misconfigTitle":
+ if e.complexity.TrivyMisconfig.MisconfigTitle == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.MisconfigTitle(childComplexity), true
+
+ case "TrivyMisconfig.misconfigType":
+ if e.complexity.TrivyMisconfig.MisconfigType == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.MisconfigType(childComplexity), true
+
+ case "TrivyMisconfig.name":
+ if e.complexity.TrivyMisconfig.Name == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.Name(childComplexity), true
+
+ case "TrivyMisconfig.namespace":
+ if e.complexity.TrivyMisconfig.Namespace == nil {
+ break
+ }
+
+ return e.complexity.TrivyMisconfig.Namespace(childComplexity), true
+
+ case "TrivySBOM.bomFormat":
+ if e.complexity.TrivySBOM.BomFormat == nil {
+ break
+ }
+
+ return e.complexity.TrivySBOM.BomFormat(childComplexity), true
+
+ case "TrivySBOM.bomRef":
+ if e.complexity.TrivySBOM.BomRef == nil {
+ break
+ }
+
+ return e.complexity.TrivySBOM.BomRef(childComplexity), true
+
+ case "TrivySBOM.clusterName":
+ if e.complexity.TrivySBOM.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.TrivySBOM.ClusterName(childComplexity), true
+
+ case "TrivySBOM.expiryDate":
+ if e.complexity.TrivySBOM.ExpiryDate == nil {
+ break
+ }
+
+ return e.complexity.TrivySBOM.ExpiryDate(childComplexity), true
+
+ case "TrivySBOM.id":
+ if e.complexity.TrivySBOM.ID == nil {
+ break
+ }
+
+ return e.complexity.TrivySBOM.ID(childComplexity), true
+
+ case "TrivySBOM.imageName":
+ if e.complexity.TrivySBOM.ImageName == nil {
+ break
+ }
+
+ return e.complexity.TrivySBOM.ImageName(childComplexity), true
+
+ case "TrivySBOM.packageName":
+ if e.complexity.TrivySBOM.PackageName == nil {
+ break
+ }
+
+ return e.complexity.TrivySBOM.PackageName(childComplexity), true
+
+ case "TrivySBOM.packageUrl":
+ if e.complexity.TrivySBOM.PackageURL == nil {
+ break
+ }
+
+ return e.complexity.TrivySBOM.PackageURL(childComplexity), true
+
+ case "TrivySBOM.serialNumber":
+ if e.complexity.TrivySBOM.SerialNumber == nil {
+ break
+ }
+
+ return e.complexity.TrivySBOM.SerialNumber(childComplexity), true
+
+ case "TrivySBOM.version":
+ if e.complexity.TrivySBOM.Version == nil {
+ break
+ }
+
+ return e.complexity.TrivySBOM.Version(childComplexity), true
+
+ case "TrivyVul.clusterName":
+ if e.complexity.TrivyVul.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.ClusterName(childComplexity), true
+
+ case "TrivyVul.expiryDate":
+ if e.complexity.TrivyVul.ExpiryDate == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.ExpiryDate(childComplexity), true
+
+ case "TrivyVul.id":
+ if e.complexity.TrivyVul.ID == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.ID(childComplexity), true
+
+ case "TrivyVul.kind":
+ if e.complexity.TrivyVul.Kind == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.Kind(childComplexity), true
+
+ case "TrivyVul.name":
+ if e.complexity.TrivyVul.Name == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.Name(childComplexity), true
+
+ case "TrivyVul.namespace":
+ if e.complexity.TrivyVul.Namespace == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.Namespace(childComplexity), true
+
+ case "TrivyVul.vulFixedVersion":
+ if e.complexity.TrivyVul.VulFixedVersion == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.VulFixedVersion(childComplexity), true
+
+ case "TrivyVul.vulId":
+ if e.complexity.TrivyVul.VulID == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.VulID(childComplexity), true
+
+ case "TrivyVul.vulInstalledVersion":
+ if e.complexity.TrivyVul.VulInstalledVersion == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.VulInstalledVersion(childComplexity), true
+
+ case "TrivyVul.vulLastModifiedDate":
+ if e.complexity.TrivyVul.VulLastModifiedDate == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.VulLastModifiedDate(childComplexity), true
+
+ case "TrivyVul.vulPkgId":
+ if e.complexity.TrivyVul.VulPkgID == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.VulPkgID(childComplexity), true
+
+ case "TrivyVul.vulPkgName":
+ if e.complexity.TrivyVul.VulPkgName == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.VulPkgName(childComplexity), true
+
+ case "TrivyVul.vulPkgPath":
+ if e.complexity.TrivyVul.VulPkgPath == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.VulPkgPath(childComplexity), true
+
+ case "TrivyVul.vulPublishedDate":
+ if e.complexity.TrivyVul.VulPublishedDate == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.VulPublishedDate(childComplexity), true
+
+ case "TrivyVul.vulSeverity":
+ if e.complexity.TrivyVul.VulSeverity == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.VulSeverity(childComplexity), true
+
+ case "TrivyVul.vulTitle":
+ if e.complexity.TrivyVul.VulTitle == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.VulTitle(childComplexity), true
+
+ case "TrivyVul.vulVendorIds":
+ if e.complexity.TrivyVul.VulVendorIds == nil {
+ break
+ }
+
+ return e.complexity.TrivyVul.VulVendorIds(childComplexity), true
+
+ case "Vulnerability.clusterName":
+ if e.complexity.Vulnerability.ClusterName == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.ClusterName(childComplexity), true
+
+ case "Vulnerability.expiryDate":
+ if e.complexity.Vulnerability.ExpiryDate == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.ExpiryDate(childComplexity), true
+
+ case "Vulnerability.exportedAt":
+ if e.complexity.Vulnerability.ExportedAt == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.ExportedAt(childComplexity), true
+
+ case "Vulnerability.id":
+ if e.complexity.Vulnerability.ID == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.ID(childComplexity), true
+
+ case "Vulnerability.kind":
+ if e.complexity.Vulnerability.Kind == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.Kind(childComplexity), true
+
+ case "Vulnerability.name":
+ if e.complexity.Vulnerability.Name == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.Name(childComplexity), true
+
+ case "Vulnerability.namespace":
+ if e.complexity.Vulnerability.Namespace == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.Namespace(childComplexity), true
+
+ case "Vulnerability.vulFixedVersion":
+ if e.complexity.Vulnerability.VulFixedVersion == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.VulFixedVersion(childComplexity), true
+
+ case "Vulnerability.vulId":
+ if e.complexity.Vulnerability.VulID == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.VulID(childComplexity), true
+
+ case "Vulnerability.vulInstalledVersion":
+ if e.complexity.Vulnerability.VulInstalledVersion == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.VulInstalledVersion(childComplexity), true
+
+ case "Vulnerability.vulLastModifiedDate":
+ if e.complexity.Vulnerability.VulLastModifiedDate == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.VulLastModifiedDate(childComplexity), true
+
+ case "Vulnerability.vulPkgId":
+ if e.complexity.Vulnerability.VulPkgID == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.VulPkgID(childComplexity), true
+
+ case "Vulnerability.vulPkgName":
+ if e.complexity.Vulnerability.VulPkgName == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.VulPkgName(childComplexity), true
+
+ case "Vulnerability.vulPkgPath":
+ if e.complexity.Vulnerability.VulPkgPath == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.VulPkgPath(childComplexity), true
+
+ case "Vulnerability.vulPublishedDate":
+ if e.complexity.Vulnerability.VulPublishedDate == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.VulPublishedDate(childComplexity), true
+
+ case "Vulnerability.vulSeverity":
+ if e.complexity.Vulnerability.VulSeverity == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.VulSeverity(childComplexity), true
+
+ case "Vulnerability.vulTitle":
+ if e.complexity.Vulnerability.VulTitle == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.VulTitle(childComplexity), true
+
+ case "Vulnerability.vulVendorIds":
+ if e.complexity.Vulnerability.VulVendorIds == nil {
+ break
+ }
+
+ return e.complexity.Vulnerability.VulVendorIds(childComplexity), true
+
+ }
+ return 0, false
+}
+
+func (e *executableSchema) Exec(ctx context.Context) graphql.ResponseHandler {
+ rc := graphql.GetOperationContext(ctx)
+ ec := executionContext{rc, e, 0, 0, make(chan graphql.DeferredResult)}
+ inputUnmarshalMap := graphql.BuildUnmarshalerMap()
+ first := true
+
+ switch rc.Operation.Operation {
+ case ast.Query:
+ return func(ctx context.Context) *graphql.Response {
+ var response graphql.Response
+ var data graphql.Marshaler
+ if first {
+ first = false
+ ctx = graphql.WithUnmarshalerMap(ctx, inputUnmarshalMap)
+ data = ec._Query(ctx, rc.Operation.SelectionSet)
+ } else {
+ if atomic.LoadInt32(&ec.pendingDeferred) > 0 {
+ result := <-ec.deferredResults
+ atomic.AddInt32(&ec.pendingDeferred, -1)
+ data = result.Result
+ response.Path = result.Path
+ response.Label = result.Label
+ response.Errors = result.Errors
+ } else {
+ return nil
+ }
+ }
+ var buf bytes.Buffer
+ data.MarshalGQL(&buf)
+ response.Data = buf.Bytes()
+ if atomic.LoadInt32(&ec.deferred) > 0 {
+ hasNext := atomic.LoadInt32(&ec.pendingDeferred) > 0
+ response.HasNext = &hasNext
+ }
+
+ return &response
+ }
+
+ default:
+ return graphql.OneShot(graphql.ErrorResponse(ctx, "unsupported GraphQL operation"))
+ }
+}
+
+type executionContext struct {
+ *graphql.OperationContext
+ *executableSchema
+ deferred int32
+ pendingDeferred int32
+ deferredResults chan graphql.DeferredResult
+}
+
+func (ec *executionContext) processDeferredGroup(dg graphql.DeferredGroup) {
+ atomic.AddInt32(&ec.pendingDeferred, 1)
+ go func() {
+ ctx := graphql.WithFreshResponseContext(dg.Context)
+ dg.FieldSet.Dispatch(ctx)
+ ds := graphql.DeferredResult{
+ Path: dg.Path,
+ Label: dg.Label,
+ Result: dg.FieldSet,
+ Errors: graphql.GetErrors(ctx),
+ }
+ // null fields should bubble up
+ if dg.FieldSet.Invalids > 0 {
+ ds.Result = graphql.Null
+ }
+ ec.deferredResults <- ds
+ }()
+}
+
+func (ec *executionContext) introspectSchema() (*introspection.Schema, error) {
+ if ec.DisableIntrospection {
+ return nil, errors.New("introspection disabled")
+ }
+ return introspection.WrapSchema(ec.Schema()), nil
+}
+
+func (ec *executionContext) introspectType(name string) (*introspection.Type, error) {
+ if ec.DisableIntrospection {
+ return nil, errors.New("introspection disabled")
+ }
+ return introspection.WrapTypeFromDef(ec.Schema(), ec.Schema().Types[name]), nil
+}
+
+//go:embed "schema.graphqls"
+var sourcesFS embed.FS
+
+func sourceData(filename string) string {
+ data, err := sourcesFS.ReadFile(filename)
+ if err != nil {
+ panic(fmt.Sprintf("codegen problem: %s not available", filename))
+ }
+ return string(data)
+}
+
+var sources = []*ast.Source{
+ {Name: "schema.graphqls", Input: sourceData("schema.graphqls"), BuiltIn: false},
+}
+var parsedSchema = gqlparser.MustLoadSchema(sources...)
+
+// endregion ************************** generated!.gotpl **************************
+
+// region ***************************** args.gotpl *****************************
+
+func (ec *executionContext) field_Query___type_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["name"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("name"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["name"] = arg0
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_deletedAPICount_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_deletedAPIs_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_deprecatedAPICount_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_deprecatedAPIs_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_eventsByClusterAndNamespace_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ var arg1 string
+ if tmp, ok := rawArgs["namespace"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("namespace"))
+ arg1, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["namespace"] = arg1
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_getAllResources_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ var arg1 string
+ if tmp, ok := rawArgs["namespace"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("namespace"))
+ arg1, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["namespace"] = arg1
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_kubescores_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clustername"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clustername"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clustername"] = arg0
+ var arg1 string
+ if tmp, ok := rawArgs["namespace"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("namespace"))
+ arg1, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["namespace"] = arg1
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_misconfigurations_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ var arg1 string
+ if tmp, ok := rawArgs["namespace"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("namespace"))
+ arg1, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["namespace"] = arg1
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_outdatedImagesByClusterAndNamespace_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ var arg1 string
+ if tmp, ok := rawArgs["namespace"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("namespace"))
+ arg1, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["namespace"] = arg1
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_outdatedImagesCount_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ var arg1 string
+ if tmp, ok := rawArgs["namespace"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("namespace"))
+ arg1, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["namespace"] = arg1
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_trivyImageCount_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_trivyImages_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_trivyMisconfigCount_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ var arg1 string
+ if tmp, ok := rawArgs["namespace"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("namespace"))
+ arg1, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["namespace"] = arg1
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_trivySBOMs_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_trivyVulCount_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ var arg1 string
+ if tmp, ok := rawArgs["namespace"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("namespace"))
+ arg1, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["namespace"] = arg1
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_uniqueNamespaces_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ return args, nil
+}
+
+func (ec *executionContext) field_Query_vulnerabilities_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 string
+ if tmp, ok := rawArgs["clusterName"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("clusterName"))
+ arg0, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["clusterName"] = arg0
+ var arg1 string
+ if tmp, ok := rawArgs["namespace"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("namespace"))
+ arg1, err = ec.unmarshalNString2string(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["namespace"] = arg1
+ return args, nil
+}
+
+func (ec *executionContext) field___Type_enumValues_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 bool
+ if tmp, ok := rawArgs["includeDeprecated"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated"))
+ arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["includeDeprecated"] = arg0
+ return args, nil
+}
+
+func (ec *executionContext) field___Type_fields_args(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
+ var err error
+ args := map[string]interface{}{}
+ var arg0 bool
+ if tmp, ok := rawArgs["includeDeprecated"]; ok {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField("includeDeprecated"))
+ arg0, err = ec.unmarshalOBoolean2bool(ctx, tmp)
+ if err != nil {
+ return nil, err
+ }
+ }
+ args["includeDeprecated"] = arg0
+ return args, nil
+}
+
+// endregion ***************************** args.gotpl *****************************
+
+// region ************************** directives.gotpl **************************
+
+// endregion ************************** directives.gotpl **************************
+
+// region **************************** field.gotpl *****************************
+
+func (ec *executionContext) _Cluster_name(ctx context.Context, field graphql.CollectedField, obj *model.Cluster) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Cluster_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Cluster_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Cluster",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterAPIsCount_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.ClusterAPIsCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterAPIsCount_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterAPIsCount_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterAPIsCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterAPIsCount_count(ctx context.Context, field graphql.CollectedField, obj *model.ClusterAPIsCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterAPIsCount_count(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Count, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterAPIsCount_count(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterAPIsCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterDeletedAPICount_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.ClusterDeletedAPICount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterDeletedAPICount_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterDeletedAPICount_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterDeletedAPICount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterDeletedAPICount_deletedAPICount(ctx context.Context, field graphql.CollectedField, obj *model.ClusterDeletedAPICount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterDeletedAPICount_deletedAPICount(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.DeletedAPICount, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterDeletedAPICount_deletedAPICount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterDeletedAPICount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterDeprecatedAPICount_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.ClusterDeprecatedAPICount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterDeprecatedAPICount_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterDeprecatedAPICount_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterDeprecatedAPICount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterDeprecatedAPICount_deprecatedAPICount(ctx context.Context, field graphql.CollectedField, obj *model.ClusterDeprecatedAPICount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterDeprecatedAPICount_deprecatedAPICount(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.DeprecatedAPICount, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterDeprecatedAPICount_deprecatedAPICount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterDeprecatedAPICount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterNamespaceMisconfigCount_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.ClusterNamespaceMisconfigCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterNamespaceMisconfigCount_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterNamespaceMisconfigCount_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterNamespaceMisconfigCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterNamespaceMisconfigCount_namespace(ctx context.Context, field graphql.CollectedField, obj *model.ClusterNamespaceMisconfigCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterNamespaceMisconfigCount_namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterNamespaceMisconfigCount_namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterNamespaceMisconfigCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterNamespaceMisconfigCount_misconfigCount(ctx context.Context, field graphql.CollectedField, obj *model.ClusterNamespaceMisconfigCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterNamespaceMisconfigCount_misconfigCount(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigCount, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterNamespaceMisconfigCount_misconfigCount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterNamespaceMisconfigCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterNamespaceOutdatedCount_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.ClusterNamespaceOutdatedCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterNamespaceOutdatedCount_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterNamespaceOutdatedCount_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterNamespaceOutdatedCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterNamespaceOutdatedCount_namespace(ctx context.Context, field graphql.CollectedField, obj *model.ClusterNamespaceOutdatedCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterNamespaceOutdatedCount_namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterNamespaceOutdatedCount_namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterNamespaceOutdatedCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterNamespaceOutdatedCount_outdatedCount(ctx context.Context, field graphql.CollectedField, obj *model.ClusterNamespaceOutdatedCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterNamespaceOutdatedCount_outdatedCount(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.OutdatedCount, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterNamespaceOutdatedCount_outdatedCount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterNamespaceOutdatedCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterNamespaceResourceCount_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.ClusterNamespaceResourceCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterNamespaceResourceCount_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterNamespaceResourceCount_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterNamespaceResourceCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterNamespaceResourceCount_namespace(ctx context.Context, field graphql.CollectedField, obj *model.ClusterNamespaceResourceCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterNamespaceResourceCount_namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterNamespaceResourceCount_namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterNamespaceResourceCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterNamespaceResourceCount_resourceCount(ctx context.Context, field graphql.CollectedField, obj *model.ClusterNamespaceResourceCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterNamespaceResourceCount_resourceCount(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ResourceCount, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterNamespaceResourceCount_resourceCount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterNamespaceResourceCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterNamespaceVulCount_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.ClusterNamespaceVulCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterNamespaceVulCount_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterNamespaceVulCount_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterNamespaceVulCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterNamespaceVulCount_namespace(ctx context.Context, field graphql.CollectedField, obj *model.ClusterNamespaceVulCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterNamespaceVulCount_namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterNamespaceVulCount_namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterNamespaceVulCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _ClusterNamespaceVulCount_vulCount(ctx context.Context, field graphql.CollectedField, obj *model.ClusterNamespaceVulCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_ClusterNamespaceVulCount_vulCount(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulCount, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_ClusterNamespaceVulCount_vulCount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "ClusterNamespaceVulCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeletedAPI_ClusterName(ctx context.Context, field graphql.CollectedField, obj *model.DeletedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeletedAPI_ClusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeletedAPI_ClusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeletedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeletedAPI_ObjectName(ctx context.Context, field graphql.CollectedField, obj *model.DeletedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeletedAPI_ObjectName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ObjectName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeletedAPI_ObjectName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeletedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeletedAPI_Group(ctx context.Context, field graphql.CollectedField, obj *model.DeletedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeletedAPI_Group(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Group, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeletedAPI_Group(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeletedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeletedAPI_Kind(ctx context.Context, field graphql.CollectedField, obj *model.DeletedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeletedAPI_Kind(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Kind, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeletedAPI_Kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeletedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeletedAPI_Version(ctx context.Context, field graphql.CollectedField, obj *model.DeletedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeletedAPI_Version(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Version, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeletedAPI_Version(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeletedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeletedAPI_Name(ctx context.Context, field graphql.CollectedField, obj *model.DeletedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeletedAPI_Name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeletedAPI_Name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeletedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeletedAPI_Deleted(ctx context.Context, field graphql.CollectedField, obj *model.DeletedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeletedAPI_Deleted(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Deleted, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*bool)
+ fc.Result = res
+ return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeletedAPI_Deleted(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeletedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Boolean does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeletedAPI_Scope(ctx context.Context, field graphql.CollectedField, obj *model.DeletedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeletedAPI_Scope(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Scope, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeletedAPI_Scope(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeletedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeletedAPI_EventTime(ctx context.Context, field graphql.CollectedField, obj *model.DeletedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeletedAPI_EventTime(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.EventTime, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeletedAPI_EventTime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeletedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeletedAPI_ExpiryDate(ctx context.Context, field graphql.CollectedField, obj *model.DeletedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeletedAPI_ExpiryDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ExpiryDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeletedAPI_ExpiryDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeletedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeprecatedAPI_ClusterName(ctx context.Context, field graphql.CollectedField, obj *model.DeprecatedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeprecatedAPI_ClusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeprecatedAPI_ClusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeprecatedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeprecatedAPI_ObjectName(ctx context.Context, field graphql.CollectedField, obj *model.DeprecatedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeprecatedAPI_ObjectName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ObjectName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeprecatedAPI_ObjectName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeprecatedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeprecatedAPI_Description(ctx context.Context, field graphql.CollectedField, obj *model.DeprecatedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeprecatedAPI_Description(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Description, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeprecatedAPI_Description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeprecatedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeprecatedAPI_Kind(ctx context.Context, field graphql.CollectedField, obj *model.DeprecatedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeprecatedAPI_Kind(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Kind, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeprecatedAPI_Kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeprecatedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeprecatedAPI_Deprecated(ctx context.Context, field graphql.CollectedField, obj *model.DeprecatedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeprecatedAPI_Deprecated(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Deprecated, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*bool)
+ fc.Result = res
+ return ec.marshalOBoolean2ᚖbool(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeprecatedAPI_Deprecated(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeprecatedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Boolean does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeprecatedAPI_Scope(ctx context.Context, field graphql.CollectedField, obj *model.DeprecatedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeprecatedAPI_Scope(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Scope, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeprecatedAPI_Scope(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeprecatedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeprecatedAPI_EventTime(ctx context.Context, field graphql.CollectedField, obj *model.DeprecatedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeprecatedAPI_EventTime(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.EventTime, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeprecatedAPI_EventTime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeprecatedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _DeprecatedAPI_ExpiryDate(ctx context.Context, field graphql.CollectedField, obj *model.DeprecatedAPI) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_DeprecatedAPI_ExpiryDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ExpiryDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_DeprecatedAPI_ExpiryDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "DeprecatedAPI",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_ClusterName(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_ClusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_ClusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_Id(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_Id(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_Id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_EventTime(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_EventTime(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.EventTime, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_EventTime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_OpType(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_OpType(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.OpType, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_OpType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_Name(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_Name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_Name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_Namespace(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_Namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_Namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_Kind(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_Kind(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Kind, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_Kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_Message(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_Message(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Message, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_Message(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_Reason(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_Reason(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Reason, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_Reason(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_Host(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_Host(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Host, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_Host(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_Event(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_Event(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Event, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_Event(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_ImageName(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_ImageName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ImageName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_ImageName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_FirstTime(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_FirstTime(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.FirstTime, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_FirstTime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_LastTime(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_LastTime(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.LastTime, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_LastTime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Event_ExpiryDate(ctx context.Context, field graphql.CollectedField, obj *model.Event) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Event_ExpiryDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ExpiryDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Event_ExpiryDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Event",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _GetAllResource_ClusterName(ctx context.Context, field graphql.CollectedField, obj *model.GetAllResource) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_GetAllResource_ClusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_GetAllResource_ClusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "GetAllResource",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _GetAllResource_Namespace(ctx context.Context, field graphql.CollectedField, obj *model.GetAllResource) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_GetAllResource_Namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_GetAllResource_Namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "GetAllResource",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _GetAllResource_Kind(ctx context.Context, field graphql.CollectedField, obj *model.GetAllResource) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_GetAllResource_Kind(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Kind, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_GetAllResource_Kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "GetAllResource",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _GetAllResource_Resource(ctx context.Context, field graphql.CollectedField, obj *model.GetAllResource) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_GetAllResource_Resource(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Resource, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_GetAllResource_Resource(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "GetAllResource",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _GetAllResource_Age(ctx context.Context, field graphql.CollectedField, obj *model.GetAllResource) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_GetAllResource_Age(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Age, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_GetAllResource_Age(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "GetAllResource",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _GetAllResource_EventTime(ctx context.Context, field graphql.CollectedField, obj *model.GetAllResource) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_GetAllResource_EventTime(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.EventTime, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_GetAllResource_EventTime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "GetAllResource",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _GetAllResource_ExpiryDate(ctx context.Context, field graphql.CollectedField, obj *model.GetAllResource) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_GetAllResource_ExpiryDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ExpiryDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_GetAllResource_ExpiryDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "GetAllResource",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _KubeScore_id(ctx context.Context, field graphql.CollectedField, obj *model.KubeScore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_KubeScore_id(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNID2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_KubeScore_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "KubeScore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type ID does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _KubeScore_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.KubeScore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_KubeScore_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_KubeScore_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "KubeScore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _KubeScore_objectName(ctx context.Context, field graphql.CollectedField, obj *model.KubeScore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_KubeScore_objectName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ObjectName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_KubeScore_objectName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "KubeScore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _KubeScore_kind(ctx context.Context, field graphql.CollectedField, obj *model.KubeScore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_KubeScore_kind(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Kind, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_KubeScore_kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "KubeScore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _KubeScore_apiVersion(ctx context.Context, field graphql.CollectedField, obj *model.KubeScore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_KubeScore_apiVersion(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.APIVersion, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_KubeScore_apiVersion(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "KubeScore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _KubeScore_name(ctx context.Context, field graphql.CollectedField, obj *model.KubeScore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_KubeScore_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_KubeScore_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "KubeScore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _KubeScore_namespace(ctx context.Context, field graphql.CollectedField, obj *model.KubeScore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_KubeScore_namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_KubeScore_namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "KubeScore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _KubeScore_targetType(ctx context.Context, field graphql.CollectedField, obj *model.KubeScore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_KubeScore_targetType(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.TargetType, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_KubeScore_targetType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "KubeScore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _KubeScore_description(ctx context.Context, field graphql.CollectedField, obj *model.KubeScore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_KubeScore_description(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Description, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_KubeScore_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "KubeScore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _KubeScore_path(ctx context.Context, field graphql.CollectedField, obj *model.KubeScore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_KubeScore_path(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Path, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_KubeScore_path(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "KubeScore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _KubeScore_summary(ctx context.Context, field graphql.CollectedField, obj *model.KubeScore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_KubeScore_summary(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Summary, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_KubeScore_summary(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "KubeScore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _KubeScore_fileName(ctx context.Context, field graphql.CollectedField, obj *model.KubeScore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_KubeScore_fileName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.FileName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_KubeScore_fileName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "KubeScore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _KubeScore_fileRow(ctx context.Context, field graphql.CollectedField, obj *model.KubeScore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_KubeScore_fileRow(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.FileRow, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_KubeScore_fileRow(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "KubeScore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _KubeScore_eventTime(ctx context.Context, field graphql.CollectedField, obj *model.KubeScore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_KubeScore_eventTime(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.EventTime, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_KubeScore_eventTime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "KubeScore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Kubescore_id(ctx context.Context, field graphql.CollectedField, obj *model.Kubescore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Kubescore_id(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNID2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Kubescore_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Kubescore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type ID does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Kubescore_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.Kubescore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Kubescore_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Kubescore_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Kubescore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Kubescore_objectName(ctx context.Context, field graphql.CollectedField, obj *model.Kubescore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Kubescore_objectName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ObjectName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Kubescore_objectName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Kubescore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Kubescore_kind(ctx context.Context, field graphql.CollectedField, obj *model.Kubescore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Kubescore_kind(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Kind, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Kubescore_kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Kubescore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Kubescore_apiVersion(ctx context.Context, field graphql.CollectedField, obj *model.Kubescore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Kubescore_apiVersion(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.APIVersion, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Kubescore_apiVersion(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Kubescore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Kubescore_name(ctx context.Context, field graphql.CollectedField, obj *model.Kubescore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Kubescore_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Kubescore_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Kubescore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Kubescore_namespace(ctx context.Context, field graphql.CollectedField, obj *model.Kubescore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Kubescore_namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Kubescore_namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Kubescore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Kubescore_targetType(ctx context.Context, field graphql.CollectedField, obj *model.Kubescore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Kubescore_targetType(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.TargetType, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Kubescore_targetType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Kubescore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Kubescore_description(ctx context.Context, field graphql.CollectedField, obj *model.Kubescore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Kubescore_description(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Description, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Kubescore_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Kubescore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Kubescore_path(ctx context.Context, field graphql.CollectedField, obj *model.Kubescore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Kubescore_path(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Path, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Kubescore_path(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Kubescore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Kubescore_summary(ctx context.Context, field graphql.CollectedField, obj *model.Kubescore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Kubescore_summary(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Summary, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Kubescore_summary(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Kubescore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Kubescore_fileName(ctx context.Context, field graphql.CollectedField, obj *model.Kubescore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Kubescore_fileName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.FileName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Kubescore_fileName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Kubescore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Kubescore_fileRow(ctx context.Context, field graphql.CollectedField, obj *model.Kubescore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Kubescore_fileRow(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.FileRow, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*int)
+ fc.Result = res
+ return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Kubescore_fileRow(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Kubescore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Kubescore_eventTime(ctx context.Context, field graphql.CollectedField, obj *model.Kubescore) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Kubescore_eventTime(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.EventTime, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Kubescore_eventTime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Kubescore",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_id(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_id(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNID2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type ID does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_namespace(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_kind(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_kind(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Kind, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_name(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_misconfigId(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_misconfigId(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_misconfigId(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_misconfigAvdid(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_misconfigAvdid(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigAvdid, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_misconfigAvdid(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_misconfigType(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_misconfigType(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigType, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_misconfigType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_misconfigTitle(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_misconfigTitle(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigTitle, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_misconfigTitle(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_misconfigDesc(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_misconfigDesc(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigDesc, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_misconfigDesc(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_misconfigMsg(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_misconfigMsg(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigMsg, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_misconfigMsg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_misconfigQuery(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_misconfigQuery(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigQuery, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_misconfigQuery(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_misconfigResolution(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_misconfigResolution(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigResolution, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_misconfigResolution(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_misconfigSeverity(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_misconfigSeverity(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigSeverity, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_misconfigSeverity(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_misconfigStatus(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_misconfigStatus(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigStatus, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_misconfigStatus(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_eventTime(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_eventTime(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.EventTime, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_eventTime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_expiryDate(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_expiryDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ExpiryDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_expiryDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Misconfiguration_exportedAt(ctx context.Context, field graphql.CollectedField, obj *model.Misconfiguration) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Misconfiguration_exportedAt(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ExportedAt, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Misconfiguration_exportedAt(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Misconfiguration",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Namespace_name(ctx context.Context, field graphql.CollectedField, obj *model.Namespace) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Namespace_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Namespace_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Namespace",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _NamespaceData_namespace(ctx context.Context, field graphql.CollectedField, obj *model.NamespaceData) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NamespaceData_namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_NamespaceData_namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "NamespaceData",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _NamespaceData_outdatedImages(ctx context.Context, field graphql.CollectedField, obj *model.NamespaceData) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NamespaceData_outdatedImages(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.OutdatedImages, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.OutdatedImage)
+ fc.Result = res
+ return ec.marshalNOutdatedImage2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐOutdatedImageᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_NamespaceData_outdatedImages(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "NamespaceData",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "clusterName":
+ return ec.fieldContext_OutdatedImage_clusterName(ctx, field)
+ case "namespace":
+ return ec.fieldContext_OutdatedImage_namespace(ctx, field)
+ case "pod":
+ return ec.fieldContext_OutdatedImage_pod(ctx, field)
+ case "currentImage":
+ return ec.fieldContext_OutdatedImage_currentImage(ctx, field)
+ case "currentTag":
+ return ec.fieldContext_OutdatedImage_currentTag(ctx, field)
+ case "latestVersion":
+ return ec.fieldContext_OutdatedImage_latestVersion(ctx, field)
+ case "versionsBehind":
+ return ec.fieldContext_OutdatedImage_versionsBehind(ctx, field)
+ case "eventTime":
+ return ec.fieldContext_OutdatedImage_eventTime(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type OutdatedImage", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _NamespaceData_kubeScores(ctx context.Context, field graphql.CollectedField, obj *model.NamespaceData) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NamespaceData_kubeScores(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.KubeScores, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.KubeScore)
+ fc.Result = res
+ return ec.marshalNKubeScore2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐKubeScoreᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_NamespaceData_kubeScores(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "NamespaceData",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "id":
+ return ec.fieldContext_KubeScore_id(ctx, field)
+ case "clusterName":
+ return ec.fieldContext_KubeScore_clusterName(ctx, field)
+ case "objectName":
+ return ec.fieldContext_KubeScore_objectName(ctx, field)
+ case "kind":
+ return ec.fieldContext_KubeScore_kind(ctx, field)
+ case "apiVersion":
+ return ec.fieldContext_KubeScore_apiVersion(ctx, field)
+ case "name":
+ return ec.fieldContext_KubeScore_name(ctx, field)
+ case "namespace":
+ return ec.fieldContext_KubeScore_namespace(ctx, field)
+ case "targetType":
+ return ec.fieldContext_KubeScore_targetType(ctx, field)
+ case "description":
+ return ec.fieldContext_KubeScore_description(ctx, field)
+ case "path":
+ return ec.fieldContext_KubeScore_path(ctx, field)
+ case "summary":
+ return ec.fieldContext_KubeScore_summary(ctx, field)
+ case "fileName":
+ return ec.fieldContext_KubeScore_fileName(ctx, field)
+ case "fileRow":
+ return ec.fieldContext_KubeScore_fileRow(ctx, field)
+ case "eventTime":
+ return ec.fieldContext_KubeScore_eventTime(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type KubeScore", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _NamespaceData_resources(ctx context.Context, field graphql.CollectedField, obj *model.NamespaceData) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_NamespaceData_resources(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Resources, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.Resource)
+ fc.Result = res
+ return ec.marshalNResource2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐResourceᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_NamespaceData_resources(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "NamespaceData",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "clusterName":
+ return ec.fieldContext_Resource_clusterName(ctx, field)
+ case "namespace":
+ return ec.fieldContext_Resource_namespace(ctx, field)
+ case "kind":
+ return ec.fieldContext_Resource_kind(ctx, field)
+ case "resource":
+ return ec.fieldContext_Resource_resource(ctx, field)
+ case "age":
+ return ec.fieldContext_Resource_age(ctx, field)
+ case "eventTime":
+ return ec.fieldContext_Resource_eventTime(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type Resource", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _OutdatedImage_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.OutdatedImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_OutdatedImage_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_OutdatedImage_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "OutdatedImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _OutdatedImage_namespace(ctx context.Context, field graphql.CollectedField, obj *model.OutdatedImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_OutdatedImage_namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_OutdatedImage_namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "OutdatedImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _OutdatedImage_pod(ctx context.Context, field graphql.CollectedField, obj *model.OutdatedImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_OutdatedImage_pod(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Pod, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_OutdatedImage_pod(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "OutdatedImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _OutdatedImage_currentImage(ctx context.Context, field graphql.CollectedField, obj *model.OutdatedImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_OutdatedImage_currentImage(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.CurrentImage, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_OutdatedImage_currentImage(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "OutdatedImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _OutdatedImage_currentTag(ctx context.Context, field graphql.CollectedField, obj *model.OutdatedImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_OutdatedImage_currentTag(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.CurrentTag, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_OutdatedImage_currentTag(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "OutdatedImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _OutdatedImage_latestVersion(ctx context.Context, field graphql.CollectedField, obj *model.OutdatedImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_OutdatedImage_latestVersion(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.LatestVersion, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_OutdatedImage_latestVersion(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "OutdatedImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _OutdatedImage_versionsBehind(ctx context.Context, field graphql.CollectedField, obj *model.OutdatedImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_OutdatedImage_versionsBehind(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VersionsBehind, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_OutdatedImage_versionsBehind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "OutdatedImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _OutdatedImage_eventTime(ctx context.Context, field graphql.CollectedField, obj *model.OutdatedImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_OutdatedImage_eventTime(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.EventTime, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_OutdatedImage_eventTime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "OutdatedImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_allEvents(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_allEvents(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().AllEvents(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.Event)
+ fc.Result = res
+ return ec.marshalNEvent2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐEventᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_allEvents(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "ClusterName":
+ return ec.fieldContext_Event_ClusterName(ctx, field)
+ case "Id":
+ return ec.fieldContext_Event_Id(ctx, field)
+ case "EventTime":
+ return ec.fieldContext_Event_EventTime(ctx, field)
+ case "OpType":
+ return ec.fieldContext_Event_OpType(ctx, field)
+ case "Name":
+ return ec.fieldContext_Event_Name(ctx, field)
+ case "Namespace":
+ return ec.fieldContext_Event_Namespace(ctx, field)
+ case "Kind":
+ return ec.fieldContext_Event_Kind(ctx, field)
+ case "Message":
+ return ec.fieldContext_Event_Message(ctx, field)
+ case "Reason":
+ return ec.fieldContext_Event_Reason(ctx, field)
+ case "Host":
+ return ec.fieldContext_Event_Host(ctx, field)
+ case "Event":
+ return ec.fieldContext_Event_Event(ctx, field)
+ case "ImageName":
+ return ec.fieldContext_Event_ImageName(ctx, field)
+ case "FirstTime":
+ return ec.fieldContext_Event_FirstTime(ctx, field)
+ case "LastTime":
+ return ec.fieldContext_Event_LastTime(ctx, field)
+ case "ExpiryDate":
+ return ec.fieldContext_Event_ExpiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type Event", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_allRakkess(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_allRakkess(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().AllRakkess(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.Rakkess)
+ fc.Result = res
+ return ec.marshalNRakkess2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐRakkessᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_allRakkess(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "ClusterName":
+ return ec.fieldContext_Rakkess_ClusterName(ctx, field)
+ case "Name":
+ return ec.fieldContext_Rakkess_Name(ctx, field)
+ case "Create":
+ return ec.fieldContext_Rakkess_Create(ctx, field)
+ case "Delete":
+ return ec.fieldContext_Rakkess_Delete(ctx, field)
+ case "List":
+ return ec.fieldContext_Rakkess_List(ctx, field)
+ case "Update":
+ return ec.fieldContext_Rakkess_Update(ctx, field)
+ case "EventTime":
+ return ec.fieldContext_Rakkess_EventTime(ctx, field)
+ case "ExpiryDate":
+ return ec.fieldContext_Rakkess_ExpiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type Rakkess", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_allDeprecatedAPIs(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_allDeprecatedAPIs(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().AllDeprecatedAPIs(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.DeprecatedAPI)
+ fc.Result = res
+ return ec.marshalNDeprecatedAPI2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐDeprecatedAPIᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_allDeprecatedAPIs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "ClusterName":
+ return ec.fieldContext_DeprecatedAPI_ClusterName(ctx, field)
+ case "ObjectName":
+ return ec.fieldContext_DeprecatedAPI_ObjectName(ctx, field)
+ case "Description":
+ return ec.fieldContext_DeprecatedAPI_Description(ctx, field)
+ case "Kind":
+ return ec.fieldContext_DeprecatedAPI_Kind(ctx, field)
+ case "Deprecated":
+ return ec.fieldContext_DeprecatedAPI_Deprecated(ctx, field)
+ case "Scope":
+ return ec.fieldContext_DeprecatedAPI_Scope(ctx, field)
+ case "EventTime":
+ return ec.fieldContext_DeprecatedAPI_EventTime(ctx, field)
+ case "ExpiryDate":
+ return ec.fieldContext_DeprecatedAPI_ExpiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type DeprecatedAPI", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_allDeletedAPIs(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_allDeletedAPIs(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().AllDeletedAPIs(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.DeletedAPI)
+ fc.Result = res
+ return ec.marshalNDeletedAPI2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐDeletedAPIᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_allDeletedAPIs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "ClusterName":
+ return ec.fieldContext_DeletedAPI_ClusterName(ctx, field)
+ case "ObjectName":
+ return ec.fieldContext_DeletedAPI_ObjectName(ctx, field)
+ case "Group":
+ return ec.fieldContext_DeletedAPI_Group(ctx, field)
+ case "Kind":
+ return ec.fieldContext_DeletedAPI_Kind(ctx, field)
+ case "Version":
+ return ec.fieldContext_DeletedAPI_Version(ctx, field)
+ case "Name":
+ return ec.fieldContext_DeletedAPI_Name(ctx, field)
+ case "Deleted":
+ return ec.fieldContext_DeletedAPI_Deleted(ctx, field)
+ case "Scope":
+ return ec.fieldContext_DeletedAPI_Scope(ctx, field)
+ case "EventTime":
+ return ec.fieldContext_DeletedAPI_EventTime(ctx, field)
+ case "ExpiryDate":
+ return ec.fieldContext_DeletedAPI_ExpiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type DeletedAPI", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_allGetAllResources(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_allGetAllResources(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().AllGetAllResources(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.GetAllResource)
+ fc.Result = res
+ return ec.marshalNGetAllResource2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐGetAllResourceᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_allGetAllResources(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "ClusterName":
+ return ec.fieldContext_GetAllResource_ClusterName(ctx, field)
+ case "Namespace":
+ return ec.fieldContext_GetAllResource_Namespace(ctx, field)
+ case "Kind":
+ return ec.fieldContext_GetAllResource_Kind(ctx, field)
+ case "Resource":
+ return ec.fieldContext_GetAllResource_Resource(ctx, field)
+ case "Age":
+ return ec.fieldContext_GetAllResource_Age(ctx, field)
+ case "EventTime":
+ return ec.fieldContext_GetAllResource_EventTime(ctx, field)
+ case "ExpiryDate":
+ return ec.fieldContext_GetAllResource_ExpiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type GetAllResource", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_allTrivySBOMs(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_allTrivySBOMs(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().AllTrivySBOMs(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.TrivySbom)
+ fc.Result = res
+ return ec.marshalNTrivySBOM2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivySbomᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_allTrivySBOMs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "id":
+ return ec.fieldContext_TrivySBOM_id(ctx, field)
+ case "clusterName":
+ return ec.fieldContext_TrivySBOM_clusterName(ctx, field)
+ case "imageName":
+ return ec.fieldContext_TrivySBOM_imageName(ctx, field)
+ case "packageName":
+ return ec.fieldContext_TrivySBOM_packageName(ctx, field)
+ case "packageUrl":
+ return ec.fieldContext_TrivySBOM_packageUrl(ctx, field)
+ case "bomRef":
+ return ec.fieldContext_TrivySBOM_bomRef(ctx, field)
+ case "serialNumber":
+ return ec.fieldContext_TrivySBOM_serialNumber(ctx, field)
+ case "version":
+ return ec.fieldContext_TrivySBOM_version(ctx, field)
+ case "bomFormat":
+ return ec.fieldContext_TrivySBOM_bomFormat(ctx, field)
+ case "expiryDate":
+ return ec.fieldContext_TrivySBOM_expiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type TrivySBOM", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_allTrivyImages(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_allTrivyImages(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().AllTrivyImages(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.TrivyImage)
+ fc.Result = res
+ return ec.marshalNTrivyImage2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyImageᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_allTrivyImages(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "id":
+ return ec.fieldContext_TrivyImage_id(ctx, field)
+ case "clusterName":
+ return ec.fieldContext_TrivyImage_clusterName(ctx, field)
+ case "artifactName":
+ return ec.fieldContext_TrivyImage_artifactName(ctx, field)
+ case "vulId":
+ return ec.fieldContext_TrivyImage_vulId(ctx, field)
+ case "vulPkgId":
+ return ec.fieldContext_TrivyImage_vulPkgId(ctx, field)
+ case "vulPkgName":
+ return ec.fieldContext_TrivyImage_vulPkgName(ctx, field)
+ case "vulInstalledVersion":
+ return ec.fieldContext_TrivyImage_vulInstalledVersion(ctx, field)
+ case "vulFixedVersion":
+ return ec.fieldContext_TrivyImage_vulFixedVersion(ctx, field)
+ case "vulTitle":
+ return ec.fieldContext_TrivyImage_vulTitle(ctx, field)
+ case "vulSeverity":
+ return ec.fieldContext_TrivyImage_vulSeverity(ctx, field)
+ case "vulPublishedDate":
+ return ec.fieldContext_TrivyImage_vulPublishedDate(ctx, field)
+ case "vulLastModifiedDate":
+ return ec.fieldContext_TrivyImage_vulLastModifiedDate(ctx, field)
+ case "expiryDate":
+ return ec.fieldContext_TrivyImage_expiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type TrivyImage", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_allKubeScores(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_allKubeScores(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().AllKubeScores(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.Kubescore)
+ fc.Result = res
+ return ec.marshalNKubescore2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐKubescoreᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_allKubeScores(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "id":
+ return ec.fieldContext_Kubescore_id(ctx, field)
+ case "clusterName":
+ return ec.fieldContext_Kubescore_clusterName(ctx, field)
+ case "objectName":
+ return ec.fieldContext_Kubescore_objectName(ctx, field)
+ case "kind":
+ return ec.fieldContext_Kubescore_kind(ctx, field)
+ case "apiVersion":
+ return ec.fieldContext_Kubescore_apiVersion(ctx, field)
+ case "name":
+ return ec.fieldContext_Kubescore_name(ctx, field)
+ case "namespace":
+ return ec.fieldContext_Kubescore_namespace(ctx, field)
+ case "targetType":
+ return ec.fieldContext_Kubescore_targetType(ctx, field)
+ case "description":
+ return ec.fieldContext_Kubescore_description(ctx, field)
+ case "path":
+ return ec.fieldContext_Kubescore_path(ctx, field)
+ case "summary":
+ return ec.fieldContext_Kubescore_summary(ctx, field)
+ case "fileName":
+ return ec.fieldContext_Kubescore_fileName(ctx, field)
+ case "fileRow":
+ return ec.fieldContext_Kubescore_fileRow(ctx, field)
+ case "eventTime":
+ return ec.fieldContext_Kubescore_eventTime(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type Kubescore", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_allTrivyVuls(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_allTrivyVuls(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().AllTrivyVuls(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.TrivyVul)
+ fc.Result = res
+ return ec.marshalNTrivyVul2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyVulᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_allTrivyVuls(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "id":
+ return ec.fieldContext_TrivyVul_id(ctx, field)
+ case "clusterName":
+ return ec.fieldContext_TrivyVul_clusterName(ctx, field)
+ case "namespace":
+ return ec.fieldContext_TrivyVul_namespace(ctx, field)
+ case "kind":
+ return ec.fieldContext_TrivyVul_kind(ctx, field)
+ case "name":
+ return ec.fieldContext_TrivyVul_name(ctx, field)
+ case "vulId":
+ return ec.fieldContext_TrivyVul_vulId(ctx, field)
+ case "vulVendorIds":
+ return ec.fieldContext_TrivyVul_vulVendorIds(ctx, field)
+ case "vulPkgId":
+ return ec.fieldContext_TrivyVul_vulPkgId(ctx, field)
+ case "vulPkgName":
+ return ec.fieldContext_TrivyVul_vulPkgName(ctx, field)
+ case "vulPkgPath":
+ return ec.fieldContext_TrivyVul_vulPkgPath(ctx, field)
+ case "vulInstalledVersion":
+ return ec.fieldContext_TrivyVul_vulInstalledVersion(ctx, field)
+ case "vulFixedVersion":
+ return ec.fieldContext_TrivyVul_vulFixedVersion(ctx, field)
+ case "vulTitle":
+ return ec.fieldContext_TrivyVul_vulTitle(ctx, field)
+ case "vulSeverity":
+ return ec.fieldContext_TrivyVul_vulSeverity(ctx, field)
+ case "vulPublishedDate":
+ return ec.fieldContext_TrivyVul_vulPublishedDate(ctx, field)
+ case "vulLastModifiedDate":
+ return ec.fieldContext_TrivyVul_vulLastModifiedDate(ctx, field)
+ case "expiryDate":
+ return ec.fieldContext_TrivyVul_expiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type TrivyVul", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_allTrivyMisconfigs(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_allTrivyMisconfigs(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().AllTrivyMisconfigs(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.TrivyMisconfig)
+ fc.Result = res
+ return ec.marshalNTrivyMisconfig2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyMisconfigᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_allTrivyMisconfigs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "id":
+ return ec.fieldContext_TrivyMisconfig_id(ctx, field)
+ case "clusterName":
+ return ec.fieldContext_TrivyMisconfig_clusterName(ctx, field)
+ case "namespace":
+ return ec.fieldContext_TrivyMisconfig_namespace(ctx, field)
+ case "kind":
+ return ec.fieldContext_TrivyMisconfig_kind(ctx, field)
+ case "name":
+ return ec.fieldContext_TrivyMisconfig_name(ctx, field)
+ case "misconfigId":
+ return ec.fieldContext_TrivyMisconfig_misconfigId(ctx, field)
+ case "misconfigAvdid":
+ return ec.fieldContext_TrivyMisconfig_misconfigAvdid(ctx, field)
+ case "misconfigType":
+ return ec.fieldContext_TrivyMisconfig_misconfigType(ctx, field)
+ case "misconfigTitle":
+ return ec.fieldContext_TrivyMisconfig_misconfigTitle(ctx, field)
+ case "misconfigDesc":
+ return ec.fieldContext_TrivyMisconfig_misconfigDesc(ctx, field)
+ case "misconfigMsg":
+ return ec.fieldContext_TrivyMisconfig_misconfigMsg(ctx, field)
+ case "misconfigQuery":
+ return ec.fieldContext_TrivyMisconfig_misconfigQuery(ctx, field)
+ case "misconfigResolution":
+ return ec.fieldContext_TrivyMisconfig_misconfigResolution(ctx, field)
+ case "misconfigSeverity":
+ return ec.fieldContext_TrivyMisconfig_misconfigSeverity(ctx, field)
+ case "misconfigStatus":
+ return ec.fieldContext_TrivyMisconfig_misconfigStatus(ctx, field)
+ case "eventTime":
+ return ec.fieldContext_TrivyMisconfig_eventTime(ctx, field)
+ case "expiryDate":
+ return ec.fieldContext_TrivyMisconfig_expiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type TrivyMisconfig", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_uniqueNamespaces(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_uniqueNamespaces(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().UniqueNamespaces(rctx, fc.Args["clusterName"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.Namespace)
+ fc.Result = res
+ return ec.marshalNNamespace2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐNamespaceᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_uniqueNamespaces(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "name":
+ return ec.fieldContext_Namespace_name(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type Namespace", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_uniqueNamespaces_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_uniqueClusters(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_uniqueClusters(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().UniqueClusters(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.Cluster)
+ fc.Result = res
+ return ec.marshalNCluster2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_uniqueClusters(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "name":
+ return ec.fieldContext_Cluster_name(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type Cluster", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_outdatedImagesByClusterAndNamespace(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_outdatedImagesByClusterAndNamespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().OutdatedImagesByClusterAndNamespace(rctx, fc.Args["clusterName"].(string), fc.Args["namespace"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.OutdatedImage)
+ fc.Result = res
+ return ec.marshalNOutdatedImage2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐOutdatedImageᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_outdatedImagesByClusterAndNamespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "clusterName":
+ return ec.fieldContext_OutdatedImage_clusterName(ctx, field)
+ case "namespace":
+ return ec.fieldContext_OutdatedImage_namespace(ctx, field)
+ case "pod":
+ return ec.fieldContext_OutdatedImage_pod(ctx, field)
+ case "currentImage":
+ return ec.fieldContext_OutdatedImage_currentImage(ctx, field)
+ case "currentTag":
+ return ec.fieldContext_OutdatedImage_currentTag(ctx, field)
+ case "latestVersion":
+ return ec.fieldContext_OutdatedImage_latestVersion(ctx, field)
+ case "versionsBehind":
+ return ec.fieldContext_OutdatedImage_versionsBehind(ctx, field)
+ case "eventTime":
+ return ec.fieldContext_OutdatedImage_eventTime(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type OutdatedImage", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_outdatedImagesByClusterAndNamespace_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_outdatedImagesCount(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_outdatedImagesCount(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().OutdatedImagesCount(rctx, fc.Args["clusterName"].(string), fc.Args["namespace"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_outdatedImagesCount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_outdatedImagesCount_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_allClusterNamespaceOutdatedCounts(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_allClusterNamespaceOutdatedCounts(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().AllClusterNamespaceOutdatedCounts(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.ClusterNamespaceOutdatedCount)
+ fc.Result = res
+ return ec.marshalNClusterNamespaceOutdatedCount2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterNamespaceOutdatedCountᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_allClusterNamespaceOutdatedCounts(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "clusterName":
+ return ec.fieldContext_ClusterNamespaceOutdatedCount_clusterName(ctx, field)
+ case "namespace":
+ return ec.fieldContext_ClusterNamespaceOutdatedCount_namespace(ctx, field)
+ case "outdatedCount":
+ return ec.fieldContext_ClusterNamespaceOutdatedCount_outdatedCount(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type ClusterNamespaceOutdatedCount", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_allClusterDeprecatedAPIsCounts(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_allClusterDeprecatedAPIsCounts(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().AllClusterDeprecatedAPIsCounts(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.ClusterAPIsCount)
+ fc.Result = res
+ return ec.marshalNClusterAPIsCount2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterAPIsCountᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_allClusterDeprecatedAPIsCounts(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "clusterName":
+ return ec.fieldContext_ClusterAPIsCount_clusterName(ctx, field)
+ case "count":
+ return ec.fieldContext_ClusterAPIsCount_count(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type ClusterAPIsCount", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_allClusterDeletedAPIsCounts(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_allClusterDeletedAPIsCounts(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().AllClusterDeletedAPIsCounts(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.ClusterAPIsCount)
+ fc.Result = res
+ return ec.marshalNClusterAPIsCount2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterAPIsCountᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_allClusterDeletedAPIsCounts(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "clusterName":
+ return ec.fieldContext_ClusterAPIsCount_clusterName(ctx, field)
+ case "count":
+ return ec.fieldContext_ClusterAPIsCount_count(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type ClusterAPIsCount", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_allClusterNamespaceResourceCounts(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_allClusterNamespaceResourceCounts(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().AllClusterNamespaceResourceCounts(rctx)
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.ClusterNamespaceResourceCount)
+ fc.Result = res
+ return ec.marshalNClusterNamespaceResourceCount2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterNamespaceResourceCountᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_allClusterNamespaceResourceCounts(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "clusterName":
+ return ec.fieldContext_ClusterNamespaceResourceCount_clusterName(ctx, field)
+ case "namespace":
+ return ec.fieldContext_ClusterNamespaceResourceCount_namespace(ctx, field)
+ case "resourceCount":
+ return ec.fieldContext_ClusterNamespaceResourceCount_resourceCount(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type ClusterNamespaceResourceCount", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_eventsByClusterAndNamespace(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_eventsByClusterAndNamespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().EventsByClusterAndNamespace(rctx, fc.Args["clusterName"].(string), fc.Args["namespace"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.Event)
+ fc.Result = res
+ return ec.marshalNEvent2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐEventᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_eventsByClusterAndNamespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "ClusterName":
+ return ec.fieldContext_Event_ClusterName(ctx, field)
+ case "Id":
+ return ec.fieldContext_Event_Id(ctx, field)
+ case "EventTime":
+ return ec.fieldContext_Event_EventTime(ctx, field)
+ case "OpType":
+ return ec.fieldContext_Event_OpType(ctx, field)
+ case "Name":
+ return ec.fieldContext_Event_Name(ctx, field)
+ case "Namespace":
+ return ec.fieldContext_Event_Namespace(ctx, field)
+ case "Kind":
+ return ec.fieldContext_Event_Kind(ctx, field)
+ case "Message":
+ return ec.fieldContext_Event_Message(ctx, field)
+ case "Reason":
+ return ec.fieldContext_Event_Reason(ctx, field)
+ case "Host":
+ return ec.fieldContext_Event_Host(ctx, field)
+ case "Event":
+ return ec.fieldContext_Event_Event(ctx, field)
+ case "ImageName":
+ return ec.fieldContext_Event_ImageName(ctx, field)
+ case "FirstTime":
+ return ec.fieldContext_Event_FirstTime(ctx, field)
+ case "LastTime":
+ return ec.fieldContext_Event_LastTime(ctx, field)
+ case "ExpiryDate":
+ return ec.fieldContext_Event_ExpiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type Event", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_eventsByClusterAndNamespace_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_vulnerabilities(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_vulnerabilities(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().Vulnerabilities(rctx, fc.Args["clusterName"].(string), fc.Args["namespace"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.Vulnerability)
+ fc.Result = res
+ return ec.marshalNVulnerability2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐVulnerabilityᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_vulnerabilities(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "id":
+ return ec.fieldContext_Vulnerability_id(ctx, field)
+ case "clusterName":
+ return ec.fieldContext_Vulnerability_clusterName(ctx, field)
+ case "namespace":
+ return ec.fieldContext_Vulnerability_namespace(ctx, field)
+ case "kind":
+ return ec.fieldContext_Vulnerability_kind(ctx, field)
+ case "name":
+ return ec.fieldContext_Vulnerability_name(ctx, field)
+ case "vulId":
+ return ec.fieldContext_Vulnerability_vulId(ctx, field)
+ case "vulVendorIds":
+ return ec.fieldContext_Vulnerability_vulVendorIds(ctx, field)
+ case "vulPkgId":
+ return ec.fieldContext_Vulnerability_vulPkgId(ctx, field)
+ case "vulPkgName":
+ return ec.fieldContext_Vulnerability_vulPkgName(ctx, field)
+ case "vulPkgPath":
+ return ec.fieldContext_Vulnerability_vulPkgPath(ctx, field)
+ case "vulInstalledVersion":
+ return ec.fieldContext_Vulnerability_vulInstalledVersion(ctx, field)
+ case "vulFixedVersion":
+ return ec.fieldContext_Vulnerability_vulFixedVersion(ctx, field)
+ case "vulTitle":
+ return ec.fieldContext_Vulnerability_vulTitle(ctx, field)
+ case "vulSeverity":
+ return ec.fieldContext_Vulnerability_vulSeverity(ctx, field)
+ case "vulPublishedDate":
+ return ec.fieldContext_Vulnerability_vulPublishedDate(ctx, field)
+ case "vulLastModifiedDate":
+ return ec.fieldContext_Vulnerability_vulLastModifiedDate(ctx, field)
+ case "expiryDate":
+ return ec.fieldContext_Vulnerability_expiryDate(ctx, field)
+ case "exportedAt":
+ return ec.fieldContext_Vulnerability_exportedAt(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type Vulnerability", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_vulnerabilities_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_misconfigurations(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_misconfigurations(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().Misconfigurations(rctx, fc.Args["clusterName"].(string), fc.Args["namespace"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.Misconfiguration)
+ fc.Result = res
+ return ec.marshalNMisconfiguration2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐMisconfigurationᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_misconfigurations(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "id":
+ return ec.fieldContext_Misconfiguration_id(ctx, field)
+ case "clusterName":
+ return ec.fieldContext_Misconfiguration_clusterName(ctx, field)
+ case "namespace":
+ return ec.fieldContext_Misconfiguration_namespace(ctx, field)
+ case "kind":
+ return ec.fieldContext_Misconfiguration_kind(ctx, field)
+ case "name":
+ return ec.fieldContext_Misconfiguration_name(ctx, field)
+ case "misconfigId":
+ return ec.fieldContext_Misconfiguration_misconfigId(ctx, field)
+ case "misconfigAvdid":
+ return ec.fieldContext_Misconfiguration_misconfigAvdid(ctx, field)
+ case "misconfigType":
+ return ec.fieldContext_Misconfiguration_misconfigType(ctx, field)
+ case "misconfigTitle":
+ return ec.fieldContext_Misconfiguration_misconfigTitle(ctx, field)
+ case "misconfigDesc":
+ return ec.fieldContext_Misconfiguration_misconfigDesc(ctx, field)
+ case "misconfigMsg":
+ return ec.fieldContext_Misconfiguration_misconfigMsg(ctx, field)
+ case "misconfigQuery":
+ return ec.fieldContext_Misconfiguration_misconfigQuery(ctx, field)
+ case "misconfigResolution":
+ return ec.fieldContext_Misconfiguration_misconfigResolution(ctx, field)
+ case "misconfigSeverity":
+ return ec.fieldContext_Misconfiguration_misconfigSeverity(ctx, field)
+ case "misconfigStatus":
+ return ec.fieldContext_Misconfiguration_misconfigStatus(ctx, field)
+ case "eventTime":
+ return ec.fieldContext_Misconfiguration_eventTime(ctx, field)
+ case "expiryDate":
+ return ec.fieldContext_Misconfiguration_expiryDate(ctx, field)
+ case "exportedAt":
+ return ec.fieldContext_Misconfiguration_exportedAt(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type Misconfiguration", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_misconfigurations_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_kubescores(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_kubescores(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().Kubescores(rctx, fc.Args["clustername"].(string), fc.Args["namespace"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.KubeScore)
+ fc.Result = res
+ return ec.marshalNKubeScore2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐKubeScoreᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_kubescores(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "id":
+ return ec.fieldContext_KubeScore_id(ctx, field)
+ case "clusterName":
+ return ec.fieldContext_KubeScore_clusterName(ctx, field)
+ case "objectName":
+ return ec.fieldContext_KubeScore_objectName(ctx, field)
+ case "kind":
+ return ec.fieldContext_KubeScore_kind(ctx, field)
+ case "apiVersion":
+ return ec.fieldContext_KubeScore_apiVersion(ctx, field)
+ case "name":
+ return ec.fieldContext_KubeScore_name(ctx, field)
+ case "namespace":
+ return ec.fieldContext_KubeScore_namespace(ctx, field)
+ case "targetType":
+ return ec.fieldContext_KubeScore_targetType(ctx, field)
+ case "description":
+ return ec.fieldContext_KubeScore_description(ctx, field)
+ case "path":
+ return ec.fieldContext_KubeScore_path(ctx, field)
+ case "summary":
+ return ec.fieldContext_KubeScore_summary(ctx, field)
+ case "fileName":
+ return ec.fieldContext_KubeScore_fileName(ctx, field)
+ case "fileRow":
+ return ec.fieldContext_KubeScore_fileRow(ctx, field)
+ case "eventTime":
+ return ec.fieldContext_KubeScore_eventTime(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type KubeScore", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_kubescores_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_getAllResources(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_getAllResources(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().GetAllResources(rctx, fc.Args["clusterName"].(string), fc.Args["namespace"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.GetAllResource)
+ fc.Result = res
+ return ec.marshalNGetAllResource2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐGetAllResourceᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_getAllResources(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "ClusterName":
+ return ec.fieldContext_GetAllResource_ClusterName(ctx, field)
+ case "Namespace":
+ return ec.fieldContext_GetAllResource_Namespace(ctx, field)
+ case "Kind":
+ return ec.fieldContext_GetAllResource_Kind(ctx, field)
+ case "Resource":
+ return ec.fieldContext_GetAllResource_Resource(ctx, field)
+ case "Age":
+ return ec.fieldContext_GetAllResource_Age(ctx, field)
+ case "EventTime":
+ return ec.fieldContext_GetAllResource_EventTime(ctx, field)
+ case "ExpiryDate":
+ return ec.fieldContext_GetAllResource_ExpiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type GetAllResource", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_getAllResources_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_trivyImages(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_trivyImages(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().TrivyImages(rctx, fc.Args["clusterName"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.TrivyImage)
+ fc.Result = res
+ return ec.marshalNTrivyImage2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyImageᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_trivyImages(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "id":
+ return ec.fieldContext_TrivyImage_id(ctx, field)
+ case "clusterName":
+ return ec.fieldContext_TrivyImage_clusterName(ctx, field)
+ case "artifactName":
+ return ec.fieldContext_TrivyImage_artifactName(ctx, field)
+ case "vulId":
+ return ec.fieldContext_TrivyImage_vulId(ctx, field)
+ case "vulPkgId":
+ return ec.fieldContext_TrivyImage_vulPkgId(ctx, field)
+ case "vulPkgName":
+ return ec.fieldContext_TrivyImage_vulPkgName(ctx, field)
+ case "vulInstalledVersion":
+ return ec.fieldContext_TrivyImage_vulInstalledVersion(ctx, field)
+ case "vulFixedVersion":
+ return ec.fieldContext_TrivyImage_vulFixedVersion(ctx, field)
+ case "vulTitle":
+ return ec.fieldContext_TrivyImage_vulTitle(ctx, field)
+ case "vulSeverity":
+ return ec.fieldContext_TrivyImage_vulSeverity(ctx, field)
+ case "vulPublishedDate":
+ return ec.fieldContext_TrivyImage_vulPublishedDate(ctx, field)
+ case "vulLastModifiedDate":
+ return ec.fieldContext_TrivyImage_vulLastModifiedDate(ctx, field)
+ case "expiryDate":
+ return ec.fieldContext_TrivyImage_expiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type TrivyImage", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_trivyImages_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_deprecatedAPIs(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_deprecatedAPIs(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().DeprecatedAPIs(rctx, fc.Args["clusterName"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.DeprecatedAPI)
+ fc.Result = res
+ return ec.marshalNDeprecatedAPI2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐDeprecatedAPIᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_deprecatedAPIs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "ClusterName":
+ return ec.fieldContext_DeprecatedAPI_ClusterName(ctx, field)
+ case "ObjectName":
+ return ec.fieldContext_DeprecatedAPI_ObjectName(ctx, field)
+ case "Description":
+ return ec.fieldContext_DeprecatedAPI_Description(ctx, field)
+ case "Kind":
+ return ec.fieldContext_DeprecatedAPI_Kind(ctx, field)
+ case "Deprecated":
+ return ec.fieldContext_DeprecatedAPI_Deprecated(ctx, field)
+ case "Scope":
+ return ec.fieldContext_DeprecatedAPI_Scope(ctx, field)
+ case "EventTime":
+ return ec.fieldContext_DeprecatedAPI_EventTime(ctx, field)
+ case "ExpiryDate":
+ return ec.fieldContext_DeprecatedAPI_ExpiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type DeprecatedAPI", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_deprecatedAPIs_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_deletedAPIs(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_deletedAPIs(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().DeletedAPIs(rctx, fc.Args["clusterName"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.DeletedAPI)
+ fc.Result = res
+ return ec.marshalNDeletedAPI2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐDeletedAPIᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_deletedAPIs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "ClusterName":
+ return ec.fieldContext_DeletedAPI_ClusterName(ctx, field)
+ case "ObjectName":
+ return ec.fieldContext_DeletedAPI_ObjectName(ctx, field)
+ case "Group":
+ return ec.fieldContext_DeletedAPI_Group(ctx, field)
+ case "Kind":
+ return ec.fieldContext_DeletedAPI_Kind(ctx, field)
+ case "Version":
+ return ec.fieldContext_DeletedAPI_Version(ctx, field)
+ case "Name":
+ return ec.fieldContext_DeletedAPI_Name(ctx, field)
+ case "Deleted":
+ return ec.fieldContext_DeletedAPI_Deleted(ctx, field)
+ case "Scope":
+ return ec.fieldContext_DeletedAPI_Scope(ctx, field)
+ case "EventTime":
+ return ec.fieldContext_DeletedAPI_EventTime(ctx, field)
+ case "ExpiryDate":
+ return ec.fieldContext_DeletedAPI_ExpiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type DeletedAPI", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_deletedAPIs_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_trivySBOMs(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_trivySBOMs(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().TrivySBOMs(rctx, fc.Args["clusterName"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]*model.TrivySbom)
+ fc.Result = res
+ return ec.marshalNTrivySBOM2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivySbomᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_trivySBOMs(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "id":
+ return ec.fieldContext_TrivySBOM_id(ctx, field)
+ case "clusterName":
+ return ec.fieldContext_TrivySBOM_clusterName(ctx, field)
+ case "imageName":
+ return ec.fieldContext_TrivySBOM_imageName(ctx, field)
+ case "packageName":
+ return ec.fieldContext_TrivySBOM_packageName(ctx, field)
+ case "packageUrl":
+ return ec.fieldContext_TrivySBOM_packageUrl(ctx, field)
+ case "bomRef":
+ return ec.fieldContext_TrivySBOM_bomRef(ctx, field)
+ case "serialNumber":
+ return ec.fieldContext_TrivySBOM_serialNumber(ctx, field)
+ case "version":
+ return ec.fieldContext_TrivySBOM_version(ctx, field)
+ case "bomFormat":
+ return ec.fieldContext_TrivySBOM_bomFormat(ctx, field)
+ case "expiryDate":
+ return ec.fieldContext_TrivySBOM_expiryDate(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type TrivySBOM", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_trivySBOMs_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_trivyVulCount(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_trivyVulCount(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().TrivyVulCount(rctx, fc.Args["clusterName"].(string), fc.Args["namespace"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(*model.ClusterNamespaceVulCount)
+ fc.Result = res
+ return ec.marshalNClusterNamespaceVulCount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterNamespaceVulCount(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_trivyVulCount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "clusterName":
+ return ec.fieldContext_ClusterNamespaceVulCount_clusterName(ctx, field)
+ case "namespace":
+ return ec.fieldContext_ClusterNamespaceVulCount_namespace(ctx, field)
+ case "vulCount":
+ return ec.fieldContext_ClusterNamespaceVulCount_vulCount(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type ClusterNamespaceVulCount", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_trivyVulCount_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_trivyMisconfigCount(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_trivyMisconfigCount(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().TrivyMisconfigCount(rctx, fc.Args["clusterName"].(string), fc.Args["namespace"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(*model.ClusterNamespaceMisconfigCount)
+ fc.Result = res
+ return ec.marshalNClusterNamespaceMisconfigCount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterNamespaceMisconfigCount(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_trivyMisconfigCount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "clusterName":
+ return ec.fieldContext_ClusterNamespaceMisconfigCount_clusterName(ctx, field)
+ case "namespace":
+ return ec.fieldContext_ClusterNamespaceMisconfigCount_namespace(ctx, field)
+ case "misconfigCount":
+ return ec.fieldContext_ClusterNamespaceMisconfigCount_misconfigCount(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type ClusterNamespaceMisconfigCount", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_trivyMisconfigCount_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_deletedAPICount(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_deletedAPICount(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().DeletedAPICount(rctx, fc.Args["clusterName"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(*model.ClusterDeletedAPICount)
+ fc.Result = res
+ return ec.marshalNClusterDeletedAPICount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterDeletedAPICount(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_deletedAPICount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "clusterName":
+ return ec.fieldContext_ClusterDeletedAPICount_clusterName(ctx, field)
+ case "deletedAPICount":
+ return ec.fieldContext_ClusterDeletedAPICount_deletedAPICount(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type ClusterDeletedAPICount", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_deletedAPICount_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_trivyImageCount(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_trivyImageCount(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().TrivyImageCount(rctx, fc.Args["clusterName"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(*model.TrivyImageCount)
+ fc.Result = res
+ return ec.marshalNTrivyImageCount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyImageCount(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_trivyImageCount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "clusterName":
+ return ec.fieldContext_TrivyImageCount_clusterName(ctx, field)
+ case "ImageCount":
+ return ec.fieldContext_TrivyImageCount_ImageCount(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type TrivyImageCount", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_trivyImageCount_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query_deprecatedAPICount(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query_deprecatedAPICount(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.resolvers.Query().DeprecatedAPICount(rctx, fc.Args["clusterName"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(*model.ClusterDeprecatedAPICount)
+ fc.Result = res
+ return ec.marshalNClusterDeprecatedAPICount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterDeprecatedAPICount(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query_deprecatedAPICount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: true,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "clusterName":
+ return ec.fieldContext_ClusterDeprecatedAPICount_clusterName(ctx, field)
+ case "deprecatedAPICount":
+ return ec.fieldContext_ClusterDeprecatedAPICount_deprecatedAPICount(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type ClusterDeprecatedAPICount", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query_deprecatedAPICount_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query___type(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query___type(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.introspectType(fc.Args["name"].(string))
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*introspection.Type)
+ fc.Result = res
+ return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query___type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "kind":
+ return ec.fieldContext___Type_kind(ctx, field)
+ case "name":
+ return ec.fieldContext___Type_name(ctx, field)
+ case "description":
+ return ec.fieldContext___Type_description(ctx, field)
+ case "fields":
+ return ec.fieldContext___Type_fields(ctx, field)
+ case "interfaces":
+ return ec.fieldContext___Type_interfaces(ctx, field)
+ case "possibleTypes":
+ return ec.fieldContext___Type_possibleTypes(ctx, field)
+ case "enumValues":
+ return ec.fieldContext___Type_enumValues(ctx, field)
+ case "inputFields":
+ return ec.fieldContext___Type_inputFields(ctx, field)
+ case "ofType":
+ return ec.fieldContext___Type_ofType(ctx, field)
+ case "specifiedByURL":
+ return ec.fieldContext___Type_specifiedByURL(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field_Query___type_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Query___schema(ctx context.Context, field graphql.CollectedField) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Query___schema(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return ec.introspectSchema()
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*introspection.Schema)
+ fc.Result = res
+ return ec.marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Query___schema(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Query",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "description":
+ return ec.fieldContext___Schema_description(ctx, field)
+ case "types":
+ return ec.fieldContext___Schema_types(ctx, field)
+ case "queryType":
+ return ec.fieldContext___Schema_queryType(ctx, field)
+ case "mutationType":
+ return ec.fieldContext___Schema_mutationType(ctx, field)
+ case "subscriptionType":
+ return ec.fieldContext___Schema_subscriptionType(ctx, field)
+ case "directives":
+ return ec.fieldContext___Schema_directives(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __Schema", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Rakkess_ClusterName(ctx context.Context, field graphql.CollectedField, obj *model.Rakkess) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Rakkess_ClusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Rakkess_ClusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Rakkess",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Rakkess_Name(ctx context.Context, field graphql.CollectedField, obj *model.Rakkess) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Rakkess_Name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Rakkess_Name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Rakkess",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Rakkess_Create(ctx context.Context, field graphql.CollectedField, obj *model.Rakkess) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Rakkess_Create(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Create, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Rakkess_Create(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Rakkess",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Rakkess_Delete(ctx context.Context, field graphql.CollectedField, obj *model.Rakkess) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Rakkess_Delete(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Delete, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Rakkess_Delete(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Rakkess",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Rakkess_List(ctx context.Context, field graphql.CollectedField, obj *model.Rakkess) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Rakkess_List(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.List, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Rakkess_List(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Rakkess",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Rakkess_Update(ctx context.Context, field graphql.CollectedField, obj *model.Rakkess) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Rakkess_Update(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Update, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Rakkess_Update(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Rakkess",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Rakkess_EventTime(ctx context.Context, field graphql.CollectedField, obj *model.Rakkess) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Rakkess_EventTime(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.EventTime, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Rakkess_EventTime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Rakkess",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Rakkess_ExpiryDate(ctx context.Context, field graphql.CollectedField, obj *model.Rakkess) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Rakkess_ExpiryDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ExpiryDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Rakkess_ExpiryDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Rakkess",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Resource_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.Resource) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Resource_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Resource_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Resource",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Resource_namespace(ctx context.Context, field graphql.CollectedField, obj *model.Resource) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Resource_namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Resource_namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Resource",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Resource_kind(ctx context.Context, field graphql.CollectedField, obj *model.Resource) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Resource_kind(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Kind, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Resource_kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Resource",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Resource_resource(ctx context.Context, field graphql.CollectedField, obj *model.Resource) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Resource_resource(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Resource, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Resource_resource(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Resource",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Resource_age(ctx context.Context, field graphql.CollectedField, obj *model.Resource) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Resource_age(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Age, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Resource_age(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Resource",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Resource_eventTime(ctx context.Context, field graphql.CollectedField, obj *model.Resource) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Resource_eventTime(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.EventTime, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Resource_eventTime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Resource",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImage_id(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImage_id(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNID2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImage_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type ID does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImage_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImage_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImage_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImage_artifactName(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImage_artifactName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ArtifactName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImage_artifactName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImage_vulId(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImage_vulId(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImage_vulId(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImage_vulPkgId(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImage_vulPkgId(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulPkgID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImage_vulPkgId(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImage_vulPkgName(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImage_vulPkgName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulPkgName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImage_vulPkgName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImage_vulInstalledVersion(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImage_vulInstalledVersion(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulInstalledVersion, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImage_vulInstalledVersion(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImage_vulFixedVersion(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImage_vulFixedVersion(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulFixedVersion, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImage_vulFixedVersion(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImage_vulTitle(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImage_vulTitle(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulTitle, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImage_vulTitle(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImage_vulSeverity(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImage_vulSeverity(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulSeverity, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImage_vulSeverity(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImage_vulPublishedDate(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImage_vulPublishedDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulPublishedDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImage_vulPublishedDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImage_vulLastModifiedDate(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImage_vulLastModifiedDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulLastModifiedDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImage_vulLastModifiedDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImage_expiryDate(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImage) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImage_expiryDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ExpiryDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImage_expiryDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImage",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImageCount_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImageCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImageCount_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImageCount_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImageCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyImageCount_ImageCount(ctx context.Context, field graphql.CollectedField, obj *model.TrivyImageCount) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyImageCount_ImageCount(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ImageCount, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(int)
+ fc.Result = res
+ return ec.marshalNInt2int(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyImageCount_ImageCount(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyImageCount",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_id(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_id(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNID2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type ID does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_namespace(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_kind(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_kind(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Kind, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_name(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_misconfigId(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_misconfigId(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_misconfigId(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_misconfigAvdid(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_misconfigAvdid(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigAvdid, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_misconfigAvdid(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_misconfigType(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_misconfigType(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigType, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_misconfigType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_misconfigTitle(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_misconfigTitle(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigTitle, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_misconfigTitle(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_misconfigDesc(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_misconfigDesc(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigDesc, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_misconfigDesc(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_misconfigMsg(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_misconfigMsg(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigMsg, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_misconfigMsg(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_misconfigQuery(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_misconfigQuery(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigQuery, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_misconfigQuery(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_misconfigResolution(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_misconfigResolution(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigResolution, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_misconfigResolution(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_misconfigSeverity(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_misconfigSeverity(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigSeverity, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_misconfigSeverity(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_misconfigStatus(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_misconfigStatus(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MisconfigStatus, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_misconfigStatus(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_eventTime(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_eventTime(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.EventTime, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_eventTime(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyMisconfig_expiryDate(ctx context.Context, field graphql.CollectedField, obj *model.TrivyMisconfig) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyMisconfig_expiryDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ExpiryDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyMisconfig_expiryDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyMisconfig",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivySBOM_id(ctx context.Context, field graphql.CollectedField, obj *model.TrivySbom) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivySBOM_id(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNID2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivySBOM_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivySBOM",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type ID does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivySBOM_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.TrivySbom) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivySBOM_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivySBOM_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivySBOM",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivySBOM_imageName(ctx context.Context, field graphql.CollectedField, obj *model.TrivySbom) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivySBOM_imageName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ImageName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivySBOM_imageName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivySBOM",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivySBOM_packageName(ctx context.Context, field graphql.CollectedField, obj *model.TrivySbom) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivySBOM_packageName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.PackageName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivySBOM_packageName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivySBOM",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivySBOM_packageUrl(ctx context.Context, field graphql.CollectedField, obj *model.TrivySbom) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivySBOM_packageUrl(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.PackageURL, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivySBOM_packageUrl(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivySBOM",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivySBOM_bomRef(ctx context.Context, field graphql.CollectedField, obj *model.TrivySbom) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivySBOM_bomRef(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.BomRef, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivySBOM_bomRef(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivySBOM",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivySBOM_serialNumber(ctx context.Context, field graphql.CollectedField, obj *model.TrivySbom) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivySBOM_serialNumber(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.SerialNumber, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivySBOM_serialNumber(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivySBOM",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivySBOM_version(ctx context.Context, field graphql.CollectedField, obj *model.TrivySbom) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivySBOM_version(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Version, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*int)
+ fc.Result = res
+ return ec.marshalOInt2ᚖint(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivySBOM_version(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivySBOM",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Int does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivySBOM_bomFormat(ctx context.Context, field graphql.CollectedField, obj *model.TrivySbom) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivySBOM_bomFormat(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.BomFormat, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivySBOM_bomFormat(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivySBOM",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivySBOM_expiryDate(ctx context.Context, field graphql.CollectedField, obj *model.TrivySbom) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivySBOM_expiryDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ExpiryDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivySBOM_expiryDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivySBOM",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
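+// _TrivyVul_<field> resolvers: same generated shape as the TrivySBOM block
+// above. Apart from the non-null id, every TrivyVul field here is an optional
+// String (*string) marshalled with marshalOString2ᚖstring.
+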
+func (ec *executionContext) _TrivyVul_id(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_id(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNID2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type ID does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_namespace(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_kind(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_kind(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Kind, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_name(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_vulId(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_vulId(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_vulId(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_vulVendorIds(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_vulVendorIds(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulVendorIds, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_vulVendorIds(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_vulPkgId(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_vulPkgId(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulPkgID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_vulPkgId(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_vulPkgName(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_vulPkgName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulPkgName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_vulPkgName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_vulPkgPath(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_vulPkgPath(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulPkgPath, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_vulPkgPath(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_vulInstalledVersion(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_vulInstalledVersion(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulInstalledVersion, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_vulInstalledVersion(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_vulFixedVersion(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_vulFixedVersion(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulFixedVersion, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_vulFixedVersion(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_vulTitle(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_vulTitle(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulTitle, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_vulTitle(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_vulSeverity(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_vulSeverity(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulSeverity, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_vulSeverity(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_vulPublishedDate(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_vulPublishedDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulPublishedDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_vulPublishedDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_vulLastModifiedDate(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_vulLastModifiedDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulLastModifiedDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_vulLastModifiedDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _TrivyVul_expiryDate(ctx context.Context, field graphql.CollectedField, obj *model.TrivyVul) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_TrivyVul_expiryDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ExpiryDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_TrivyVul_expiryDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "TrivyVul",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
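+// Vulnerability resolvers. Unlike TrivyVul, the schema marks clusterName,
+// namespace, kind, name and vulId as non-null, so those paths use
+// marshalNString2string and emit "must not be null" on a nil result; the
+// remaining fields stay optional.
+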
+func (ec *executionContext) _Vulnerability_id(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_id(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNID2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_id(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type ID does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_clusterName(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_clusterName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ClusterName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_clusterName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_namespace(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_namespace(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Namespace, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_namespace(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_kind(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_kind(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Kind, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_name(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_vulId(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_vulId(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_vulId(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_vulVendorIds(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_vulVendorIds(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulVendorIds, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_vulVendorIds(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_vulPkgId(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_vulPkgId(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulPkgID, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_vulPkgId(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_vulPkgName(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_vulPkgName(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulPkgName, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_vulPkgName(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_vulPkgPath(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_vulPkgPath(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulPkgPath, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_vulPkgPath(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_vulInstalledVersion(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_vulInstalledVersion(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulInstalledVersion, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_vulInstalledVersion(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_vulFixedVersion(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_vulFixedVersion(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulFixedVersion, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_vulFixedVersion(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_vulTitle(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_vulTitle(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulTitle, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_vulTitle(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_vulSeverity(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_vulSeverity(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulSeverity, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_vulSeverity(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_vulPublishedDate(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_vulPublishedDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulPublishedDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_vulPublishedDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_vulLastModifiedDate(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_vulLastModifiedDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.VulLastModifiedDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_vulLastModifiedDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_expiryDate(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_expiryDate(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ExpiryDate, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_expiryDate(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) _Vulnerability_exportedAt(ctx context.Context, field graphql.CollectedField, obj *model.Vulnerability) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext_Vulnerability_exportedAt(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.ExportedAt, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext_Vulnerability_exportedAt(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "Vulnerability",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
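+// Introspection resolvers for the built-in __Directive type: description is
+// resolved via the Description() method (IsMethod: true), while the other
+// fields read struct members of introspection.Directive directly.
+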
+func (ec *executionContext) ___Directive_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Directive_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Directive_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Directive",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Directive_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Directive_description(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Description(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Directive_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Directive",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Directive_locations(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Directive_locations(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Locations, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]string)
+ fc.Result = res
+ return ec.marshalN__DirectiveLocation2ᚕstringᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Directive_locations(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Directive",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type __DirectiveLocation does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Directive_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Directive_args(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Args, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]introspection.InputValue)
+ fc.Result = res
+ return ec.marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Directive_args(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Directive",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "name":
+ return ec.fieldContext___InputValue_name(ctx, field)
+ case "description":
+ return ec.fieldContext___InputValue_description(ctx, field)
+ case "type":
+ return ec.fieldContext___InputValue_type(ctx, field)
+ case "defaultValue":
+ return ec.fieldContext___InputValue_defaultValue(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __InputValue", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Directive_isRepeatable(ctx context.Context, field graphql.CollectedField, obj *introspection.Directive) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Directive_isRepeatable(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.IsRepeatable, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(bool)
+ fc.Result = res
+ return ec.marshalNBoolean2bool(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Directive_isRepeatable(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Directive",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Boolean does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___EnumValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___EnumValue_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___EnumValue_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__EnumValue",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___EnumValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___EnumValue_description(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Description(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___EnumValue_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__EnumValue",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___EnumValue_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___EnumValue_isDeprecated(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.IsDeprecated(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(bool)
+ fc.Result = res
+ return ec.marshalNBoolean2bool(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___EnumValue_isDeprecated(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__EnumValue",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Boolean does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___EnumValue_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.EnumValue) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___EnumValue_deprecationReason(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.DeprecationReason(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___EnumValue_deprecationReason(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__EnumValue",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Field_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Field_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Field_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Field",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Field_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Field_description(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Description(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Field_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Field",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Field_args(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Field_args(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Args, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]introspection.InputValue)
+ fc.Result = res
+ return ec.marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Field_args(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Field",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "name":
+ return ec.fieldContext___InputValue_name(ctx, field)
+ case "description":
+ return ec.fieldContext___InputValue_description(ctx, field)
+ case "type":
+ return ec.fieldContext___InputValue_type(ctx, field)
+ case "defaultValue":
+ return ec.fieldContext___InputValue_defaultValue(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __InputValue", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Field_type(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Field_type(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Type, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(*introspection.Type)
+ fc.Result = res
+ return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Field_type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Field",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "kind":
+ return ec.fieldContext___Type_kind(ctx, field)
+ case "name":
+ return ec.fieldContext___Type_name(ctx, field)
+ case "description":
+ return ec.fieldContext___Type_description(ctx, field)
+ case "fields":
+ return ec.fieldContext___Type_fields(ctx, field)
+ case "interfaces":
+ return ec.fieldContext___Type_interfaces(ctx, field)
+ case "possibleTypes":
+ return ec.fieldContext___Type_possibleTypes(ctx, field)
+ case "enumValues":
+ return ec.fieldContext___Type_enumValues(ctx, field)
+ case "inputFields":
+ return ec.fieldContext___Type_inputFields(ctx, field)
+ case "ofType":
+ return ec.fieldContext___Type_ofType(ctx, field)
+ case "specifiedByURL":
+ return ec.fieldContext___Type_specifiedByURL(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Field_isDeprecated(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Field_isDeprecated(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.IsDeprecated(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(bool)
+ fc.Result = res
+ return ec.marshalNBoolean2bool(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Field_isDeprecated(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Field",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type Boolean does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Field_deprecationReason(ctx context.Context, field graphql.CollectedField, obj *introspection.Field) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Field_deprecationReason(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.DeprecationReason(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Field_deprecationReason(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Field",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___InputValue_name(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___InputValue_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalNString2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___InputValue_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__InputValue",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___InputValue_description(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___InputValue_description(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Description(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___InputValue_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__InputValue",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___InputValue_type(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___InputValue_type(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Type, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(*introspection.Type)
+ fc.Result = res
+ return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___InputValue_type(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__InputValue",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "kind":
+ return ec.fieldContext___Type_kind(ctx, field)
+ case "name":
+ return ec.fieldContext___Type_name(ctx, field)
+ case "description":
+ return ec.fieldContext___Type_description(ctx, field)
+ case "fields":
+ return ec.fieldContext___Type_fields(ctx, field)
+ case "interfaces":
+ return ec.fieldContext___Type_interfaces(ctx, field)
+ case "possibleTypes":
+ return ec.fieldContext___Type_possibleTypes(ctx, field)
+ case "enumValues":
+ return ec.fieldContext___Type_enumValues(ctx, field)
+ case "inputFields":
+ return ec.fieldContext___Type_inputFields(ctx, field)
+ case "ofType":
+ return ec.fieldContext___Type_ofType(ctx, field)
+ case "specifiedByURL":
+ return ec.fieldContext___Type_specifiedByURL(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___InputValue_defaultValue(ctx context.Context, field graphql.CollectedField, obj *introspection.InputValue) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___InputValue_defaultValue(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.DefaultValue, nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___InputValue_defaultValue(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__InputValue",
+ Field: field,
+ IsMethod: false,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Schema_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Schema_description(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Description(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Schema_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Schema",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Schema_types(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Schema_types(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Types(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]introspection.Type)
+ fc.Result = res
+ return ec.marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Schema_types(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Schema",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "kind":
+ return ec.fieldContext___Type_kind(ctx, field)
+ case "name":
+ return ec.fieldContext___Type_name(ctx, field)
+ case "description":
+ return ec.fieldContext___Type_description(ctx, field)
+ case "fields":
+ return ec.fieldContext___Type_fields(ctx, field)
+ case "interfaces":
+ return ec.fieldContext___Type_interfaces(ctx, field)
+ case "possibleTypes":
+ return ec.fieldContext___Type_possibleTypes(ctx, field)
+ case "enumValues":
+ return ec.fieldContext___Type_enumValues(ctx, field)
+ case "inputFields":
+ return ec.fieldContext___Type_inputFields(ctx, field)
+ case "ofType":
+ return ec.fieldContext___Type_ofType(ctx, field)
+ case "specifiedByURL":
+ return ec.fieldContext___Type_specifiedByURL(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Schema_queryType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Schema_queryType(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.QueryType(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(*introspection.Type)
+ fc.Result = res
+ return ec.marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Schema_queryType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Schema",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "kind":
+ return ec.fieldContext___Type_kind(ctx, field)
+ case "name":
+ return ec.fieldContext___Type_name(ctx, field)
+ case "description":
+ return ec.fieldContext___Type_description(ctx, field)
+ case "fields":
+ return ec.fieldContext___Type_fields(ctx, field)
+ case "interfaces":
+ return ec.fieldContext___Type_interfaces(ctx, field)
+ case "possibleTypes":
+ return ec.fieldContext___Type_possibleTypes(ctx, field)
+ case "enumValues":
+ return ec.fieldContext___Type_enumValues(ctx, field)
+ case "inputFields":
+ return ec.fieldContext___Type_inputFields(ctx, field)
+ case "ofType":
+ return ec.fieldContext___Type_ofType(ctx, field)
+ case "specifiedByURL":
+ return ec.fieldContext___Type_specifiedByURL(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Schema_mutationType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Schema_mutationType(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.MutationType(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*introspection.Type)
+ fc.Result = res
+ return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Schema_mutationType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Schema",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "kind":
+ return ec.fieldContext___Type_kind(ctx, field)
+ case "name":
+ return ec.fieldContext___Type_name(ctx, field)
+ case "description":
+ return ec.fieldContext___Type_description(ctx, field)
+ case "fields":
+ return ec.fieldContext___Type_fields(ctx, field)
+ case "interfaces":
+ return ec.fieldContext___Type_interfaces(ctx, field)
+ case "possibleTypes":
+ return ec.fieldContext___Type_possibleTypes(ctx, field)
+ case "enumValues":
+ return ec.fieldContext___Type_enumValues(ctx, field)
+ case "inputFields":
+ return ec.fieldContext___Type_inputFields(ctx, field)
+ case "ofType":
+ return ec.fieldContext___Type_ofType(ctx, field)
+ case "specifiedByURL":
+ return ec.fieldContext___Type_specifiedByURL(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Schema_subscriptionType(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Schema_subscriptionType(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.SubscriptionType(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*introspection.Type)
+ fc.Result = res
+ return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Schema_subscriptionType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Schema",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "kind":
+ return ec.fieldContext___Type_kind(ctx, field)
+ case "name":
+ return ec.fieldContext___Type_name(ctx, field)
+ case "description":
+ return ec.fieldContext___Type_description(ctx, field)
+ case "fields":
+ return ec.fieldContext___Type_fields(ctx, field)
+ case "interfaces":
+ return ec.fieldContext___Type_interfaces(ctx, field)
+ case "possibleTypes":
+ return ec.fieldContext___Type_possibleTypes(ctx, field)
+ case "enumValues":
+ return ec.fieldContext___Type_enumValues(ctx, field)
+ case "inputFields":
+ return ec.fieldContext___Type_inputFields(ctx, field)
+ case "ofType":
+ return ec.fieldContext___Type_ofType(ctx, field)
+ case "specifiedByURL":
+ return ec.fieldContext___Type_specifiedByURL(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Schema_directives(ctx context.Context, field graphql.CollectedField, obj *introspection.Schema) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Schema_directives(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Directives(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.([]introspection.Directive)
+ fc.Result = res
+ return ec.marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Schema_directives(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Schema",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "name":
+ return ec.fieldContext___Directive_name(ctx, field)
+ case "description":
+ return ec.fieldContext___Directive_description(ctx, field)
+ case "locations":
+ return ec.fieldContext___Directive_locations(ctx, field)
+ case "args":
+ return ec.fieldContext___Directive_args(ctx, field)
+ case "isRepeatable":
+ return ec.fieldContext___Directive_isRepeatable(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __Directive", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Type_kind(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Type_kind(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Kind(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ if !graphql.HasFieldError(ctx, fc) {
+ ec.Errorf(ctx, "must not be null")
+ }
+ return graphql.Null
+ }
+ res := resTmp.(string)
+ fc.Result = res
+ return ec.marshalN__TypeKind2string(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_kind(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Type",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type __TypeKind does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Type_name(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Type_name(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Name(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_name(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Type",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Type_description(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Type_description(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Description(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_description(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Type",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Type_fields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Type_fields(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Fields(fc.Args["includeDeprecated"].(bool)), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.([]introspection.Field)
+ fc.Result = res
+ return ec.marshalO__Field2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐFieldᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_fields(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Type",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "name":
+ return ec.fieldContext___Field_name(ctx, field)
+ case "description":
+ return ec.fieldContext___Field_description(ctx, field)
+ case "args":
+ return ec.fieldContext___Field_args(ctx, field)
+ case "type":
+ return ec.fieldContext___Field_type(ctx, field)
+ case "isDeprecated":
+ return ec.fieldContext___Field_isDeprecated(ctx, field)
+ case "deprecationReason":
+ return ec.fieldContext___Field_deprecationReason(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __Field", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field___Type_fields_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Type_interfaces(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Type_interfaces(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.Interfaces(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.([]introspection.Type)
+ fc.Result = res
+ return ec.marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_interfaces(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Type",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "kind":
+ return ec.fieldContext___Type_kind(ctx, field)
+ case "name":
+ return ec.fieldContext___Type_name(ctx, field)
+ case "description":
+ return ec.fieldContext___Type_description(ctx, field)
+ case "fields":
+ return ec.fieldContext___Type_fields(ctx, field)
+ case "interfaces":
+ return ec.fieldContext___Type_interfaces(ctx, field)
+ case "possibleTypes":
+ return ec.fieldContext___Type_possibleTypes(ctx, field)
+ case "enumValues":
+ return ec.fieldContext___Type_enumValues(ctx, field)
+ case "inputFields":
+ return ec.fieldContext___Type_inputFields(ctx, field)
+ case "ofType":
+ return ec.fieldContext___Type_ofType(ctx, field)
+ case "specifiedByURL":
+ return ec.fieldContext___Type_specifiedByURL(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Type_possibleTypes(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Type_possibleTypes(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.PossibleTypes(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.([]introspection.Type)
+ fc.Result = res
+ return ec.marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_possibleTypes(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Type",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "kind":
+ return ec.fieldContext___Type_kind(ctx, field)
+ case "name":
+ return ec.fieldContext___Type_name(ctx, field)
+ case "description":
+ return ec.fieldContext___Type_description(ctx, field)
+ case "fields":
+ return ec.fieldContext___Type_fields(ctx, field)
+ case "interfaces":
+ return ec.fieldContext___Type_interfaces(ctx, field)
+ case "possibleTypes":
+ return ec.fieldContext___Type_possibleTypes(ctx, field)
+ case "enumValues":
+ return ec.fieldContext___Type_enumValues(ctx, field)
+ case "inputFields":
+ return ec.fieldContext___Type_inputFields(ctx, field)
+ case "ofType":
+ return ec.fieldContext___Type_ofType(ctx, field)
+ case "specifiedByURL":
+ return ec.fieldContext___Type_specifiedByURL(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Type_enumValues(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Type_enumValues(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.EnumValues(fc.Args["includeDeprecated"].(bool)), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.([]introspection.EnumValue)
+ fc.Result = res
+ return ec.marshalO__EnumValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValueᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_enumValues(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Type",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "name":
+ return ec.fieldContext___EnumValue_name(ctx, field)
+ case "description":
+ return ec.fieldContext___EnumValue_description(ctx, field)
+ case "isDeprecated":
+ return ec.fieldContext___EnumValue_isDeprecated(ctx, field)
+ case "deprecationReason":
+ return ec.fieldContext___EnumValue_deprecationReason(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __EnumValue", field.Name)
+ },
+ }
+ defer func() {
+ if r := recover(); r != nil {
+ err = ec.Recover(ctx, r)
+ ec.Error(ctx, err)
+ }
+ }()
+ ctx = graphql.WithFieldContext(ctx, fc)
+ if fc.Args, err = ec.field___Type_enumValues_args(ctx, field.ArgumentMap(ec.Variables)); err != nil {
+ ec.Error(ctx, err)
+ return fc, err
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Type_inputFields(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Type_inputFields(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.InputFields(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.([]introspection.InputValue)
+ fc.Result = res
+ return ec.marshalO__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_inputFields(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Type",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "name":
+ return ec.fieldContext___InputValue_name(ctx, field)
+ case "description":
+ return ec.fieldContext___InputValue_description(ctx, field)
+ case "type":
+ return ec.fieldContext___InputValue_type(ctx, field)
+ case "defaultValue":
+ return ec.fieldContext___InputValue_defaultValue(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __InputValue", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Type_ofType(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Type_ofType(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.OfType(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*introspection.Type)
+ fc.Result = res
+ return ec.marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_ofType(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Type",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ switch field.Name {
+ case "kind":
+ return ec.fieldContext___Type_kind(ctx, field)
+ case "name":
+ return ec.fieldContext___Type_name(ctx, field)
+ case "description":
+ return ec.fieldContext___Type_description(ctx, field)
+ case "fields":
+ return ec.fieldContext___Type_fields(ctx, field)
+ case "interfaces":
+ return ec.fieldContext___Type_interfaces(ctx, field)
+ case "possibleTypes":
+ return ec.fieldContext___Type_possibleTypes(ctx, field)
+ case "enumValues":
+ return ec.fieldContext___Type_enumValues(ctx, field)
+ case "inputFields":
+ return ec.fieldContext___Type_inputFields(ctx, field)
+ case "ofType":
+ return ec.fieldContext___Type_ofType(ctx, field)
+ case "specifiedByURL":
+ return ec.fieldContext___Type_specifiedByURL(ctx, field)
+ }
+ return nil, fmt.Errorf("no field named %q was found under type __Type", field.Name)
+ },
+ }
+ return fc, nil
+}
+
+func (ec *executionContext) ___Type_specifiedByURL(ctx context.Context, field graphql.CollectedField, obj *introspection.Type) (ret graphql.Marshaler) {
+ fc, err := ec.fieldContext___Type_specifiedByURL(ctx, field)
+ if err != nil {
+ return graphql.Null
+ }
+ ctx = graphql.WithFieldContext(ctx, fc)
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = graphql.Null
+ }
+ }()
+ resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {
+ ctx = rctx // use context from middleware stack in children
+ return obj.SpecifiedByURL(), nil
+ })
+ if err != nil {
+ ec.Error(ctx, err)
+ return graphql.Null
+ }
+ if resTmp == nil {
+ return graphql.Null
+ }
+ res := resTmp.(*string)
+ fc.Result = res
+ return ec.marshalOString2ᚖstring(ctx, field.Selections, res)
+}
+
+func (ec *executionContext) fieldContext___Type_specifiedByURL(ctx context.Context, field graphql.CollectedField) (fc *graphql.FieldContext, err error) {
+ fc = &graphql.FieldContext{
+ Object: "__Type",
+ Field: field,
+ IsMethod: true,
+ IsResolver: false,
+ Child: func(ctx context.Context, field graphql.CollectedField) (*graphql.FieldContext, error) {
+ return nil, errors.New("field of type String does not have child fields")
+ },
+ }
+ return fc, nil
+}
+
+// endregion **************************** field.gotpl *****************************
+
+// region **************************** input.gotpl *****************************
+
+// endregion **************************** input.gotpl *****************************
+
+// region ************************** interface.gotpl ***************************
+
+// endregion ************************** interface.gotpl ***************************
+
+// region **************************** object.gotpl ****************************
+
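+// The marshalers below are generated from object.gotpl. Each one collects the
+// selected fields for its GraphQL type, switches on the field name to marshal
+// the matching model field, counts nulls on non-nullable fields via
+// out.Invalids, dispatches the field set, and finally schedules any @defer
+// groups (the deferred map stays empty for these plain object types).
+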
+var clusterImplementors = []string{"Cluster"}
+
+func (ec *executionContext) _Cluster(ctx context.Context, sel ast.SelectionSet, obj *model.Cluster) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, clusterImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("Cluster")
+ case "name":
+ out.Values[i] = ec._Cluster_name(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var clusterAPIsCountImplementors = []string{"ClusterAPIsCount"}
+
+func (ec *executionContext) _ClusterAPIsCount(ctx context.Context, sel ast.SelectionSet, obj *model.ClusterAPIsCount) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, clusterAPIsCountImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("ClusterAPIsCount")
+ case "clusterName":
+ out.Values[i] = ec._ClusterAPIsCount_clusterName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "count":
+ out.Values[i] = ec._ClusterAPIsCount_count(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var clusterDeletedAPICountImplementors = []string{"ClusterDeletedAPICount"}
+
+func (ec *executionContext) _ClusterDeletedAPICount(ctx context.Context, sel ast.SelectionSet, obj *model.ClusterDeletedAPICount) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, clusterDeletedAPICountImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("ClusterDeletedAPICount")
+ case "clusterName":
+ out.Values[i] = ec._ClusterDeletedAPICount_clusterName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "deletedAPICount":
+ out.Values[i] = ec._ClusterDeletedAPICount_deletedAPICount(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var clusterDeprecatedAPICountImplementors = []string{"ClusterDeprecatedAPICount"}
+
+func (ec *executionContext) _ClusterDeprecatedAPICount(ctx context.Context, sel ast.SelectionSet, obj *model.ClusterDeprecatedAPICount) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, clusterDeprecatedAPICountImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("ClusterDeprecatedAPICount")
+ case "clusterName":
+ out.Values[i] = ec._ClusterDeprecatedAPICount_clusterName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "deprecatedAPICount":
+ out.Values[i] = ec._ClusterDeprecatedAPICount_deprecatedAPICount(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var clusterNamespaceMisconfigCountImplementors = []string{"ClusterNamespaceMisconfigCount"}
+
+func (ec *executionContext) _ClusterNamespaceMisconfigCount(ctx context.Context, sel ast.SelectionSet, obj *model.ClusterNamespaceMisconfigCount) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, clusterNamespaceMisconfigCountImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("ClusterNamespaceMisconfigCount")
+ case "clusterName":
+ out.Values[i] = ec._ClusterNamespaceMisconfigCount_clusterName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "namespace":
+ out.Values[i] = ec._ClusterNamespaceMisconfigCount_namespace(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "misconfigCount":
+ out.Values[i] = ec._ClusterNamespaceMisconfigCount_misconfigCount(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var clusterNamespaceOutdatedCountImplementors = []string{"ClusterNamespaceOutdatedCount"}
+
+func (ec *executionContext) _ClusterNamespaceOutdatedCount(ctx context.Context, sel ast.SelectionSet, obj *model.ClusterNamespaceOutdatedCount) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, clusterNamespaceOutdatedCountImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("ClusterNamespaceOutdatedCount")
+ case "clusterName":
+ out.Values[i] = ec._ClusterNamespaceOutdatedCount_clusterName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "namespace":
+ out.Values[i] = ec._ClusterNamespaceOutdatedCount_namespace(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "outdatedCount":
+ out.Values[i] = ec._ClusterNamespaceOutdatedCount_outdatedCount(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var clusterNamespaceResourceCountImplementors = []string{"ClusterNamespaceResourceCount"}
+
+func (ec *executionContext) _ClusterNamespaceResourceCount(ctx context.Context, sel ast.SelectionSet, obj *model.ClusterNamespaceResourceCount) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, clusterNamespaceResourceCountImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("ClusterNamespaceResourceCount")
+ case "clusterName":
+ out.Values[i] = ec._ClusterNamespaceResourceCount_clusterName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "namespace":
+ out.Values[i] = ec._ClusterNamespaceResourceCount_namespace(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "resourceCount":
+ out.Values[i] = ec._ClusterNamespaceResourceCount_resourceCount(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var clusterNamespaceVulCountImplementors = []string{"ClusterNamespaceVulCount"}
+
+func (ec *executionContext) _ClusterNamespaceVulCount(ctx context.Context, sel ast.SelectionSet, obj *model.ClusterNamespaceVulCount) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, clusterNamespaceVulCountImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("ClusterNamespaceVulCount")
+ case "clusterName":
+ out.Values[i] = ec._ClusterNamespaceVulCount_clusterName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "namespace":
+ out.Values[i] = ec._ClusterNamespaceVulCount_namespace(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "vulCount":
+ out.Values[i] = ec._ClusterNamespaceVulCount_vulCount(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var deletedAPIImplementors = []string{"DeletedAPI"}
+
+func (ec *executionContext) _DeletedAPI(ctx context.Context, sel ast.SelectionSet, obj *model.DeletedAPI) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, deletedAPIImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("DeletedAPI")
+ case "ClusterName":
+ out.Values[i] = ec._DeletedAPI_ClusterName(ctx, field, obj)
+ case "ObjectName":
+ out.Values[i] = ec._DeletedAPI_ObjectName(ctx, field, obj)
+ case "Group":
+ out.Values[i] = ec._DeletedAPI_Group(ctx, field, obj)
+ case "Kind":
+ out.Values[i] = ec._DeletedAPI_Kind(ctx, field, obj)
+ case "Version":
+ out.Values[i] = ec._DeletedAPI_Version(ctx, field, obj)
+ case "Name":
+ out.Values[i] = ec._DeletedAPI_Name(ctx, field, obj)
+ case "Deleted":
+ out.Values[i] = ec._DeletedAPI_Deleted(ctx, field, obj)
+ case "Scope":
+ out.Values[i] = ec._DeletedAPI_Scope(ctx, field, obj)
+ case "EventTime":
+ out.Values[i] = ec._DeletedAPI_EventTime(ctx, field, obj)
+ case "ExpiryDate":
+ out.Values[i] = ec._DeletedAPI_ExpiryDate(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var deprecatedAPIImplementors = []string{"DeprecatedAPI"}
+
+func (ec *executionContext) _DeprecatedAPI(ctx context.Context, sel ast.SelectionSet, obj *model.DeprecatedAPI) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, deprecatedAPIImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("DeprecatedAPI")
+ case "ClusterName":
+ out.Values[i] = ec._DeprecatedAPI_ClusterName(ctx, field, obj)
+ case "ObjectName":
+ out.Values[i] = ec._DeprecatedAPI_ObjectName(ctx, field, obj)
+ case "Description":
+ out.Values[i] = ec._DeprecatedAPI_Description(ctx, field, obj)
+ case "Kind":
+ out.Values[i] = ec._DeprecatedAPI_Kind(ctx, field, obj)
+ case "Deprecated":
+ out.Values[i] = ec._DeprecatedAPI_Deprecated(ctx, field, obj)
+ case "Scope":
+ out.Values[i] = ec._DeprecatedAPI_Scope(ctx, field, obj)
+ case "EventTime":
+ out.Values[i] = ec._DeprecatedAPI_EventTime(ctx, field, obj)
+ case "ExpiryDate":
+ out.Values[i] = ec._DeprecatedAPI_ExpiryDate(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var eventImplementors = []string{"Event"}
+
+func (ec *executionContext) _Event(ctx context.Context, sel ast.SelectionSet, obj *model.Event) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, eventImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("Event")
+ case "ClusterName":
+ out.Values[i] = ec._Event_ClusterName(ctx, field, obj)
+ case "Id":
+ out.Values[i] = ec._Event_Id(ctx, field, obj)
+ case "EventTime":
+ out.Values[i] = ec._Event_EventTime(ctx, field, obj)
+ case "OpType":
+ out.Values[i] = ec._Event_OpType(ctx, field, obj)
+ case "Name":
+ out.Values[i] = ec._Event_Name(ctx, field, obj)
+ case "Namespace":
+ out.Values[i] = ec._Event_Namespace(ctx, field, obj)
+ case "Kind":
+ out.Values[i] = ec._Event_Kind(ctx, field, obj)
+ case "Message":
+ out.Values[i] = ec._Event_Message(ctx, field, obj)
+ case "Reason":
+ out.Values[i] = ec._Event_Reason(ctx, field, obj)
+ case "Host":
+ out.Values[i] = ec._Event_Host(ctx, field, obj)
+ case "Event":
+ out.Values[i] = ec._Event_Event(ctx, field, obj)
+ case "ImageName":
+ out.Values[i] = ec._Event_ImageName(ctx, field, obj)
+ case "FirstTime":
+ out.Values[i] = ec._Event_FirstTime(ctx, field, obj)
+ case "LastTime":
+ out.Values[i] = ec._Event_LastTime(ctx, field, obj)
+ case "ExpiryDate":
+ out.Values[i] = ec._Event_ExpiryDate(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var getAllResourceImplementors = []string{"GetAllResource"}
+
+func (ec *executionContext) _GetAllResource(ctx context.Context, sel ast.SelectionSet, obj *model.GetAllResource) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, getAllResourceImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("GetAllResource")
+ case "ClusterName":
+ out.Values[i] = ec._GetAllResource_ClusterName(ctx, field, obj)
+ case "Namespace":
+ out.Values[i] = ec._GetAllResource_Namespace(ctx, field, obj)
+ case "Kind":
+ out.Values[i] = ec._GetAllResource_Kind(ctx, field, obj)
+ case "Resource":
+ out.Values[i] = ec._GetAllResource_Resource(ctx, field, obj)
+ case "Age":
+ out.Values[i] = ec._GetAllResource_Age(ctx, field, obj)
+ case "EventTime":
+ out.Values[i] = ec._GetAllResource_EventTime(ctx, field, obj)
+ case "ExpiryDate":
+ out.Values[i] = ec._GetAllResource_ExpiryDate(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var kubeScoreImplementors = []string{"KubeScore"}
+
+func (ec *executionContext) _KubeScore(ctx context.Context, sel ast.SelectionSet, obj *model.KubeScore) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, kubeScoreImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("KubeScore")
+ case "id":
+ out.Values[i] = ec._KubeScore_id(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "clusterName":
+ out.Values[i] = ec._KubeScore_clusterName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "objectName":
+ out.Values[i] = ec._KubeScore_objectName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "kind":
+ out.Values[i] = ec._KubeScore_kind(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "apiVersion":
+ out.Values[i] = ec._KubeScore_apiVersion(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "name":
+ out.Values[i] = ec._KubeScore_name(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "namespace":
+ out.Values[i] = ec._KubeScore_namespace(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "targetType":
+ out.Values[i] = ec._KubeScore_targetType(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "description":
+ out.Values[i] = ec._KubeScore_description(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "path":
+ out.Values[i] = ec._KubeScore_path(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "summary":
+ out.Values[i] = ec._KubeScore_summary(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "fileName":
+ out.Values[i] = ec._KubeScore_fileName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "fileRow":
+ out.Values[i] = ec._KubeScore_fileRow(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "eventTime":
+ out.Values[i] = ec._KubeScore_eventTime(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var kubescoreImplementors = []string{"Kubescore"}
+
+func (ec *executionContext) _Kubescore(ctx context.Context, sel ast.SelectionSet, obj *model.Kubescore) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, kubescoreImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("Kubescore")
+ case "id":
+ out.Values[i] = ec._Kubescore_id(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "clusterName":
+ out.Values[i] = ec._Kubescore_clusterName(ctx, field, obj)
+ case "objectName":
+ out.Values[i] = ec._Kubescore_objectName(ctx, field, obj)
+ case "kind":
+ out.Values[i] = ec._Kubescore_kind(ctx, field, obj)
+ case "apiVersion":
+ out.Values[i] = ec._Kubescore_apiVersion(ctx, field, obj)
+ case "name":
+ out.Values[i] = ec._Kubescore_name(ctx, field, obj)
+ case "namespace":
+ out.Values[i] = ec._Kubescore_namespace(ctx, field, obj)
+ case "targetType":
+ out.Values[i] = ec._Kubescore_targetType(ctx, field, obj)
+ case "description":
+ out.Values[i] = ec._Kubescore_description(ctx, field, obj)
+ case "path":
+ out.Values[i] = ec._Kubescore_path(ctx, field, obj)
+ case "summary":
+ out.Values[i] = ec._Kubescore_summary(ctx, field, obj)
+ case "fileName":
+ out.Values[i] = ec._Kubescore_fileName(ctx, field, obj)
+ case "fileRow":
+ out.Values[i] = ec._Kubescore_fileRow(ctx, field, obj)
+ case "eventTime":
+ out.Values[i] = ec._Kubescore_eventTime(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var misconfigurationImplementors = []string{"Misconfiguration"}
+
+func (ec *executionContext) _Misconfiguration(ctx context.Context, sel ast.SelectionSet, obj *model.Misconfiguration) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, misconfigurationImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("Misconfiguration")
+ case "id":
+ out.Values[i] = ec._Misconfiguration_id(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "clusterName":
+ out.Values[i] = ec._Misconfiguration_clusterName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "namespace":
+ out.Values[i] = ec._Misconfiguration_namespace(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "kind":
+ out.Values[i] = ec._Misconfiguration_kind(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "name":
+ out.Values[i] = ec._Misconfiguration_name(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "misconfigId":
+ out.Values[i] = ec._Misconfiguration_misconfigId(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "misconfigAvdid":
+ out.Values[i] = ec._Misconfiguration_misconfigAvdid(ctx, field, obj)
+ case "misconfigType":
+ out.Values[i] = ec._Misconfiguration_misconfigType(ctx, field, obj)
+ case "misconfigTitle":
+ out.Values[i] = ec._Misconfiguration_misconfigTitle(ctx, field, obj)
+ case "misconfigDesc":
+ out.Values[i] = ec._Misconfiguration_misconfigDesc(ctx, field, obj)
+ case "misconfigMsg":
+ out.Values[i] = ec._Misconfiguration_misconfigMsg(ctx, field, obj)
+ case "misconfigQuery":
+ out.Values[i] = ec._Misconfiguration_misconfigQuery(ctx, field, obj)
+ case "misconfigResolution":
+ out.Values[i] = ec._Misconfiguration_misconfigResolution(ctx, field, obj)
+ case "misconfigSeverity":
+ out.Values[i] = ec._Misconfiguration_misconfigSeverity(ctx, field, obj)
+ case "misconfigStatus":
+ out.Values[i] = ec._Misconfiguration_misconfigStatus(ctx, field, obj)
+ case "eventTime":
+ out.Values[i] = ec._Misconfiguration_eventTime(ctx, field, obj)
+ case "expiryDate":
+ out.Values[i] = ec._Misconfiguration_expiryDate(ctx, field, obj)
+ case "exportedAt":
+ out.Values[i] = ec._Misconfiguration_exportedAt(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var namespaceImplementors = []string{"Namespace"}
+
+func (ec *executionContext) _Namespace(ctx context.Context, sel ast.SelectionSet, obj *model.Namespace) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, namespaceImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("Namespace")
+ case "name":
+ out.Values[i] = ec._Namespace_name(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var namespaceDataImplementors = []string{"NamespaceData"}
+
+func (ec *executionContext) _NamespaceData(ctx context.Context, sel ast.SelectionSet, obj *model.NamespaceData) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, namespaceDataImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("NamespaceData")
+ case "namespace":
+ out.Values[i] = ec._NamespaceData_namespace(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "outdatedImages":
+ out.Values[i] = ec._NamespaceData_outdatedImages(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "kubeScores":
+ out.Values[i] = ec._NamespaceData_kubeScores(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "resources":
+ out.Values[i] = ec._NamespaceData_resources(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var outdatedImageImplementors = []string{"OutdatedImage"}
+
+func (ec *executionContext) _OutdatedImage(ctx context.Context, sel ast.SelectionSet, obj *model.OutdatedImage) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, outdatedImageImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("OutdatedImage")
+ case "clusterName":
+ out.Values[i] = ec._OutdatedImage_clusterName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "namespace":
+ out.Values[i] = ec._OutdatedImage_namespace(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "pod":
+ out.Values[i] = ec._OutdatedImage_pod(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "currentImage":
+ out.Values[i] = ec._OutdatedImage_currentImage(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "currentTag":
+ out.Values[i] = ec._OutdatedImage_currentTag(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "latestVersion":
+ out.Values[i] = ec._OutdatedImage_latestVersion(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "versionsBehind":
+ out.Values[i] = ec._OutdatedImage_versionsBehind(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "eventTime":
+ out.Values[i] = ec._OutdatedImage_eventTime(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
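+// The Query marshaler differs from the object marshalers above: every root
+// field is wrapped in OperationContext.RootResolverMiddleware and scheduled
+// with out.Concurrently, so root resolvers run in parallel with their own
+// panic recovery, while the __type and __schema introspection fields are
+// resolved inline.
+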
+var queryImplementors = []string{"Query"}
+
+func (ec *executionContext) _Query(ctx context.Context, sel ast.SelectionSet) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, queryImplementors)
+ ctx = graphql.WithFieldContext(ctx, &graphql.FieldContext{
+ Object: "Query",
+ })
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ innerCtx := graphql.WithRootFieldContext(ctx, &graphql.RootFieldContext{
+ Object: field.Name,
+ Field: field,
+ })
+
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("Query")
+ case "allEvents":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_allEvents(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "allRakkess":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_allRakkess(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "allDeprecatedAPIs":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_allDeprecatedAPIs(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "allDeletedAPIs":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_allDeletedAPIs(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "allGetAllResources":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_allGetAllResources(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "allTrivySBOMs":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_allTrivySBOMs(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "allTrivyImages":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_allTrivyImages(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "allKubeScores":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_allKubeScores(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "allTrivyVuls":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_allTrivyVuls(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "allTrivyMisconfigs":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_allTrivyMisconfigs(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "uniqueNamespaces":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_uniqueNamespaces(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "uniqueClusters":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_uniqueClusters(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "outdatedImagesByClusterAndNamespace":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_outdatedImagesByClusterAndNamespace(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "outdatedImagesCount":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_outdatedImagesCount(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "allClusterNamespaceOutdatedCounts":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_allClusterNamespaceOutdatedCounts(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "allClusterDeprecatedAPIsCounts":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_allClusterDeprecatedAPIsCounts(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "allClusterDeletedAPIsCounts":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_allClusterDeletedAPIsCounts(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "allClusterNamespaceResourceCounts":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_allClusterNamespaceResourceCounts(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "eventsByClusterAndNamespace":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_eventsByClusterAndNamespace(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "vulnerabilities":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_vulnerabilities(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "misconfigurations":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_misconfigurations(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "kubescores":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_kubescores(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "getAllResources":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_getAllResources(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "trivyImages":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_trivyImages(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "deprecatedAPIs":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_deprecatedAPIs(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "deletedAPIs":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_deletedAPIs(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "trivySBOMs":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_trivySBOMs(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "trivyVulCount":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_trivyVulCount(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "trivyMisconfigCount":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_trivyMisconfigCount(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "deletedAPICount":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_deletedAPICount(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "trivyImageCount":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_trivyImageCount(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "deprecatedAPICount":
+ field := field
+
+ innerFunc := func(ctx context.Context, fs *graphql.FieldSet) (res graphql.Marshaler) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ }
+ }()
+ res = ec._Query_deprecatedAPICount(ctx, field)
+ if res == graphql.Null {
+ atomic.AddUint32(&fs.Invalids, 1)
+ }
+ return res
+ }
+
+ rrm := func(ctx context.Context) graphql.Marshaler {
+ return ec.OperationContext.RootResolverMiddleware(ctx,
+ func(ctx context.Context) graphql.Marshaler { return innerFunc(ctx, out) })
+ }
+
+ out.Concurrently(i, func(ctx context.Context) graphql.Marshaler { return rrm(innerCtx) })
+ case "__type":
+ out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
+ return ec._Query___type(ctx, field)
+ })
+ case "__schema":
+ out.Values[i] = ec.OperationContext.RootResolverMiddleware(innerCtx, func(ctx context.Context) (res graphql.Marshaler) {
+ return ec._Query___schema(ctx, field)
+ })
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var rakkessImplementors = []string{"Rakkess"}
+
+func (ec *executionContext) _Rakkess(ctx context.Context, sel ast.SelectionSet, obj *model.Rakkess) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, rakkessImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("Rakkess")
+ case "ClusterName":
+ out.Values[i] = ec._Rakkess_ClusterName(ctx, field, obj)
+ case "Name":
+ out.Values[i] = ec._Rakkess_Name(ctx, field, obj)
+ case "Create":
+ out.Values[i] = ec._Rakkess_Create(ctx, field, obj)
+ case "Delete":
+ out.Values[i] = ec._Rakkess_Delete(ctx, field, obj)
+ case "List":
+ out.Values[i] = ec._Rakkess_List(ctx, field, obj)
+ case "Update":
+ out.Values[i] = ec._Rakkess_Update(ctx, field, obj)
+ case "EventTime":
+ out.Values[i] = ec._Rakkess_EventTime(ctx, field, obj)
+ case "ExpiryDate":
+ out.Values[i] = ec._Rakkess_ExpiryDate(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var resourceImplementors = []string{"Resource"}
+
+func (ec *executionContext) _Resource(ctx context.Context, sel ast.SelectionSet, obj *model.Resource) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, resourceImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("Resource")
+ case "clusterName":
+ out.Values[i] = ec._Resource_clusterName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "namespace":
+ out.Values[i] = ec._Resource_namespace(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "kind":
+ out.Values[i] = ec._Resource_kind(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "resource":
+ out.Values[i] = ec._Resource_resource(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "age":
+ out.Values[i] = ec._Resource_age(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "eventTime":
+ out.Values[i] = ec._Resource_eventTime(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var trivyImageImplementors = []string{"TrivyImage"}
+
+func (ec *executionContext) _TrivyImage(ctx context.Context, sel ast.SelectionSet, obj *model.TrivyImage) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, trivyImageImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("TrivyImage")
+ case "id":
+ out.Values[i] = ec._TrivyImage_id(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "clusterName":
+ out.Values[i] = ec._TrivyImage_clusterName(ctx, field, obj)
+ case "artifactName":
+ out.Values[i] = ec._TrivyImage_artifactName(ctx, field, obj)
+ case "vulId":
+ out.Values[i] = ec._TrivyImage_vulId(ctx, field, obj)
+ case "vulPkgId":
+ out.Values[i] = ec._TrivyImage_vulPkgId(ctx, field, obj)
+ case "vulPkgName":
+ out.Values[i] = ec._TrivyImage_vulPkgName(ctx, field, obj)
+ case "vulInstalledVersion":
+ out.Values[i] = ec._TrivyImage_vulInstalledVersion(ctx, field, obj)
+ case "vulFixedVersion":
+ out.Values[i] = ec._TrivyImage_vulFixedVersion(ctx, field, obj)
+ case "vulTitle":
+ out.Values[i] = ec._TrivyImage_vulTitle(ctx, field, obj)
+ case "vulSeverity":
+ out.Values[i] = ec._TrivyImage_vulSeverity(ctx, field, obj)
+ case "vulPublishedDate":
+ out.Values[i] = ec._TrivyImage_vulPublishedDate(ctx, field, obj)
+ case "vulLastModifiedDate":
+ out.Values[i] = ec._TrivyImage_vulLastModifiedDate(ctx, field, obj)
+ case "expiryDate":
+ out.Values[i] = ec._TrivyImage_expiryDate(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var trivyImageCountImplementors = []string{"TrivyImageCount"}
+
+func (ec *executionContext) _TrivyImageCount(ctx context.Context, sel ast.SelectionSet, obj *model.TrivyImageCount) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, trivyImageCountImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("TrivyImageCount")
+ case "clusterName":
+ out.Values[i] = ec._TrivyImageCount_clusterName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "ImageCount":
+ out.Values[i] = ec._TrivyImageCount_ImageCount(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var trivyMisconfigImplementors = []string{"TrivyMisconfig"}
+
+func (ec *executionContext) _TrivyMisconfig(ctx context.Context, sel ast.SelectionSet, obj *model.TrivyMisconfig) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, trivyMisconfigImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("TrivyMisconfig")
+ case "id":
+ out.Values[i] = ec._TrivyMisconfig_id(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "clusterName":
+ out.Values[i] = ec._TrivyMisconfig_clusterName(ctx, field, obj)
+ case "namespace":
+ out.Values[i] = ec._TrivyMisconfig_namespace(ctx, field, obj)
+ case "kind":
+ out.Values[i] = ec._TrivyMisconfig_kind(ctx, field, obj)
+ case "name":
+ out.Values[i] = ec._TrivyMisconfig_name(ctx, field, obj)
+ case "misconfigId":
+ out.Values[i] = ec._TrivyMisconfig_misconfigId(ctx, field, obj)
+ case "misconfigAvdid":
+ out.Values[i] = ec._TrivyMisconfig_misconfigAvdid(ctx, field, obj)
+ case "misconfigType":
+ out.Values[i] = ec._TrivyMisconfig_misconfigType(ctx, field, obj)
+ case "misconfigTitle":
+ out.Values[i] = ec._TrivyMisconfig_misconfigTitle(ctx, field, obj)
+ case "misconfigDesc":
+ out.Values[i] = ec._TrivyMisconfig_misconfigDesc(ctx, field, obj)
+ case "misconfigMsg":
+ out.Values[i] = ec._TrivyMisconfig_misconfigMsg(ctx, field, obj)
+ case "misconfigQuery":
+ out.Values[i] = ec._TrivyMisconfig_misconfigQuery(ctx, field, obj)
+ case "misconfigResolution":
+ out.Values[i] = ec._TrivyMisconfig_misconfigResolution(ctx, field, obj)
+ case "misconfigSeverity":
+ out.Values[i] = ec._TrivyMisconfig_misconfigSeverity(ctx, field, obj)
+ case "misconfigStatus":
+ out.Values[i] = ec._TrivyMisconfig_misconfigStatus(ctx, field, obj)
+ case "eventTime":
+ out.Values[i] = ec._TrivyMisconfig_eventTime(ctx, field, obj)
+ case "expiryDate":
+ out.Values[i] = ec._TrivyMisconfig_expiryDate(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var trivySBOMImplementors = []string{"TrivySBOM"}
+
+func (ec *executionContext) _TrivySBOM(ctx context.Context, sel ast.SelectionSet, obj *model.TrivySbom) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, trivySBOMImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("TrivySBOM")
+ case "id":
+ out.Values[i] = ec._TrivySBOM_id(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "clusterName":
+ out.Values[i] = ec._TrivySBOM_clusterName(ctx, field, obj)
+ case "imageName":
+ out.Values[i] = ec._TrivySBOM_imageName(ctx, field, obj)
+ case "packageName":
+ out.Values[i] = ec._TrivySBOM_packageName(ctx, field, obj)
+ case "packageUrl":
+ out.Values[i] = ec._TrivySBOM_packageUrl(ctx, field, obj)
+ case "bomRef":
+ out.Values[i] = ec._TrivySBOM_bomRef(ctx, field, obj)
+ case "serialNumber":
+ out.Values[i] = ec._TrivySBOM_serialNumber(ctx, field, obj)
+ case "version":
+ out.Values[i] = ec._TrivySBOM_version(ctx, field, obj)
+ case "bomFormat":
+ out.Values[i] = ec._TrivySBOM_bomFormat(ctx, field, obj)
+ case "expiryDate":
+ out.Values[i] = ec._TrivySBOM_expiryDate(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var trivyVulImplementors = []string{"TrivyVul"}
+
+func (ec *executionContext) _TrivyVul(ctx context.Context, sel ast.SelectionSet, obj *model.TrivyVul) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, trivyVulImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("TrivyVul")
+ case "id":
+ out.Values[i] = ec._TrivyVul_id(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "clusterName":
+ out.Values[i] = ec._TrivyVul_clusterName(ctx, field, obj)
+ case "namespace":
+ out.Values[i] = ec._TrivyVul_namespace(ctx, field, obj)
+ case "kind":
+ out.Values[i] = ec._TrivyVul_kind(ctx, field, obj)
+ case "name":
+ out.Values[i] = ec._TrivyVul_name(ctx, field, obj)
+ case "vulId":
+ out.Values[i] = ec._TrivyVul_vulId(ctx, field, obj)
+ case "vulVendorIds":
+ out.Values[i] = ec._TrivyVul_vulVendorIds(ctx, field, obj)
+ case "vulPkgId":
+ out.Values[i] = ec._TrivyVul_vulPkgId(ctx, field, obj)
+ case "vulPkgName":
+ out.Values[i] = ec._TrivyVul_vulPkgName(ctx, field, obj)
+ case "vulPkgPath":
+ out.Values[i] = ec._TrivyVul_vulPkgPath(ctx, field, obj)
+ case "vulInstalledVersion":
+ out.Values[i] = ec._TrivyVul_vulInstalledVersion(ctx, field, obj)
+ case "vulFixedVersion":
+ out.Values[i] = ec._TrivyVul_vulFixedVersion(ctx, field, obj)
+ case "vulTitle":
+ out.Values[i] = ec._TrivyVul_vulTitle(ctx, field, obj)
+ case "vulSeverity":
+ out.Values[i] = ec._TrivyVul_vulSeverity(ctx, field, obj)
+ case "vulPublishedDate":
+ out.Values[i] = ec._TrivyVul_vulPublishedDate(ctx, field, obj)
+ case "vulLastModifiedDate":
+ out.Values[i] = ec._TrivyVul_vulLastModifiedDate(ctx, field, obj)
+ case "expiryDate":
+ out.Values[i] = ec._TrivyVul_expiryDate(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var vulnerabilityImplementors = []string{"Vulnerability"}
+
+func (ec *executionContext) _Vulnerability(ctx context.Context, sel ast.SelectionSet, obj *model.Vulnerability) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, vulnerabilityImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("Vulnerability")
+ case "id":
+ out.Values[i] = ec._Vulnerability_id(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "clusterName":
+ out.Values[i] = ec._Vulnerability_clusterName(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "namespace":
+ out.Values[i] = ec._Vulnerability_namespace(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "kind":
+ out.Values[i] = ec._Vulnerability_kind(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "name":
+ out.Values[i] = ec._Vulnerability_name(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "vulId":
+ out.Values[i] = ec._Vulnerability_vulId(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "vulVendorIds":
+ out.Values[i] = ec._Vulnerability_vulVendorIds(ctx, field, obj)
+ case "vulPkgId":
+ out.Values[i] = ec._Vulnerability_vulPkgId(ctx, field, obj)
+ case "vulPkgName":
+ out.Values[i] = ec._Vulnerability_vulPkgName(ctx, field, obj)
+ case "vulPkgPath":
+ out.Values[i] = ec._Vulnerability_vulPkgPath(ctx, field, obj)
+ case "vulInstalledVersion":
+ out.Values[i] = ec._Vulnerability_vulInstalledVersion(ctx, field, obj)
+ case "vulFixedVersion":
+ out.Values[i] = ec._Vulnerability_vulFixedVersion(ctx, field, obj)
+ case "vulTitle":
+ out.Values[i] = ec._Vulnerability_vulTitle(ctx, field, obj)
+ case "vulSeverity":
+ out.Values[i] = ec._Vulnerability_vulSeverity(ctx, field, obj)
+ case "vulPublishedDate":
+ out.Values[i] = ec._Vulnerability_vulPublishedDate(ctx, field, obj)
+ case "vulLastModifiedDate":
+ out.Values[i] = ec._Vulnerability_vulLastModifiedDate(ctx, field, obj)
+ case "expiryDate":
+ out.Values[i] = ec._Vulnerability_expiryDate(ctx, field, obj)
+ case "exportedAt":
+ out.Values[i] = ec._Vulnerability_exportedAt(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var __DirectiveImplementors = []string{"__Directive"}
+
+func (ec *executionContext) ___Directive(ctx context.Context, sel ast.SelectionSet, obj *introspection.Directive) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, __DirectiveImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("__Directive")
+ case "name":
+ out.Values[i] = ec.___Directive_name(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "description":
+ out.Values[i] = ec.___Directive_description(ctx, field, obj)
+ case "locations":
+ out.Values[i] = ec.___Directive_locations(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "args":
+ out.Values[i] = ec.___Directive_args(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "isRepeatable":
+ out.Values[i] = ec.___Directive_isRepeatable(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var __EnumValueImplementors = []string{"__EnumValue"}
+
+func (ec *executionContext) ___EnumValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.EnumValue) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, __EnumValueImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("__EnumValue")
+ case "name":
+ out.Values[i] = ec.___EnumValue_name(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "description":
+ out.Values[i] = ec.___EnumValue_description(ctx, field, obj)
+ case "isDeprecated":
+ out.Values[i] = ec.___EnumValue_isDeprecated(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "deprecationReason":
+ out.Values[i] = ec.___EnumValue_deprecationReason(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var __FieldImplementors = []string{"__Field"}
+
+func (ec *executionContext) ___Field(ctx context.Context, sel ast.SelectionSet, obj *introspection.Field) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, __FieldImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("__Field")
+ case "name":
+ out.Values[i] = ec.___Field_name(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "description":
+ out.Values[i] = ec.___Field_description(ctx, field, obj)
+ case "args":
+ out.Values[i] = ec.___Field_args(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "type":
+ out.Values[i] = ec.___Field_type(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "isDeprecated":
+ out.Values[i] = ec.___Field_isDeprecated(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "deprecationReason":
+ out.Values[i] = ec.___Field_deprecationReason(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var __InputValueImplementors = []string{"__InputValue"}
+
+func (ec *executionContext) ___InputValue(ctx context.Context, sel ast.SelectionSet, obj *introspection.InputValue) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, __InputValueImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("__InputValue")
+ case "name":
+ out.Values[i] = ec.___InputValue_name(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "description":
+ out.Values[i] = ec.___InputValue_description(ctx, field, obj)
+ case "type":
+ out.Values[i] = ec.___InputValue_type(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "defaultValue":
+ out.Values[i] = ec.___InputValue_defaultValue(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var __SchemaImplementors = []string{"__Schema"}
+
+func (ec *executionContext) ___Schema(ctx context.Context, sel ast.SelectionSet, obj *introspection.Schema) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, __SchemaImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("__Schema")
+ case "description":
+ out.Values[i] = ec.___Schema_description(ctx, field, obj)
+ case "types":
+ out.Values[i] = ec.___Schema_types(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "queryType":
+ out.Values[i] = ec.___Schema_queryType(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "mutationType":
+ out.Values[i] = ec.___Schema_mutationType(ctx, field, obj)
+ case "subscriptionType":
+ out.Values[i] = ec.___Schema_subscriptionType(ctx, field, obj)
+ case "directives":
+ out.Values[i] = ec.___Schema_directives(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+var __TypeImplementors = []string{"__Type"}
+
+func (ec *executionContext) ___Type(ctx context.Context, sel ast.SelectionSet, obj *introspection.Type) graphql.Marshaler {
+ fields := graphql.CollectFields(ec.OperationContext, sel, __TypeImplementors)
+
+ out := graphql.NewFieldSet(fields)
+ deferred := make(map[string]*graphql.FieldSet)
+ for i, field := range fields {
+ switch field.Name {
+ case "__typename":
+ out.Values[i] = graphql.MarshalString("__Type")
+ case "kind":
+ out.Values[i] = ec.___Type_kind(ctx, field, obj)
+ if out.Values[i] == graphql.Null {
+ out.Invalids++
+ }
+ case "name":
+ out.Values[i] = ec.___Type_name(ctx, field, obj)
+ case "description":
+ out.Values[i] = ec.___Type_description(ctx, field, obj)
+ case "fields":
+ out.Values[i] = ec.___Type_fields(ctx, field, obj)
+ case "interfaces":
+ out.Values[i] = ec.___Type_interfaces(ctx, field, obj)
+ case "possibleTypes":
+ out.Values[i] = ec.___Type_possibleTypes(ctx, field, obj)
+ case "enumValues":
+ out.Values[i] = ec.___Type_enumValues(ctx, field, obj)
+ case "inputFields":
+ out.Values[i] = ec.___Type_inputFields(ctx, field, obj)
+ case "ofType":
+ out.Values[i] = ec.___Type_ofType(ctx, field, obj)
+ case "specifiedByURL":
+ out.Values[i] = ec.___Type_specifiedByURL(ctx, field, obj)
+ default:
+ panic("unknown field " + strconv.Quote(field.Name))
+ }
+ }
+ out.Dispatch(ctx)
+ if out.Invalids > 0 {
+ return graphql.Null
+ }
+
+ atomic.AddInt32(&ec.deferred, int32(len(deferred)))
+
+ for label, dfs := range deferred {
+ ec.processDeferredGroup(graphql.DeferredGroup{
+ Label: label,
+ Path: graphql.GetPath(ctx),
+ FieldSet: dfs,
+ Context: ctx,
+ })
+ }
+
+ return out
+}
+
+// endregion **************************** object.gotpl ****************************
+
+// region ***************************** type.gotpl *****************************
+
+func (ec *executionContext) unmarshalNBoolean2bool(ctx context.Context, v interface{}) (bool, error) {
+ res, err := graphql.UnmarshalBoolean(v)
+ return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNBoolean2bool(ctx context.Context, sel ast.SelectionSet, v bool) graphql.Marshaler {
+ res := graphql.MarshalBoolean(v)
+ if res == graphql.Null {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ }
+ return res
+}
+
+func (ec *executionContext) marshalNCluster2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Cluster) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNCluster2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐCluster(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNCluster2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐCluster(ctx context.Context, sel ast.SelectionSet, v *model.Cluster) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._Cluster(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNClusterAPIsCount2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterAPIsCountᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.ClusterAPIsCount) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNClusterAPIsCount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterAPIsCount(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNClusterAPIsCount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterAPIsCount(ctx context.Context, sel ast.SelectionSet, v *model.ClusterAPIsCount) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._ClusterAPIsCount(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNClusterDeletedAPICount2githubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterDeletedAPICount(ctx context.Context, sel ast.SelectionSet, v model.ClusterDeletedAPICount) graphql.Marshaler {
+ return ec._ClusterDeletedAPICount(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalNClusterDeletedAPICount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterDeletedAPICount(ctx context.Context, sel ast.SelectionSet, v *model.ClusterDeletedAPICount) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._ClusterDeletedAPICount(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNClusterDeprecatedAPICount2githubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterDeprecatedAPICount(ctx context.Context, sel ast.SelectionSet, v model.ClusterDeprecatedAPICount) graphql.Marshaler {
+ return ec._ClusterDeprecatedAPICount(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalNClusterDeprecatedAPICount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterDeprecatedAPICount(ctx context.Context, sel ast.SelectionSet, v *model.ClusterDeprecatedAPICount) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._ClusterDeprecatedAPICount(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNClusterNamespaceMisconfigCount2githubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterNamespaceMisconfigCount(ctx context.Context, sel ast.SelectionSet, v model.ClusterNamespaceMisconfigCount) graphql.Marshaler {
+ return ec._ClusterNamespaceMisconfigCount(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalNClusterNamespaceMisconfigCount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterNamespaceMisconfigCount(ctx context.Context, sel ast.SelectionSet, v *model.ClusterNamespaceMisconfigCount) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._ClusterNamespaceMisconfigCount(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNClusterNamespaceOutdatedCount2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterNamespaceOutdatedCountᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.ClusterNamespaceOutdatedCount) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNClusterNamespaceOutdatedCount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterNamespaceOutdatedCount(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNClusterNamespaceOutdatedCount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterNamespaceOutdatedCount(ctx context.Context, sel ast.SelectionSet, v *model.ClusterNamespaceOutdatedCount) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._ClusterNamespaceOutdatedCount(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNClusterNamespaceResourceCount2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterNamespaceResourceCountᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.ClusterNamespaceResourceCount) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNClusterNamespaceResourceCount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterNamespaceResourceCount(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNClusterNamespaceResourceCount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterNamespaceResourceCount(ctx context.Context, sel ast.SelectionSet, v *model.ClusterNamespaceResourceCount) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._ClusterNamespaceResourceCount(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNClusterNamespaceVulCount2githubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterNamespaceVulCount(ctx context.Context, sel ast.SelectionSet, v model.ClusterNamespaceVulCount) graphql.Marshaler {
+ return ec._ClusterNamespaceVulCount(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalNClusterNamespaceVulCount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐClusterNamespaceVulCount(ctx context.Context, sel ast.SelectionSet, v *model.ClusterNamespaceVulCount) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._ClusterNamespaceVulCount(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNDeletedAPI2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐDeletedAPIᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.DeletedAPI) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNDeletedAPI2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐDeletedAPI(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNDeletedAPI2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐDeletedAPI(ctx context.Context, sel ast.SelectionSet, v *model.DeletedAPI) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._DeletedAPI(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNDeprecatedAPI2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐDeprecatedAPIᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.DeprecatedAPI) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNDeprecatedAPI2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐDeprecatedAPI(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNDeprecatedAPI2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐDeprecatedAPI(ctx context.Context, sel ast.SelectionSet, v *model.DeprecatedAPI) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._DeprecatedAPI(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNEvent2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐEventᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Event) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNEvent2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐEvent(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNEvent2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐEvent(ctx context.Context, sel ast.SelectionSet, v *model.Event) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._Event(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNGetAllResource2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐGetAllResourceᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.GetAllResource) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNGetAllResource2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐGetAllResource(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNGetAllResource2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐGetAllResource(ctx context.Context, sel ast.SelectionSet, v *model.GetAllResource) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._GetAllResource(ctx, sel, v)
+}
+
+func (ec *executionContext) unmarshalNID2string(ctx context.Context, v interface{}) (string, error) {
+ res, err := graphql.UnmarshalID(v)
+ return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNID2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
+ res := graphql.MarshalID(v)
+ if res == graphql.Null {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ }
+ return res
+}
+
+func (ec *executionContext) unmarshalNInt2int(ctx context.Context, v interface{}) (int, error) {
+ res, err := graphql.UnmarshalInt(v)
+ return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNInt2int(ctx context.Context, sel ast.SelectionSet, v int) graphql.Marshaler {
+ res := graphql.MarshalInt(v)
+ if res == graphql.Null {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ }
+ return res
+}
+
+func (ec *executionContext) marshalNKubeScore2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐKubeScoreᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.KubeScore) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNKubeScore2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐKubeScore(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNKubeScore2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐKubeScore(ctx context.Context, sel ast.SelectionSet, v *model.KubeScore) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._KubeScore(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNKubescore2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐKubescoreᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Kubescore) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNKubescore2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐKubescore(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNKubescore2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐKubescore(ctx context.Context, sel ast.SelectionSet, v *model.Kubescore) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._Kubescore(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNMisconfiguration2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐMisconfigurationᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Misconfiguration) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNMisconfiguration2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐMisconfiguration(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNMisconfiguration2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐMisconfiguration(ctx context.Context, sel ast.SelectionSet, v *model.Misconfiguration) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._Misconfiguration(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNNamespace2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐNamespaceᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Namespace) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNNamespace2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐNamespace(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNNamespace2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐNamespace(ctx context.Context, sel ast.SelectionSet, v *model.Namespace) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._Namespace(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNOutdatedImage2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐOutdatedImageᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.OutdatedImage) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNOutdatedImage2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐOutdatedImage(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNOutdatedImage2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐOutdatedImage(ctx context.Context, sel ast.SelectionSet, v *model.OutdatedImage) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._OutdatedImage(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNRakkess2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐRakkessᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Rakkess) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNRakkess2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐRakkess(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNRakkess2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐRakkess(ctx context.Context, sel ast.SelectionSet, v *model.Rakkess) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._Rakkess(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNResource2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐResourceᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Resource) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNResource2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐResource(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNResource2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐResource(ctx context.Context, sel ast.SelectionSet, v *model.Resource) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._Resource(ctx, sel, v)
+}
+
+func (ec *executionContext) unmarshalNString2string(ctx context.Context, v interface{}) (string, error) {
+ res, err := graphql.UnmarshalString(v)
+ return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalNString2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
+ res := graphql.MarshalString(v)
+ if res == graphql.Null {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ }
+ return res
+}
+
+func (ec *executionContext) marshalNTrivyImage2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyImageᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.TrivyImage) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNTrivyImage2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyImage(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNTrivyImage2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyImage(ctx context.Context, sel ast.SelectionSet, v *model.TrivyImage) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._TrivyImage(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNTrivyImageCount2githubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyImageCount(ctx context.Context, sel ast.SelectionSet, v model.TrivyImageCount) graphql.Marshaler {
+ return ec._TrivyImageCount(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalNTrivyImageCount2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyImageCount(ctx context.Context, sel ast.SelectionSet, v *model.TrivyImageCount) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._TrivyImageCount(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNTrivyMisconfig2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyMisconfigᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.TrivyMisconfig) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNTrivyMisconfig2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyMisconfig(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNTrivyMisconfig2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyMisconfig(ctx context.Context, sel ast.SelectionSet, v *model.TrivyMisconfig) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._TrivyMisconfig(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNTrivySBOM2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivySbomᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.TrivySbom) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNTrivySBOM2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivySbom(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNTrivySBOM2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivySbom(ctx context.Context, sel ast.SelectionSet, v *model.TrivySbom) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._TrivySBOM(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNTrivyVul2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyVulᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.TrivyVul) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNTrivyVul2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyVul(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNTrivyVul2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐTrivyVul(ctx context.Context, sel ast.SelectionSet, v *model.TrivyVul) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._TrivyVul(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalNVulnerability2ᚕᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐVulnerabilityᚄ(ctx context.Context, sel ast.SelectionSet, v []*model.Vulnerability) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalNVulnerability2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐVulnerability(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalNVulnerability2ᚖgithubᚗcomᚋintelopsᚋkubvizᚋgraphqlserverᚋgraphᚋmodelᚐVulnerability(ctx context.Context, sel ast.SelectionSet, v *model.Vulnerability) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec._Vulnerability(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx context.Context, sel ast.SelectionSet, v introspection.Directive) graphql.Marshaler {
+ return ec.___Directive(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalN__Directive2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirectiveᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Directive) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalN__Directive2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐDirective(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) unmarshalN__DirectiveLocation2string(ctx context.Context, v interface{}) (string, error) {
+ res, err := graphql.UnmarshalString(v)
+ return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalN__DirectiveLocation2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
+ res := graphql.MarshalString(v)
+ if res == graphql.Null {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ }
+ return res
+}
+
+func (ec *executionContext) unmarshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, v interface{}) ([]string, error) {
+ var vSlice []interface{}
+ if v != nil {
+ vSlice = graphql.CoerceList(v)
+ }
+ var err error
+ res := make([]string, len(vSlice))
+ for i := range vSlice {
+ ctx := graphql.WithPathContext(ctx, graphql.NewPathWithIndex(i))
+ res[i], err = ec.unmarshalN__DirectiveLocation2string(ctx, vSlice[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return res, nil
+}
+
+func (ec *executionContext) marshalN__DirectiveLocation2ᚕstringᚄ(ctx context.Context, sel ast.SelectionSet, v []string) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalN__DirectiveLocation2string(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalN__EnumValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx context.Context, sel ast.SelectionSet, v introspection.EnumValue) graphql.Marshaler {
+ return ec.___EnumValue(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalN__Field2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx context.Context, sel ast.SelectionSet, v introspection.Field) graphql.Marshaler {
+ return ec.___Field(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx context.Context, sel ast.SelectionSet, v introspection.InputValue) graphql.Marshaler {
+ return ec.___InputValue(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalN__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v introspection.Type) graphql.Marshaler {
+ return ec.___Type(ctx, sel, &v)
+}
+
+func (ec *executionContext) marshalN__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler {
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalN__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler {
+ if v == nil {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ return graphql.Null
+ }
+ return ec.___Type(ctx, sel, v)
+}
+
+func (ec *executionContext) unmarshalN__TypeKind2string(ctx context.Context, v interface{}) (string, error) {
+ res, err := graphql.UnmarshalString(v)
+ return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalN__TypeKind2string(ctx context.Context, sel ast.SelectionSet, v string) graphql.Marshaler {
+ res := graphql.MarshalString(v)
+ if res == graphql.Null {
+ if !graphql.HasFieldError(ctx, graphql.GetFieldContext(ctx)) {
+ ec.Errorf(ctx, "the requested element is null which the schema does not allow")
+ }
+ }
+ return res
+}
+
+func (ec *executionContext) unmarshalOBoolean2bool(ctx context.Context, v interface{}) (bool, error) {
+ res, err := graphql.UnmarshalBoolean(v)
+ return res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalOBoolean2bool(ctx context.Context, sel ast.SelectionSet, v bool) graphql.Marshaler {
+ res := graphql.MarshalBoolean(v)
+ return res
+}
+
+func (ec *executionContext) unmarshalOBoolean2ᚖbool(ctx context.Context, v interface{}) (*bool, error) {
+ if v == nil {
+ return nil, nil
+ }
+ res, err := graphql.UnmarshalBoolean(v)
+ return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalOBoolean2ᚖbool(ctx context.Context, sel ast.SelectionSet, v *bool) graphql.Marshaler {
+ if v == nil {
+ return graphql.Null
+ }
+ res := graphql.MarshalBoolean(*v)
+ return res
+}
+
+func (ec *executionContext) unmarshalOInt2ᚖint(ctx context.Context, v interface{}) (*int, error) {
+ if v == nil {
+ return nil, nil
+ }
+ res, err := graphql.UnmarshalInt(v)
+ return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalOInt2ᚖint(ctx context.Context, sel ast.SelectionSet, v *int) graphql.Marshaler {
+ if v == nil {
+ return graphql.Null
+ }
+ res := graphql.MarshalInt(*v)
+ return res
+}
+
+func (ec *executionContext) unmarshalOString2ᚖstring(ctx context.Context, v interface{}) (*string, error) {
+ if v == nil {
+ return nil, nil
+ }
+ res, err := graphql.UnmarshalString(v)
+ return &res, graphql.ErrorOnPath(ctx, err)
+}
+
+func (ec *executionContext) marshalOString2ᚖstring(ctx context.Context, sel ast.SelectionSet, v *string) graphql.Marshaler {
+ if v == nil {
+ return graphql.Null
+ }
+ res := graphql.MarshalString(*v)
+ return res
+}
+
+func (ec *executionContext) marshalO__EnumValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.EnumValue) graphql.Marshaler {
+ if v == nil {
+ return graphql.Null
+ }
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalN__EnumValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐEnumValue(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalO__Field2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐFieldᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Field) graphql.Marshaler {
+ if v == nil {
+ return graphql.Null
+ }
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalN__Field2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐField(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalO__InputValue2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValueᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.InputValue) graphql.Marshaler {
+ if v == nil {
+ return graphql.Null
+ }
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalN__InputValue2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐInputValue(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalO__Schema2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐSchema(ctx context.Context, sel ast.SelectionSet, v *introspection.Schema) graphql.Marshaler {
+ if v == nil {
+ return graphql.Null
+ }
+ return ec.___Schema(ctx, sel, v)
+}
+
+func (ec *executionContext) marshalO__Type2ᚕgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐTypeᚄ(ctx context.Context, sel ast.SelectionSet, v []introspection.Type) graphql.Marshaler {
+ if v == nil {
+ return graphql.Null
+ }
+ ret := make(graphql.Array, len(v))
+ var wg sync.WaitGroup
+ isLen1 := len(v) == 1
+ if !isLen1 {
+ wg.Add(len(v))
+ }
+ for i := range v {
+ i := i
+ fc := &graphql.FieldContext{
+ Index: &i,
+ Result: &v[i],
+ }
+ ctx := graphql.WithFieldContext(ctx, fc)
+ f := func(i int) {
+ defer func() {
+ if r := recover(); r != nil {
+ ec.Error(ctx, ec.Recover(ctx, r))
+ ret = nil
+ }
+ }()
+ if !isLen1 {
+ defer wg.Done()
+ }
+ ret[i] = ec.marshalN__Type2githubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx, sel, v[i])
+ }
+ if isLen1 {
+ f(i)
+ } else {
+ go f(i)
+ }
+
+ }
+ wg.Wait()
+
+ for _, e := range ret {
+ if e == graphql.Null {
+ return graphql.Null
+ }
+ }
+
+ return ret
+}
+
+func (ec *executionContext) marshalO__Type2ᚖgithubᚗcomᚋ99designsᚋgqlgenᚋgraphqlᚋintrospectionᚐType(ctx context.Context, sel ast.SelectionSet, v *introspection.Type) graphql.Marshaler {
+ if v == nil {
+ return graphql.Null
+ }
+ return ec.___Type(ctx, sel, v)
+}
+
+// endregion ***************************** type.gotpl *****************************
diff --git a/graphqlserver/graph/model/models_gen.go b/graphqlserver/graph/model/models_gen.go
new file mode 100644
index 00000000..41e4076b
--- /dev/null
+++ b/graphqlserver/graph/model/models_gen.go
@@ -0,0 +1,293 @@
+// Code generated by github.com/99designs/gqlgen, DO NOT EDIT.
+
+package model
+
+type Cluster struct {
+ Name string `json:"name"`
+}
+
+type ClusterAPIsCount struct {
+ ClusterName string `json:"clusterName"`
+ Count int `json:"count"`
+}
+
+type ClusterDeletedAPICount struct {
+ ClusterName string `json:"clusterName"`
+ DeletedAPICount int `json:"deletedAPICount"`
+}
+
+type ClusterDeprecatedAPICount struct {
+ ClusterName string `json:"clusterName"`
+ DeprecatedAPICount int `json:"deprecatedAPICount"`
+}
+
+type ClusterNamespaceMisconfigCount struct {
+ ClusterName string `json:"clusterName"`
+ Namespace string `json:"namespace"`
+ MisconfigCount int `json:"misconfigCount"`
+}
+
+type ClusterNamespaceOutdatedCount struct {
+ ClusterName string `json:"clusterName"`
+ Namespace string `json:"namespace"`
+ OutdatedCount int `json:"outdatedCount"`
+}
+
+type ClusterNamespaceResourceCount struct {
+ ClusterName string `json:"clusterName"`
+ Namespace string `json:"namespace"`
+ ResourceCount int `json:"resourceCount"`
+}
+
+type ClusterNamespaceVulCount struct {
+ ClusterName string `json:"clusterName"`
+ Namespace string `json:"namespace"`
+ VulCount int `json:"vulCount"`
+}
+
+type DeletedAPI struct {
+ ClusterName *string `json:"ClusterName,omitempty"`
+ ObjectName *string `json:"ObjectName,omitempty"`
+ Group *string `json:"Group,omitempty"`
+ Kind *string `json:"Kind,omitempty"`
+ Version *string `json:"Version,omitempty"`
+ Name *string `json:"Name,omitempty"`
+ Deleted *bool `json:"Deleted,omitempty"`
+ Scope *string `json:"Scope,omitempty"`
+ EventTime *string `json:"EventTime,omitempty"`
+ ExpiryDate *string `json:"ExpiryDate,omitempty"`
+}
+
+type DeprecatedAPI struct {
+ ClusterName *string `json:"ClusterName,omitempty"`
+ ObjectName *string `json:"ObjectName,omitempty"`
+ Description *string `json:"Description,omitempty"`
+ Kind *string `json:"Kind,omitempty"`
+ Deprecated *bool `json:"Deprecated,omitempty"`
+ Scope *string `json:"Scope,omitempty"`
+ EventTime *string `json:"EventTime,omitempty"`
+ ExpiryDate *string `json:"ExpiryDate,omitempty"`
+}
+
+type Event struct {
+ ClusterName *string `json:"ClusterName,omitempty"`
+ ID *string `json:"Id,omitempty"`
+ EventTime *string `json:"EventTime,omitempty"`
+ OpType *string `json:"OpType,omitempty"`
+ Name *string `json:"Name,omitempty"`
+ Namespace *string `json:"Namespace,omitempty"`
+ Kind *string `json:"Kind,omitempty"`
+ Message *string `json:"Message,omitempty"`
+ Reason *string `json:"Reason,omitempty"`
+ Host *string `json:"Host,omitempty"`
+ Event *string `json:"Event,omitempty"`
+ ImageName *string `json:"ImageName,omitempty"`
+ FirstTime *string `json:"FirstTime,omitempty"`
+ LastTime *string `json:"LastTime,omitempty"`
+ ExpiryDate *string `json:"ExpiryDate,omitempty"`
+}
+
+type GetAllResource struct {
+ ClusterName *string `json:"ClusterName,omitempty"`
+ Namespace *string `json:"Namespace,omitempty"`
+ Kind *string `json:"Kind,omitempty"`
+ Resource *string `json:"Resource,omitempty"`
+ Age *string `json:"Age,omitempty"`
+ EventTime *string `json:"EventTime,omitempty"`
+ ExpiryDate *string `json:"ExpiryDate,omitempty"`
+}
+
+type KubeScore struct {
+ ID string `json:"id"`
+ ClusterName string `json:"clusterName"`
+ ObjectName string `json:"objectName"`
+ Kind string `json:"kind"`
+ APIVersion string `json:"apiVersion"`
+ Name string `json:"name"`
+ Namespace string `json:"namespace"`
+ TargetType string `json:"targetType"`
+ Description string `json:"description"`
+ Path string `json:"path"`
+ Summary string `json:"summary"`
+ FileName string `json:"fileName"`
+ FileRow int `json:"fileRow"`
+ EventTime string `json:"eventTime"`
+}
+
+type Kubescore struct {
+ ID string `json:"id"`
+ ClusterName *string `json:"clusterName,omitempty"`
+ ObjectName *string `json:"objectName,omitempty"`
+ Kind *string `json:"kind,omitempty"`
+ APIVersion *string `json:"apiVersion,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Namespace *string `json:"namespace,omitempty"`
+ TargetType *string `json:"targetType,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Path *string `json:"path,omitempty"`
+ Summary *string `json:"summary,omitempty"`
+ FileName *string `json:"fileName,omitempty"`
+ FileRow *int `json:"fileRow,omitempty"`
+ EventTime *string `json:"eventTime,omitempty"`
+}
+
+type Misconfiguration struct {
+ ID string `json:"id"`
+ ClusterName string `json:"clusterName"`
+ Namespace string `json:"namespace"`
+ Kind string `json:"kind"`
+ Name string `json:"name"`
+ MisconfigID string `json:"misconfigId"`
+ MisconfigAvdid *string `json:"misconfigAvdid,omitempty"`
+ MisconfigType *string `json:"misconfigType,omitempty"`
+ MisconfigTitle *string `json:"misconfigTitle,omitempty"`
+ MisconfigDesc *string `json:"misconfigDesc,omitempty"`
+ MisconfigMsg *string `json:"misconfigMsg,omitempty"`
+ MisconfigQuery *string `json:"misconfigQuery,omitempty"`
+ MisconfigResolution *string `json:"misconfigResolution,omitempty"`
+ MisconfigSeverity *string `json:"misconfigSeverity,omitempty"`
+ MisconfigStatus *string `json:"misconfigStatus,omitempty"`
+ EventTime *string `json:"eventTime,omitempty"`
+ ExpiryDate *string `json:"expiryDate,omitempty"`
+ ExportedAt *string `json:"exportedAt,omitempty"`
+}
+
+type Namespace struct {
+ Name string `json:"name"`
+}
+
+type NamespaceData struct {
+ Namespace string `json:"namespace"`
+ OutdatedImages []*OutdatedImage `json:"outdatedImages"`
+ KubeScores []*KubeScore `json:"kubeScores"`
+ Resources []*Resource `json:"resources"`
+}
+
+type OutdatedImage struct {
+ ClusterName string `json:"clusterName"`
+ Namespace string `json:"namespace"`
+ Pod string `json:"pod"`
+ CurrentImage string `json:"currentImage"`
+ CurrentTag string `json:"currentTag"`
+ LatestVersion string `json:"latestVersion"`
+ VersionsBehind int `json:"versionsBehind"`
+ EventTime string `json:"eventTime"`
+}
+
+type Query struct {
+}
+
+type Rakkess struct {
+ ClusterName *string `json:"ClusterName,omitempty"`
+ Name *string `json:"Name,omitempty"`
+ Create *string `json:"Create,omitempty"`
+ Delete *string `json:"Delete,omitempty"`
+ List *string `json:"List,omitempty"`
+ Update *string `json:"Update,omitempty"`
+ EventTime *string `json:"EventTime,omitempty"`
+ ExpiryDate *string `json:"ExpiryDate,omitempty"`
+}
+
+type Resource struct {
+ ClusterName string `json:"clusterName"`
+ Namespace string `json:"namespace"`
+ Kind string `json:"kind"`
+ Resource string `json:"resource"`
+ Age string `json:"age"`
+ EventTime string `json:"eventTime"`
+}
+
+type TrivyImage struct {
+ ID string `json:"id"`
+ ClusterName *string `json:"clusterName,omitempty"`
+ ArtifactName *string `json:"artifactName,omitempty"`
+ VulID *string `json:"vulId,omitempty"`
+ VulPkgID *string `json:"vulPkgId,omitempty"`
+ VulPkgName *string `json:"vulPkgName,omitempty"`
+ VulInstalledVersion *string `json:"vulInstalledVersion,omitempty"`
+ VulFixedVersion *string `json:"vulFixedVersion,omitempty"`
+ VulTitle *string `json:"vulTitle,omitempty"`
+ VulSeverity *string `json:"vulSeverity,omitempty"`
+ VulPublishedDate *string `json:"vulPublishedDate,omitempty"`
+ VulLastModifiedDate *string `json:"vulLastModifiedDate,omitempty"`
+ ExpiryDate *string `json:"expiryDate,omitempty"`
+}
+
+type TrivyImageCount struct {
+ ClusterName string `json:"clusterName"`
+ ImageCount int `json:"ImageCount"`
+}
+
+type TrivyMisconfig struct {
+ ID string `json:"id"`
+ ClusterName *string `json:"clusterName,omitempty"`
+ Namespace *string `json:"namespace,omitempty"`
+ Kind *string `json:"kind,omitempty"`
+ Name *string `json:"name,omitempty"`
+ MisconfigID *string `json:"misconfigId,omitempty"`
+ MisconfigAvdid *string `json:"misconfigAvdid,omitempty"`
+ MisconfigType *string `json:"misconfigType,omitempty"`
+ MisconfigTitle *string `json:"misconfigTitle,omitempty"`
+ MisconfigDesc *string `json:"misconfigDesc,omitempty"`
+ MisconfigMsg *string `json:"misconfigMsg,omitempty"`
+ MisconfigQuery *string `json:"misconfigQuery,omitempty"`
+ MisconfigResolution *string `json:"misconfigResolution,omitempty"`
+ MisconfigSeverity *string `json:"misconfigSeverity,omitempty"`
+ MisconfigStatus *string `json:"misconfigStatus,omitempty"`
+ EventTime *string `json:"eventTime,omitempty"`
+ ExpiryDate *string `json:"expiryDate,omitempty"`
+}
+
+type TrivySbom struct {
+ ID string `json:"id"`
+ ClusterName *string `json:"clusterName,omitempty"`
+ ImageName *string `json:"imageName,omitempty"`
+ PackageName *string `json:"packageName,omitempty"`
+ PackageURL *string `json:"packageUrl,omitempty"`
+ BomRef *string `json:"bomRef,omitempty"`
+ SerialNumber *string `json:"serialNumber,omitempty"`
+ Version *int `json:"version,omitempty"`
+ BomFormat *string `json:"bomFormat,omitempty"`
+ ExpiryDate *string `json:"expiryDate,omitempty"`
+}
+
+type TrivyVul struct {
+ ID string `json:"id"`
+ ClusterName *string `json:"clusterName,omitempty"`
+ Namespace *string `json:"namespace,omitempty"`
+ Kind *string `json:"kind,omitempty"`
+ Name *string `json:"name,omitempty"`
+ VulID *string `json:"vulId,omitempty"`
+ VulVendorIds *string `json:"vulVendorIds,omitempty"`
+ VulPkgID *string `json:"vulPkgId,omitempty"`
+ VulPkgName *string `json:"vulPkgName,omitempty"`
+ VulPkgPath *string `json:"vulPkgPath,omitempty"`
+ VulInstalledVersion *string `json:"vulInstalledVersion,omitempty"`
+ VulFixedVersion *string `json:"vulFixedVersion,omitempty"`
+ VulTitle *string `json:"vulTitle,omitempty"`
+ VulSeverity *string `json:"vulSeverity,omitempty"`
+ VulPublishedDate *string `json:"vulPublishedDate,omitempty"`
+ VulLastModifiedDate *string `json:"vulLastModifiedDate,omitempty"`
+ ExpiryDate *string `json:"expiryDate,omitempty"`
+}
+
+type Vulnerability struct {
+ ID string `json:"id"`
+ ClusterName string `json:"clusterName"`
+ Namespace string `json:"namespace"`
+ Kind string `json:"kind"`
+ Name string `json:"name"`
+ VulID string `json:"vulId"`
+ VulVendorIds *string `json:"vulVendorIds,omitempty"`
+ VulPkgID *string `json:"vulPkgId,omitempty"`
+ VulPkgName *string `json:"vulPkgName,omitempty"`
+ VulPkgPath *string `json:"vulPkgPath,omitempty"`
+ VulInstalledVersion *string `json:"vulInstalledVersion,omitempty"`
+ VulFixedVersion *string `json:"vulFixedVersion,omitempty"`
+ VulTitle *string `json:"vulTitle,omitempty"`
+ VulSeverity *string `json:"vulSeverity,omitempty"`
+ VulPublishedDate *string `json:"vulPublishedDate,omitempty"`
+ VulLastModifiedDate *string `json:"vulLastModifiedDate,omitempty"`
+ ExpiryDate *string `json:"expiryDate,omitempty"`
+ ExportedAt *string `json:"exportedAt,omitempty"`
+}
diff --git a/graphqlserver/graph/resolver.go b/graphqlserver/graph/resolver.go
new file mode 100644
index 00000000..37fad417
--- /dev/null
+++ b/graphqlserver/graph/resolver.go
@@ -0,0 +1,17 @@
+package graph
+
+import (
+ "database/sql"
+)
+
+// This file will not be regenerated automatically.
+//
+// It serves as dependency injection for your app, add any dependencies you require here.
+
+type Resolver struct {
+ DB *sql.DB
+}
+
+func NewResolver(db *sql.DB) *Resolver {
+ return &Resolver{DB: db}
+}
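
The Resolver above only carries the *sql.DB handle; the generated gqlgen code is expected to receive it when the executable schema is built. A minimal wiring sketch (not part of this diff) is shown below. It assumes the gqlgen-generated NewExecutableSchema and Config live in the same graph package as these resolvers and that the endpoint is mounted at /query; both are assumptions, since neither detail is visible in this part of the diff.

```go
package main

import (
	"database/sql"
	"log"
	"net/http"

	"github.com/99designs/gqlgen/graphql/handler"

	"github.com/intelops/kubviz/graphqlserver/graph"
)

// newGraphQLHandler injects an already-opened *sql.DB into the resolver root and
// returns an HTTP handler for the GraphQL API. graph.NewExecutableSchema and
// graph.Config are the standard gqlgen outputs; their exact package location is an
// assumption here.
func newGraphQLHandler(db *sql.DB) http.Handler {
	return handler.NewDefaultServer(graph.NewExecutableSchema(graph.Config{
		Resolvers: graph.NewResolver(db),
	}))
}

func main() {
	var db *sql.DB // assume the ClickHouse connection is opened elsewhere in the PR
	http.Handle("/query", newGraphQLHandler(db))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```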
diff --git a/graphqlserver/graph/schema.graphqls b/graphqlserver/graph/schema.graphqls
new file mode 100644
index 00000000..9b6e3258
--- /dev/null
+++ b/graphqlserver/graph/schema.graphqls
@@ -0,0 +1,327 @@
+type Query {
+ allEvents: [Event!]!
+ allRakkess: [Rakkess!]!
+ allDeprecatedAPIs: [DeprecatedAPI!]!
+ allDeletedAPIs: [DeletedAPI!]!
+ allGetAllResources: [GetAllResource!]!
+ allTrivySBOMs: [TrivySBOM!]!
+ allTrivyImages: [TrivyImage!]!
+ allKubeScores: [Kubescore!]!
+ allTrivyVuls: [TrivyVul!]!
+ allTrivyMisconfigs: [TrivyMisconfig!]!
+ uniqueNamespaces(clusterName: String!): [Namespace!]!
+ uniqueClusters: [Cluster!]!
+ outdatedImagesByClusterAndNamespace(clusterName: String!, namespace: String!): [OutdatedImage!]!
+ outdatedImagesCount(clusterName: String!, namespace: String!): Int!
+ allClusterNamespaceOutdatedCounts: [ClusterNamespaceOutdatedCount!]!
+ allClusterDeprecatedAPIsCounts: [ClusterAPIsCount!]!
+ allClusterDeletedAPIsCounts: [ClusterAPIsCount!]!
+ allClusterNamespaceResourceCounts: [ClusterNamespaceResourceCount!]!
+ eventsByClusterAndNamespace(clusterName: String!, namespace: String!): [Event!]!
+ vulnerabilities(clusterName: String!, namespace: String!): [Vulnerability!]!
+ misconfigurations(clusterName: String!, namespace: String!): [Misconfiguration!]!
+ kubescores(clustername: String!, namespace: String!): [KubeScore!]!
+ getAllResources(clusterName: String!, namespace: String!): [GetAllResource!]!
+ trivyImages(clusterName: String!): [TrivyImage!]!
+ deprecatedAPIs(clusterName: String!): [DeprecatedAPI!]!
+ deletedAPIs(clusterName: String!): [DeletedAPI!]!
+ trivySBOMs(clusterName: String!): [TrivySBOM!]!
+ trivyVulCount(clusterName: String!, namespace: String!): ClusterNamespaceVulCount!
+ trivyMisconfigCount(clusterName: String!, namespace: String!): ClusterNamespaceMisconfigCount!
+ deletedAPICount(clusterName: String!): ClusterDeletedAPICount!
+ trivyImageCount(clusterName: String!): TrivyImageCount!
+ deprecatedAPICount(clusterName: String!): ClusterDeprecatedAPICount!
+}
+
+type ClusterDeprecatedAPICount {
+ clusterName: String!
+ deprecatedAPICount: Int!
+}
+
+type TrivyImageCount {
+ clusterName: String!
+ ImageCount: Int!
+}
+
+
+type ClusterDeletedAPICount {
+ clusterName: String!
+ deletedAPICount: Int!
+}
+
+
+type ClusterNamespaceMisconfigCount {
+ clusterName: String!
+ namespace: String!
+ misconfigCount: Int!
+}
+
+
+type ClusterNamespaceVulCount {
+ clusterName: String!
+ namespace: String!
+ vulCount: Int!
+}
+
+type Kubescore {
+ id: ID!
+ clusterName: String
+ objectName: String
+ kind: String
+ apiVersion: String
+ name: String
+ namespace: String
+ targetType: String
+ description: String
+ path: String
+ summary: String
+ fileName: String
+ fileRow: Int
+ eventTime: String
+}
+
+
+type Misconfiguration {
+ id: ID!
+ clusterName: String!
+ namespace: String!
+ kind: String!
+ name: String!
+ misconfigId: String!
+ misconfigAvdid: String
+ misconfigType: String
+ misconfigTitle: String
+ misconfigDesc: String
+ misconfigMsg: String
+ misconfigQuery: String
+ misconfigResolution: String
+ misconfigSeverity: String
+ misconfigStatus: String
+ eventTime: String
+ expiryDate: String
+ exportedAt: String
+}
+
+type Vulnerability {
+ id: ID!
+ clusterName: String!
+ namespace: String!
+ kind: String!
+ name: String!
+ vulId: String!
+ vulVendorIds: String
+ vulPkgId: String
+ vulPkgName: String
+ vulPkgPath: String
+ vulInstalledVersion: String
+ vulFixedVersion: String
+ vulTitle: String
+ vulSeverity: String
+ vulPublishedDate: String
+ vulLastModifiedDate: String
+ expiryDate: String
+ exportedAt: String
+}
+
+type Event {
+ ClusterName: String
+ Id: String
+ EventTime: String
+ OpType: String
+ Name: String
+ Namespace: String
+ Kind: String
+ Message: String
+ Reason: String
+ Host: String
+ Event: String
+ ImageName: String
+ FirstTime: String
+ LastTime: String
+ ExpiryDate: String
+}
+
+type Namespace {
+ name: String!
+}
+
+type Cluster {
+ name: String!
+}
+
+type ClusterNamespaceResourceCount {
+ clusterName: String!
+ namespace: String!
+ resourceCount: Int!
+}
+type ClusterAPIsCount {
+ clusterName: String!
+ count: Int!
+}
+
+
+type ClusterNamespaceOutdatedCount {
+ clusterName: String!
+ namespace: String!
+ outdatedCount: Int!
+}
+
+type TrivyMisconfig {
+ id: ID!
+ clusterName: String
+ namespace: String
+ kind: String
+ name: String
+ misconfigId: String
+ misconfigAvdid: String
+ misconfigType: String
+ misconfigTitle: String
+ misconfigDesc: String
+ misconfigMsg: String
+ misconfigQuery: String
+ misconfigResolution: String
+ misconfigSeverity: String
+ misconfigStatus: String
+ eventTime: String
+ expiryDate: String
+}
+
+type TrivyVul {
+ id: ID!
+ clusterName: String
+ namespace: String
+ kind: String
+ name: String
+ vulId: String
+ vulVendorIds: String
+ vulPkgId: String
+ vulPkgName: String
+ vulPkgPath: String
+ vulInstalledVersion: String
+ vulFixedVersion: String
+ vulTitle: String
+ vulSeverity: String
+ vulPublishedDate: String
+ vulLastModifiedDate: String
+ expiryDate: String
+}
+
+type TrivyImage {
+ id: ID!
+ clusterName: String
+ artifactName: String
+ vulId: String
+ vulPkgId: String
+ vulPkgName: String
+ vulInstalledVersion: String
+ vulFixedVersion: String
+ vulTitle: String
+ vulSeverity: String
+ vulPublishedDate: String
+ vulLastModifiedDate: String
+ expiryDate: String
+}
+
+type TrivySBOM {
+ id: ID!
+ clusterName: String
+ imageName: String
+ packageName: String
+ packageUrl: String
+ bomRef: String
+ serialNumber: String
+ version: Int
+ bomFormat: String
+ expiryDate: String
+}
+
+type GetAllResource {
+ ClusterName: String
+ Namespace: String
+ Kind: String
+ Resource: String
+ Age: String
+ EventTime: String
+ ExpiryDate: String
+}
+
+type DeletedAPI {
+ ClusterName: String
+ ObjectName: String
+ Group: String
+ Kind: String
+ Version: String
+ Name: String
+ Deleted: Boolean
+ Scope: String
+ EventTime: String
+ ExpiryDate: String
+}
+
+type DeprecatedAPI {
+ ClusterName: String
+ ObjectName: String
+ Description: String
+ Kind: String
+ Deprecated: Boolean
+ Scope: String
+ EventTime: String
+ ExpiryDate: String
+}
+
+type NamespaceData {
+ namespace: String!
+ outdatedImages: [OutdatedImage!]!
+ kubeScores: [KubeScore!]!
+ resources: [Resource!]!
+}
+
+type OutdatedImage {
+ clusterName: String!
+ namespace: String!
+ pod: String!
+ currentImage: String!
+ currentTag: String!
+ latestVersion: String!
+ versionsBehind: Int!
+ eventTime: String!
+}
+
+type KubeScore {
+ id: ID!
+ clusterName: String!
+ objectName: String!
+ kind: String!
+ apiVersion: String!
+ name: String!
+ namespace: String!
+ targetType: String!
+ description: String!
+ path: String!
+ summary: String!
+ fileName: String!
+ fileRow: Int!
+ eventTime: String!
+}
+
+type Resource {
+ clusterName: String!
+ namespace: String!
+ kind: String!
+ resource: String!
+ age: String!
+ eventTime: String!
+}
+
+
+
+type Rakkess {
+ ClusterName: String
+ Name: String
+ Create: String
+ Delete: String
+ List: String
+ Update: String
+ EventTime: String
+ ExpiryDate: String
+}
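
The schema above defines only read-only queries, so any of them can be exercised with a plain HTTP POST of a GraphQL document. A small client sketch follows, assuming the server from this PR listens on localhost:8080 at /query (both assumed, not stated in the diff).

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// One of the queries defined in the schema above.
	payload, err := json.Marshal(map[string]string{
		"query": `{ uniqueClusters { name } }`,
	})
	if err != nil {
		log.Fatalf("marshal request: %v", err)
	}

	// http://localhost:8080/query is an assumed address; adjust to the real deployment.
	resp, err := http.Post("http://localhost:8080/query", "application/json", bytes.NewReader(payload))
	if err != nil {
		log.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	var out struct {
		Data struct {
			UniqueClusters []struct {
				Name string `json:"name"`
			} `json:"uniqueClusters"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		log.Fatalf("decode response: %v", err)
	}
	for _, c := range out.Data.UniqueClusters {
		fmt.Println(c.Name)
	}
}
```

The parameterized queries, such as vulnerabilities(clusterName, namespace), take the same request shape with a variables object alongside query.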
diff --git a/graphqlserver/graph/schema.resolvers.go b/graphqlserver/graph/schema.resolvers.go
new file mode 100644
index 00000000..71cb5330
--- /dev/null
+++ b/graphqlserver/graph/schema.resolvers.go
@@ -0,0 +1,1040 @@
+package graph
+
+// This file will be automatically regenerated based on the schema, any resolver implementations
+// will be copied through when generating and any unknown code will be moved to the end.
+// Code generated by github.com/99designs/gqlgen version v0.17.42
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+
+ "github.com/intelops/kubviz/graphqlserver/graph/model"
+)
+
+// AllEvents is the resolver for the allEvents field.
+func (r *queryResolver) AllEvents(ctx context.Context) ([]*model.Event, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ query := `SELECT ClusterName, Id, EventTime, OpType, Name, Namespace, Kind, Message, Reason, Host, Event, FirstTime, LastTime, ExpiryDate FROM events`
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return []*model.Event{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var events []*model.Event
+ for rows.Next() {
+ var e model.Event
+ if err := rows.Scan(&e.ClusterName, &e.ID, &e.EventTime, &e.OpType, &e.Name, &e.Namespace, &e.Kind, &e.Message, &e.Reason, &e.Host, &e.Event, &e.FirstTime, &e.LastTime, &e.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ events = append(events, &e)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return events, nil
+}
+
+// AllRakkess is the resolver for the allRakkess field.
+func (r *queryResolver) AllRakkess(ctx context.Context) ([]*model.Rakkess, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ query := `SELECT ClusterName, Name, Create, Delete, List, Update, EventTime, ExpiryDate FROM rakkess`
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return []*model.Rakkess{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var rakkessRecords []*model.Rakkess
+ for rows.Next() {
+ var r model.Rakkess
+ if err := rows.Scan(&r.ClusterName, &r.Name, &r.Create, &r.Delete, &r.List, &r.Update, &r.EventTime, &r.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ rakkessRecords = append(rakkessRecords, &r)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return rakkessRecords, nil
+}
+
+// AllDeprecatedAPIs is the resolver for the allDeprecatedAPIs field.
+func (r *queryResolver) AllDeprecatedAPIs(ctx context.Context) ([]*model.DeprecatedAPI, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ query := `SELECT ClusterName, ObjectName, Description, Kind, Deprecated, Scope, EventTime, ExpiryDate FROM DeprecatedAPIs`
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return []*model.DeprecatedAPI{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var deprecatedAPIs []*model.DeprecatedAPI
+ for rows.Next() {
+ var d model.DeprecatedAPI
+ var deprecatedInt uint8
+ if err := rows.Scan(&d.ClusterName, &d.ObjectName, &d.Description, &d.Kind, &deprecatedInt, &d.Scope, &d.EventTime, &d.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+
+ // Convert uint8 to bool
+ deprecatedBool := deprecatedInt != 0
+ d.Deprecated = &deprecatedBool
+
+ deprecatedAPIs = append(deprecatedAPIs, &d)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return deprecatedAPIs, nil
+}
+
+// AllDeletedAPIs is the resolver for the allDeletedAPIs field.
+func (r *queryResolver) AllDeletedAPIs(ctx context.Context) ([]*model.DeletedAPI, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ query := `SELECT ClusterName, ObjectName, Group, Kind, Version, Name, Deleted, Scope, EventTime, ExpiryDate FROM DeletedAPIs`
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return []*model.DeletedAPI{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var deletedAPIs []*model.DeletedAPI
+ for rows.Next() {
+ var d model.DeletedAPI
+ var deletedInt uint8
+ if err := rows.Scan(&d.ClusterName, &d.ObjectName, &d.Group, &d.Kind, &d.Version, &d.Name, &deletedInt, &d.Scope, &d.EventTime, &d.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+
+ // Convert uint8 to bool
+ deletedBool := deletedInt != 0
+ d.Deleted = &deletedBool
+
+ deletedAPIs = append(deletedAPIs, &d)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return deletedAPIs, nil
+}
+
+// AllGetAllResources is the resolver for the allGetAllResources field.
+func (r *queryResolver) AllGetAllResources(ctx context.Context) ([]*model.GetAllResource, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ query := `SELECT ClusterName, Namespace, Kind, Resource, Age, EventTime, ExpiryDate FROM getall_resources`
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return []*model.GetAllResource{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var resources []*model.GetAllResource
+ for rows.Next() {
+ var res model.GetAllResource
+ if err := rows.Scan(&res.ClusterName, &res.Namespace, &res.Kind, &res.Resource, &res.Age, &res.EventTime, &res.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ resources = append(resources, &res)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return resources, nil
+}
+
+// AllTrivySBOMs is the resolver for the allTrivySBOMs field.
+func (r *queryResolver) AllTrivySBOMs(ctx context.Context) ([]*model.TrivySbom, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ query := `SELECT id, cluster_name, image_name, package_name, package_url, bom_ref, serial_number, version, bom_format, ExpiryDate FROM trivysbom`
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return []*model.TrivySbom{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var sboms []*model.TrivySbom
+ for rows.Next() {
+ var s model.TrivySbom
+ if err := rows.Scan(&s.ID, &s.ClusterName, &s.ImageName, &s.PackageName, &s.PackageURL, &s.BomRef, &s.SerialNumber, &s.Version, &s.BomFormat, &s.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ sboms = append(sboms, &s)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return sboms, nil
+}
+
+// AllTrivyImages is the resolver for the allTrivyImages field.
+func (r *queryResolver) AllTrivyImages(ctx context.Context) ([]*model.TrivyImage, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ query := `SELECT id, cluster_name, artifact_name, vul_id, vul_pkg_id, vul_pkg_name, vul_installed_version, vul_fixed_version, vul_title, vul_severity, vul_published_date, vul_last_modified_date, ExpiryDate FROM trivyimage`
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return []*model.TrivyImage{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var images []*model.TrivyImage
+ for rows.Next() {
+ var img model.TrivyImage
+ if err := rows.Scan(&img.ID, &img.ClusterName, &img.ArtifactName, &img.VulID, &img.VulPkgID, &img.VulPkgName, &img.VulInstalledVersion, &img.VulFixedVersion, &img.VulTitle, &img.VulSeverity, &img.VulPublishedDate, &img.VulLastModifiedDate, &img.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ images = append(images, &img)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return images, nil
+}
+
+// AllKubeScores is the resolver for the allKubeScores field.
+func (r *queryResolver) AllKubeScores(ctx context.Context) ([]*model.Kubescore, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ // Select only the columns scanned below; the generated Kubescore model has no ExpiryDate field.
+ query := `SELECT id, clustername, object_name, kind, apiVersion, name, namespace, target_type, description, path, summary, file_name, file_row, EventTime FROM kubescore`
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return []*model.Kubescore{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var kubeScores []*model.Kubescore
+ for rows.Next() {
+ var ks model.Kubescore
+ if err := rows.Scan(&ks.ID, &ks.ClusterName, &ks.ObjectName, &ks.Kind, &ks.APIVersion, &ks.Name, &ks.Namespace, &ks.TargetType, &ks.Description, &ks.Path, &ks.Summary, &ks.FileName, &ks.FileRow, &ks.EventTime); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ kubeScores = append(kubeScores, &ks)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return kubeScores, nil
+}
+
+// AllTrivyVuls is the resolver for the allTrivyVuls field.
+func (r *queryResolver) AllTrivyVuls(ctx context.Context) ([]*model.TrivyVul, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ query := `SELECT id, cluster_name, namespace, kind, name, vul_id, vul_vendor_ids, vul_pkg_id, vul_pkg_name, vul_pkg_path, vul_installed_version, vul_fixed_version, vul_title, vul_severity, vul_published_date, vul_last_modified_date, ExpiryDate FROM trivy_vul`
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return []*model.TrivyVul{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var trivyVuls []*model.TrivyVul
+ for rows.Next() {
+ var tv model.TrivyVul
+ if err := rows.Scan(&tv.ID, &tv.ClusterName, &tv.Namespace, &tv.Kind, &tv.Name, &tv.VulID, &tv.VulVendorIds, &tv.VulPkgID, &tv.VulPkgName, &tv.VulPkgPath, &tv.VulInstalledVersion, &tv.VulFixedVersion, &tv.VulTitle, &tv.VulSeverity, &tv.VulPublishedDate, &tv.VulLastModifiedDate, &tv.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ trivyVuls = append(trivyVuls, &tv)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return trivyVuls, nil
+}
+
+// AllTrivyMisconfigs is the resolver for the allTrivyMisconfigs field.
+func (r *queryResolver) AllTrivyMisconfigs(ctx context.Context) ([]*model.TrivyMisconfig, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ query := `SELECT id, cluster_name, namespace, kind, name, misconfig_id, misconfig_avdid, misconfig_type, misconfig_title, misconfig_desc, misconfig_msg, misconfig_query, misconfig_resolution, misconfig_severity, misconfig_status, EventTime, ExpiryDate FROM trivy_misconfig`
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return []*model.TrivyMisconfig{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var misconfigs []*model.TrivyMisconfig
+ for rows.Next() {
+ var tm model.TrivyMisconfig
+ if err := rows.Scan(&tm.ID, &tm.ClusterName, &tm.Namespace, &tm.Kind, &tm.Name, &tm.MisconfigID, &tm.MisconfigAvdid, &tm.MisconfigType, &tm.MisconfigTitle, &tm.MisconfigDesc, &tm.MisconfigMsg, &tm.MisconfigQuery, &tm.MisconfigResolution, &tm.MisconfigSeverity, &tm.MisconfigStatus, &tm.EventTime, &tm.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ misconfigs = append(misconfigs, &tm)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return misconfigs, nil
+}
+
+// UniqueNamespaces is the resolver for the uniqueNamespaces field.
+func (r *queryResolver) UniqueNamespaces(ctx context.Context, clusterName string) ([]*model.Namespace, error) {
+ namespaces, err := r.fetchNamespacesFromDatabase(ctx, clusterName)
+ if err != nil {
+ return nil, err
+ }
+
+ var namespaceObjects []*model.Namespace
+ for _, ns := range namespaces {
+ namespaceObjects = append(namespaceObjects, &model.Namespace{Name: ns})
+ }
+
+ return namespaceObjects, nil
+}
+
+// UniqueClusters is the resolver for the uniqueClusters field.
+func (r *queryResolver) UniqueClusters(ctx context.Context) ([]*model.Cluster, error) {
+ clusters, err := r.fetchClustersFromDatabase(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ var clusterObjects []*model.Cluster
+ for _, cluster := range clusters {
+ clusterObjects = append(clusterObjects, &model.Cluster{Name: cluster})
+ }
+
+ return clusterObjects, nil
+}
+
+// OutdatedImagesByClusterAndNamespace is the resolver for the outdatedImagesByClusterAndNamespace field.
+func (r *queryResolver) OutdatedImagesByClusterAndNamespace(ctx context.Context, clusterName string, namespace string) ([]*model.OutdatedImage, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" || namespace == "" {
+ return nil, fmt.Errorf("clusterName and namespace cannot be empty")
+ }
+
+ query := `SELECT ClusterName, Namespace, Pod, CurrentImage, CurrentTag, LatestVersion, VersionsBehind, EventTime FROM outdated_images WHERE ClusterName = ? AND Namespace = ?`
+
+ rows, err := r.DB.QueryContext(ctx, query, clusterName, namespace)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return []*model.OutdatedImage{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var outdatedImages []*model.OutdatedImage
+ for rows.Next() {
+ var oi model.OutdatedImage
+ if err := rows.Scan(&oi.ClusterName, &oi.Namespace, &oi.Pod, &oi.CurrentImage, &oi.CurrentTag, &oi.LatestVersion, &oi.VersionsBehind, &oi.EventTime); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ outdatedImages = append(outdatedImages, &oi)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return outdatedImages, nil
+}
+
+// OutdatedImagesCount is the resolver for the outdatedImagesCount field.
+func (r *queryResolver) OutdatedImagesCount(ctx context.Context, clusterName string, namespace string) (int, error) {
+ if r.DB == nil {
+ return 0, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" || namespace == "" {
+ return 0, fmt.Errorf("clusterName and namespace cannot be empty")
+ }
+
+ query := `SELECT COUNT(*) FROM outdated_images WHERE ClusterName = ? AND Namespace = ?`
+
+ var count int
+ err := r.DB.QueryRowContext(ctx, query, clusterName, namespace).Scan(&count)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return 0, nil
+ }
+ return 0, fmt.Errorf("error executing query: %v", err)
+ }
+
+ return count, nil
+}
+
+// AllClusterNamespaceOutdatedCounts is the resolver for the allClusterNamespaceOutdatedCounts field.
+func (r *queryResolver) AllClusterNamespaceOutdatedCounts(ctx context.Context) ([]*model.ClusterNamespaceOutdatedCount, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ query := `
+ SELECT ClusterName, Namespace, COUNT(*) as outdatedCount
+ FROM outdated_images
+ GROUP BY ClusterName, Namespace
+ `
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var results []*model.ClusterNamespaceOutdatedCount
+ for rows.Next() {
+ var result model.ClusterNamespaceOutdatedCount
+ if err := rows.Scan(&result.ClusterName, &result.Namespace, &result.OutdatedCount); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ results = append(results, &result)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return results, nil
+}
+
+// AllClusterDeprecatedAPIsCounts is the resolver for the allClusterDeprecatedAPIsCounts field.
+func (r *queryResolver) AllClusterDeprecatedAPIsCounts(ctx context.Context) ([]*model.ClusterAPIsCount, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+ query := `
+ SELECT ClusterName, COUNT(*) as count
+ FROM DeprecatedAPIs
+ GROUP BY ClusterName
+`
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+ var results []*model.ClusterAPIsCount
+ for rows.Next() {
+ var result model.ClusterAPIsCount
+ if err := rows.Scan(&result.ClusterName, &result.Count); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ results = append(results, &result)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return results, nil
+}
+
+// AllClusterDeletedAPIsCounts is the resolver for the allClusterDeletedAPIsCounts field.
+func (r *queryResolver) AllClusterDeletedAPIsCounts(ctx context.Context) ([]*model.ClusterAPIsCount, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ query := `
+ SELECT ClusterName, COUNT(*) as count
+ FROM DeletedAPIs
+ GROUP BY ClusterName
+ `
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var results []*model.ClusterAPIsCount
+ for rows.Next() {
+ var result model.ClusterAPIsCount
+ if err := rows.Scan(&result.ClusterName, &result.Count); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ results = append(results, &result)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return results, nil
+}
+
+// AllClusterNamespaceResourceCounts is the resolver for the allClusterNamespaceResourceCounts field.
+func (r *queryResolver) AllClusterNamespaceResourceCounts(ctx context.Context) ([]*model.ClusterNamespaceResourceCount, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ query := `
+ SELECT ClusterName, Namespace, COUNT(*) as resourceCount
+ FROM getall_resources
+ GROUP BY ClusterName, Namespace
+ `
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var results []*model.ClusterNamespaceResourceCount
+ for rows.Next() {
+ var result model.ClusterNamespaceResourceCount
+ if err := rows.Scan(&result.ClusterName, &result.Namespace, &result.ResourceCount); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ results = append(results, &result)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return results, nil
+}
+
+// EventsByClusterAndNamespace is the resolver for the eventsByClusterAndNamespace field.
+func (r *queryResolver) EventsByClusterAndNamespace(ctx context.Context, clusterName string, namespace string) ([]*model.Event, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" || namespace == "" {
+ return nil, fmt.Errorf("clusterName and namespace cannot be empty")
+ }
+
+ query := `SELECT ClusterName, Id, EventTime, OpType, Name, Namespace, Kind, Message, Reason, Host, Event, ImageName, FirstTime, LastTime FROM events WHERE ClusterName = ? AND Namespace = ?`
+
+ rows, err := r.DB.QueryContext(ctx, query, clusterName, namespace)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return []*model.Event{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var events []*model.Event
+ for rows.Next() {
+ var e model.Event
+ if err := rows.Scan(&e.ClusterName, &e.ID, &e.EventTime, &e.OpType, &e.Name, &e.Namespace, &e.Kind, &e.Message, &e.Reason, &e.Host, &e.Event, &e.ImageName, &e.FirstTime, &e.LastTime); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ events = append(events, &e)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return events, nil
+}
+
+// Vulnerabilities is the resolver for the vulnerabilities field.
+func (r *queryResolver) Vulnerabilities(ctx context.Context, clusterName string, namespace string) ([]*model.Vulnerability, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" || namespace == "" {
+ return nil, fmt.Errorf("clusterName and namespace cannot be empty")
+ }
+ query := `
+ SELECT id, cluster_name, namespace, kind, name, vul_id, vul_vendor_ids, vul_pkg_id, vul_pkg_name, vul_pkg_path, vul_installed_version, vul_fixed_version, vul_title, vul_severity, vul_published_date, vul_last_modified_date, ExpiryDate
+ FROM trivy_vul
+ WHERE cluster_name = ? AND namespace = ?
+ `
+ rows, err := r.DB.QueryContext(ctx, query, clusterName, namespace)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+ var vulnerabilities []*model.Vulnerability
+ for rows.Next() {
+ var v model.Vulnerability
+ if err := rows.Scan(&v.ID, &v.ClusterName, &v.Namespace, &v.Kind, &v.Name, &v.VulID, &v.VulVendorIds, &v.VulPkgID, &v.VulPkgName, &v.VulPkgPath, &v.VulInstalledVersion, &v.VulFixedVersion, &v.VulTitle, &v.VulSeverity, &v.VulPublishedDate, &v.VulLastModifiedDate, &v.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ vulnerabilities = append(vulnerabilities, &v)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return vulnerabilities, nil
+}
+
+// Misconfigurations is the resolver for the misconfigurations field.
+func (r *queryResolver) Misconfigurations(ctx context.Context, clusterName string, namespace string) ([]*model.Misconfiguration, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" || namespace == "" {
+ return nil, fmt.Errorf("clusterName and namespace cannot be empty")
+ }
+
+ query := `
+ SELECT id, cluster_name, namespace, kind, name, misconfig_id, misconfig_avdid, misconfig_type, misconfig_title, misconfig_desc, misconfig_msg, misconfig_query, misconfig_resolution, misconfig_severity, misconfig_status, EventTime, ExpiryDate
+ FROM trivy_misconfig
+ WHERE cluster_name = ? AND namespace = ?
+ `
+
+ rows, err := r.DB.QueryContext(ctx, query, clusterName, namespace)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var misconfigurations []*model.Misconfiguration
+ for rows.Next() {
+ var m model.Misconfiguration
+ if err := rows.Scan(&m.ID, &m.ClusterName, &m.Namespace, &m.Kind, &m.Name, &m.MisconfigID, &m.MisconfigAvdid, &m.MisconfigType, &m.MisconfigTitle, &m.MisconfigDesc, &m.MisconfigMsg, &m.MisconfigQuery, &m.MisconfigResolution, &m.MisconfigSeverity, &m.MisconfigStatus, &m.EventTime, &m.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ misconfigurations = append(misconfigurations, &m)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return misconfigurations, nil
+}
+
+// Kubescores is the resolver for the kubescores field.
+func (r *queryResolver) Kubescores(ctx context.Context, clustername string, namespace string) ([]*model.KubeScore, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clustername == "" || namespace == "" {
+ return nil, fmt.Errorf("clustername and namespace cannot be empty")
+ }
+
+ query := `
+ SELECT id, clustername, object_name, kind, apiVersion, name, namespace, target_type, description, path, summary, file_name, file_row, EventTime
+ FROM kubescore
+ WHERE clustername = ? AND namespace = ?
+ `
+
+ rows, err := r.DB.QueryContext(ctx, query, clustername, namespace)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var scores []*model.KubeScore
+ for rows.Next() {
+ var s model.KubeScore
+ if err := rows.Scan(&s.ID, &s.ClusterName, &s.ObjectName, &s.Kind, &s.APIVersion, &s.Name, &s.Namespace, &s.TargetType, &s.Description, &s.Path, &s.Summary, &s.FileName, &s.FileRow, &s.EventTime); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ scores = append(scores, &s)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return scores, nil
+}
+
+// GetAllResources is the resolver for the getAllResources field.
+func (r *queryResolver) GetAllResources(ctx context.Context, clusterName string, namespace string) ([]*model.GetAllResource, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" || namespace == "" {
+ return nil, fmt.Errorf("clusterName and namespace cannot be empty")
+ }
+
+ query := `
+ SELECT ClusterName, Namespace, Kind, Resource, Age, EventTime, ExpiryDate
+ FROM getall_resources
+ WHERE ClusterName = ? AND Namespace = ?
+ `
+
+ rows, err := r.DB.QueryContext(ctx, query, clusterName, namespace)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var resources []*model.GetAllResource
+ for rows.Next() {
+ var r model.GetAllResource
+ if err := rows.Scan(&r.ClusterName, &r.Namespace, &r.Kind, &r.Resource, &r.Age, &r.EventTime, &r.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ resources = append(resources, &r)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return resources, nil
+}
+
+// TrivyImages is the resolver for the trivyImages field.
+func (r *queryResolver) TrivyImages(ctx context.Context, clusterName string) ([]*model.TrivyImage, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" {
+ return nil, fmt.Errorf("clusterName cannot be empty")
+ }
+
+ query := `
+ SELECT id, cluster_name, artifact_name, vul_id, vul_pkg_id, vul_pkg_name, vul_installed_version, vul_fixed_version, vul_title, vul_severity, vul_published_date, vul_last_modified_date, ExpiryDate
+ FROM trivyimage
+ WHERE cluster_name = ?
+ `
+
+ rows, err := r.DB.QueryContext(ctx, query, clusterName)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var images []*model.TrivyImage
+ for rows.Next() {
+ var img model.TrivyImage
+ if err := rows.Scan(&img.ID, &img.ClusterName, &img.ArtifactName, &img.VulID, &img.VulPkgID, &img.VulPkgName, &img.VulInstalledVersion, &img.VulFixedVersion, &img.VulTitle, &img.VulSeverity, &img.VulPublishedDate, &img.VulLastModifiedDate, &img.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ images = append(images, &img)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return images, nil
+}
+
+// DeprecatedAPIs is the resolver for the deprecatedAPIs field.
+func (r *queryResolver) DeprecatedAPIs(ctx context.Context, clusterName string) ([]*model.DeprecatedAPI, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" {
+ return nil, fmt.Errorf("ClusterName cannot be empty")
+ }
+
+ query := `
+ SELECT ClusterName, ObjectName, Description, Kind, Deprecated, Scope, EventTime, ExpiryDate
+ FROM DeprecatedAPIs
+ WHERE ClusterName = ?
+ `
+
+ rows, err := r.DB.QueryContext(ctx, query, clusterName)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+ var apis []*model.DeprecatedAPI
+ for rows.Next() {
+ var api model.DeprecatedAPI
+ var deprecated uint8
+ if err := rows.Scan(&api.ClusterName, &api.ObjectName, &api.Description, &api.Kind, &deprecated, &api.Scope, &api.EventTime, &api.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ deprecatedBool := deprecated != 0
+ api.Deprecated = &deprecatedBool
+ apis = append(apis, &api)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return apis, nil
+}
+
+// DeletedAPIs is the resolver for the deletedAPIs field.
+func (r *queryResolver) DeletedAPIs(ctx context.Context, clusterName string) ([]*model.DeletedAPI, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" {
+ return nil, fmt.Errorf("clusterName cannot be empty")
+ }
+
+ query := `
+ SELECT ClusterName, ObjectName, Group, Kind, Version, Name, Deleted, Scope, EventTime, ExpiryDate
+ FROM DeletedAPIs
+ WHERE ClusterName = ?
+ `
+ rows, err := r.DB.QueryContext(ctx, query, clusterName)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+ var apis []*model.DeletedAPI
+ for rows.Next() {
+ var api model.DeletedAPI
+ var deleted uint8
+ if err := rows.Scan(&api.ClusterName, &api.ObjectName, &api.Group, &api.Kind, &api.Version, &api.Name, &deleted, &api.Scope, &api.EventTime, &api.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ deletedBool := deleted != 0
+ api.Deleted = &deletedBool
+ apis = append(apis, &api)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return apis, nil
+}
+
+// TrivySBOMs is the resolver for the trivySBOMs field.
+func (r *queryResolver) TrivySBOMs(ctx context.Context, clusterName string) ([]*model.TrivySbom, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" {
+ return nil, fmt.Errorf("clusterName cannot be empty")
+ }
+
+ query := `
+ SELECT id, cluster_name, image_name, package_name, package_url, bom_ref, serial_number, version, bom_format, ExpiryDate
+ FROM trivysbom
+ WHERE cluster_name = ?
+ `
+
+ rows, err := r.DB.QueryContext(ctx, query, clusterName)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var sboms []*model.TrivySbom
+ for rows.Next() {
+ var sbom model.TrivySbom
+ if err := rows.Scan(&sbom.ID, &sbom.ClusterName, &sbom.ImageName, &sbom.PackageName, &sbom.PackageURL, &sbom.BomRef, &sbom.SerialNumber, &sbom.Version, &sbom.BomFormat, &sbom.ExpiryDate); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ sboms = append(sboms, &sbom)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return sboms, nil
+}
+
+// TrivyVulCount is the resolver for the trivyVulCount field.
+func (r *queryResolver) TrivyVulCount(ctx context.Context, clusterName string, namespace string) (*model.ClusterNamespaceVulCount, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" || namespace == "" {
+ return nil, fmt.Errorf("clusterName and namespace cannot be empty")
+ }
+
+ query := `SELECT COUNT(*) FROM trivy_vul WHERE cluster_name = ? AND namespace = ?`
+
+ var count int
+ err := r.DB.QueryRowContext(ctx, query, clusterName, namespace).Scan(&count)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+
+ return &model.ClusterNamespaceVulCount{
+ ClusterName: clusterName,
+ Namespace: namespace,
+ VulCount: count,
+ }, nil
+}
+
+// TrivyMisconfigCount is the resolver for the trivyMisconfigCount field.
+func (r *queryResolver) TrivyMisconfigCount(ctx context.Context, clusterName string, namespace string) (*model.ClusterNamespaceMisconfigCount, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" || namespace == "" {
+ return nil, fmt.Errorf("clusterName and namespace cannot be empty")
+ }
+
+ query := `SELECT COUNT(*) FROM trivy_misconfig WHERE cluster_name = ? AND namespace = ?`
+
+ var count int
+ err := r.DB.QueryRowContext(ctx, query, clusterName, namespace).Scan(&count)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+
+ return &model.ClusterNamespaceMisconfigCount{
+ ClusterName: clusterName,
+ Namespace: namespace,
+ MisconfigCount: count,
+ }, nil
+}
+
+// DeletedAPICount is the resolver for the deletedAPICount field.
+func (r *queryResolver) DeletedAPICount(ctx context.Context, clusterName string) (*model.ClusterDeletedAPICount, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" {
+ return nil, fmt.Errorf("clusterName cannot be empty")
+ }
+
+ query := `SELECT COUNT(*) FROM DeletedAPIs WHERE ClusterName = ?`
+
+ var count int
+ err := r.DB.QueryRowContext(ctx, query, clusterName).Scan(&count)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+
+ return &model.ClusterDeletedAPICount{
+ ClusterName: clusterName,
+ DeletedAPICount: count,
+ }, nil
+}
+
+// TrivyImageCount is the resolver for the trivyImageCount field.
+func (r *queryResolver) TrivyImageCount(ctx context.Context, clusterName string) (*model.TrivyImageCount, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" {
+ return nil, fmt.Errorf("clusterName cannot be empty")
+ }
+
+ query := `SELECT COUNT(*) FROM trivyimage WHERE cluster_name = ?`
+
+ var count int
+ err := r.DB.QueryRowContext(ctx, query, clusterName).Scan(&count)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+
+ return &model.TrivyImageCount{
+ ClusterName: clusterName,
+ ImageCount: count,
+ }, nil
+}
+
+// DeprecatedAPICount is the resolver for the deprecatedAPICount field.
+func (r *queryResolver) DeprecatedAPICount(ctx context.Context, clusterName string) (*model.ClusterDeprecatedAPICount, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ if clusterName == "" {
+ return nil, fmt.Errorf("clusterName cannot be empty")
+ }
+
+ query := `SELECT COUNT(*) FROM DeprecatedAPIs WHERE ClusterName = ?`
+
+ var count int
+ err := r.DB.QueryRowContext(ctx, query, clusterName).Scan(&count)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+
+ return &model.ClusterDeprecatedAPICount{
+ ClusterName: clusterName,
+ DeprecatedAPICount: count,
+ }, nil
+}
+
+// Query returns QueryResolver implementation.
+func (r *Resolver) Query() QueryResolver { return &queryResolver{r} }
+
+type queryResolver struct{ *Resolver }
diff --git a/graphqlserver/graph/utils.go b/graphqlserver/graph/utils.go
new file mode 100644
index 00000000..dd7bb02a
--- /dev/null
+++ b/graphqlserver/graph/utils.go
@@ -0,0 +1,158 @@
+package graph
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+
+ "github.com/intelops/kubviz/graphqlserver/graph/model"
+)
+
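+// fetchClustersFromDatabase returns the distinct cluster names recorded in the events table.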
+func (r *Resolver) fetchClustersFromDatabase(ctx context.Context) ([]string, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+ query := `SELECT DISTINCT ClusterName FROM events`
+
+ rows, err := r.DB.QueryContext(ctx, query)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var clusters []string
+ for rows.Next() {
+ var cluster string
+ if err := rows.Scan(&cluster); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ clusters = append(clusters, cluster)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return clusters, nil
+}
+
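+// fetchNamespacesFromDatabase returns the distinct namespaces recorded in the events table for the given cluster.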
+func (r *Resolver) fetchNamespacesFromDatabase(ctx context.Context, clusterName string) ([]string, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+ // Include the cluster name in the WHERE clause to filter namespaces by cluster
+ query := `SELECT DISTINCT Namespace FROM events WHERE ClusterName = ?`
+
+ rows, err := r.DB.QueryContext(ctx, query, clusterName)
+ if err != nil {
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var namespaces []string
+ for rows.Next() {
+ var namespace string
+ if err := rows.Scan(&namespace); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ namespaces = append(namespaces, namespace)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return namespaces, nil
+}
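+
+// fetchOutdatedImages returns the rows from the outdated_images table for the given namespace.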
+func (r *Resolver) fetchOutdatedImages(ctx context.Context, namespace string) ([]*model.OutdatedImage, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+ query := `SELECT ClusterName, Namespace, Pod, CurrentImage, CurrentTag, LatestVersion, VersionsBehind, EventTime FROM outdated_images WHERE Namespace = ?`
+
+ rows, err := r.DB.QueryContext(ctx, query, namespace)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return []*model.OutdatedImage{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var outdatedImages []*model.OutdatedImage
+ for rows.Next() {
+ var oi model.OutdatedImage
+ if err := rows.Scan(&oi.ClusterName, &oi.Namespace, &oi.Pod, &oi.CurrentImage, &oi.CurrentTag, &oi.LatestVersion, &oi.VersionsBehind, &oi.EventTime); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ outdatedImages = append(outdatedImages, &oi)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return outdatedImages, nil
+}
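+
+// fetchKubeScores returns the kube-score results stored in the kubescore table for the given namespace.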
+func (r *Resolver) fetchKubeScores(ctx context.Context, namespace string) ([]*model.KubeScore, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ query := `SELECT id, clustername, object_name, kind, apiVersion, name, namespace, target_type, description, path, summary, file_name, file_row, EventTime FROM kubescore WHERE namespace = ?`
+ rows, err := r.DB.QueryContext(ctx, query, namespace)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ // No data for the namespace, return an empty slice
+ return []*model.KubeScore{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var kubeScores []*model.KubeScore
+ for rows.Next() {
+ var ks model.KubeScore
+ if err := rows.Scan(&ks.ID, &ks.ClusterName, &ks.ObjectName, &ks.Kind, &ks.APIVersion, &ks.Name, &ks.Namespace, &ks.TargetType, &ks.Description, &ks.Path, &ks.Summary, &ks.FileName, &ks.FileRow, &ks.EventTime); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ kubeScores = append(kubeScores, &ks)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return kubeScores, nil
+}
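+
+// fetchResources returns the rows from the getall_resources table for the given namespace.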
+func (r *Resolver) fetchResources(ctx context.Context, namespace string) ([]*model.Resource, error) {
+ if r.DB == nil {
+ return nil, fmt.Errorf("database connection is not initialized")
+ }
+
+ query := `SELECT ClusterName, Namespace, Kind, Resource, Age, EventTime FROM getall_resources WHERE Namespace = ?`
+ rows, err := r.DB.QueryContext(ctx, query, namespace)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ // No data for the namespace, return an empty slice
+ return []*model.Resource{}, nil
+ }
+ return nil, fmt.Errorf("error executing query: %v", err)
+ }
+ defer rows.Close()
+
+ var resources []*model.Resource
+ for rows.Next() {
+ var res model.Resource
+ if err := rows.Scan(&res.ClusterName, &res.Namespace, &res.Kind, &res.Resource, &res.Age, &res.EventTime); err != nil {
+ return nil, fmt.Errorf("error scanning row: %v", err)
+ }
+ resources = append(resources, &res)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("error iterating rows: %v", err)
+ }
+
+ return resources, nil
+}
diff --git a/graphqlserver/server.go b/graphqlserver/server.go
new file mode 100644
index 00000000..8e191aff
--- /dev/null
+++ b/graphqlserver/server.go
@@ -0,0 +1,70 @@
+package main
+
+import (
+ "database/sql"
+ "log"
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/99designs/gqlgen/graphql/handler"
+ "github.com/99designs/gqlgen/graphql/playground"
+ "github.com/intelops/kubviz/client/pkg/clickhouse"
+ "github.com/intelops/kubviz/client/pkg/config"
+ "github.com/intelops/kubviz/graphqlserver/graph"
+ "github.com/kelseyhightower/envconfig"
+)
+
+const defaultPort = "8085"
+const (
+ maxRetries = 5
+ retryDelay = 5 * time.Second
+)
+
+func main() {
+ log.Println("GraphQL server starting ...")
+ cfg := &config.GraphQlConfig{}
+ if err := envconfig.Process("", cfg); err != nil {
+ log.Fatalf("Could not parse env Config: %v", err)
+ }
+ db, err := initializeDatabase(cfg)
+ if err != nil {
+ log.Fatalf("Failed to initialize database: %v", err)
+ }
+ resolver := graph.NewResolver(db)
+ port := os.Getenv("PORT")
+ if port == "" {
+ port = defaultPort
+ }
+
+ srv := handler.NewDefaultServer(graph.NewExecutableSchema(graph.Config{Resolvers: resolver}))
+
+ http.Handle("/", playground.Handler("GraphQL playground", "/query"))
+ http.Handle("/query", srv)
+
+ log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
+ log.Fatal(http.ListenAndServe(":"+port, nil))
+}
+
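+// initializeDatabase opens a ClickHouse connection using the configured address and credentials, retrying up to maxRetries times before giving up.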
+func initializeDatabase(cfg *config.GraphQlConfig) (*sql.DB, error) {
+ var db *sql.DB
+ var err error
+ dbConfig := &config.Config{
+ DbPort: cfg.DbPort,
+ DBAddress: cfg.DBAddress,
+ ClickHouseUsername: cfg.ClickHouseUsername,
+ ClickHousePassword: cfg.ClickHousePassword,
+ }
+ for i := 0; i < maxRetries; i++ {
+ _, db, err = clickhouse.NewDBClient(dbConfig)
+ if err == nil {
+ log.Println("Successfully connected to the database")
+ return db, nil
+ }
+ log.Printf("Failed to connect to database, retrying (%d/%d): %v", i+1, maxRetries, err)
+ time.Sleep(retryDelay)
+ }
+
+ // If the loop exits and the connection is not established, return the error
+ return nil, err
+}
diff --git a/model/gitbridge.go b/model/gitbridge.go
index 950af4b2..d69131e4 100644
--- a/model/gitbridge.go
+++ b/model/gitbridge.go
@@ -40,6 +40,6 @@ type GitCommonAttribute struct {
CommitUrl string
EventType string
RepoName string
- TimeStamp string
+ TimeStamp time.Time
Event string
}
diff --git a/model/jfrogcontainer.go b/model/jfrogcontainer.go
new file mode 100644
index 00000000..b6f1d38e
--- /dev/null
+++ b/model/jfrogcontainer.go
@@ -0,0 +1,18 @@
+package model
+
+type JfrogContainerPushEventPayload struct {
+ Domain string `json:"domain"`
+ EventType string `json:"event_type"`
+ Data struct {
+ RepoKey string `json:"repo_key"`
+ Path string `json:"path"`
+ Name string `json:"name"`
+ SHA256 string `json:"sha256"`
+ Size int32 `json:"size"`
+ ImageName string `json:"image_name"`
+ Tag string `json:"tag"`
+ } `json:"data"`
+ SubscriptionKey string `json:"subscription_key"`
+ JPDOrigin string `json:"jpd_origin"`
+ Source string `json:"source"`
+}
diff --git a/model/kuberhealthy.go b/model/kuberhealthy.go
new file mode 100644
index 00000000..7ce2f931
--- /dev/null
+++ b/model/kuberhealthy.go
@@ -0,0 +1,17 @@
+package model
+
+import "time"
+
+// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+type KuberhealthyCheckDetail struct {
+ CurrentUUID string `json:"currentUUID"`
+ CheckName string `json:"checkName"`
+ OK uint8 `json:"ok"`
+ Errors string `json:"errors"`
+ RunDuration string `json:"runDuration"`
+ Namespace string `json:"namespace"`
+ Node string `json:"node"`
+ LastRun time.Time `json:"lastRun"`
+ AuthoritativePod string `json:"authoritativePod"`
+}
diff --git a/model/kubescore.go b/model/kubescore.go
index 918ada85..fb63f747 100644
--- a/model/kubescore.go
+++ b/model/kubescore.go
@@ -1,8 +1,9 @@
package model
+import "github.com/zegl/kube-score/renderer/json_v2"
+
type KubeScoreRecommendations struct {
- ID string
- Namespace string
- Recommendations string
- ClusterName string
+ ID string
+ ClusterName string
+ Report []json_v2.ScoredObject
}
diff --git a/model/metrics.go b/model/metrics.go
index 9a462ef1..6e339720 100644
--- a/model/metrics.go
+++ b/model/metrics.go
@@ -7,4 +7,5 @@ type Metrics struct {
Type string
Event *v1.Event
ClusterName string
+ ImageName string
}
diff --git a/model/quay.go b/model/quay.go
new file mode 100644
index 00000000..f8ae495b
--- /dev/null
+++ b/model/quay.go
@@ -0,0 +1,10 @@
+package model
+
+type QuayImagePushPayload struct {
+ Name string `json:"name"`
+ Repository string `json:"repository"`
+ Namespace string `json:"namespace"`
+ DockerURL string `json:"docker_url"`
+ Homepage string `json:"homepage"`
+ UpdatedTags []string `json:"updated_tags"`
+}
diff --git a/model/trivy_sbom.go b/model/trivy_sbom.go
index bcbe7219..e763418c 100644
--- a/model/trivy_sbom.go
+++ b/model/trivy_sbom.go
@@ -1,59 +1,7 @@
package model
-import (
- "time"
-)
-
-type Reports struct {
- ID string
- Report Sbom
-}
-
type Sbom struct {
- Schema string `json:"$schema"`
- BomFormat string `json:"bomFormat"`
- SpecVersion string `json:"specVersion"`
- SerialNumber string `json:"serialNumber"`
- Version int `json:"version"`
- Metadata struct {
- Timestamp time.Time `json:"timestamp"`
- Tools []struct {
- Vendor string `json:"vendor"`
- Name string `json:"name"`
- Version string `json:"version"`
- } `json:"tools"`
- Component struct {
- BomRef string `json:"bom-ref"`
- Type string `json:"type"`
- Name string `json:"name"`
- Purl string `json:"purl"`
- Properties []struct {
- Name string `json:"name"`
- Value string `json:"value"`
- } `json:"properties"`
- } `json:"component"`
- } `json:"metadata"`
- Components []struct {
- BomRef string `json:"bom-ref"`
- Type string `json:"type"`
- Name string `json:"name"`
- Version string `json:"version"`
- Properties []struct {
- Name string `json:"name"`
- Value string `json:"value"`
- } `json:"properties"`
- Hashes []struct {
- Alg string `json:"alg"`
- Content string `json:"content"`
- } `json:"hashes,omitempty"`
- Licenses []struct {
- Expression string `json:"expression"`
- } `json:"licenses,omitempty"`
- Purl string `json:"purl,omitempty"`
- } `json:"components"`
- Dependencies []struct {
- Ref string `json:"ref"`
- DependsOn []string `json:"dependsOn"`
- } `json:"dependencies"`
- Vulnerabilities []interface{} `json:"vulnerabilities"`
+ ID string
+ ClusterName string
+ Report map[string]interface{}
}
diff --git a/pkg/mtlsnats/mtlsnats.go b/pkg/mtlsnats/mtlsnats.go
new file mode 100644
index 00000000..bc82cc94
--- /dev/null
+++ b/pkg/mtlsnats/mtlsnats.go
@@ -0,0 +1,97 @@
+package mtlsnats
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/kelseyhightower/envconfig"
+)
+
+type MtlsConfig struct {
+ CertificateFilePath string `envconfig:"CERT_FILE" default:""`
+ KeyFilePath string `envconfig:"KEY_FILE" default:""`
+ CAFilePath string `envconfig:"CA_FILE" default:""`
+ IsEnabled bool `envconfig:"ENABLE_MTLS_NATS" default:"false"`
+}
+
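+// ReadMtlsCerts reads the client certificate, private key, and CA certificate PEM contents from the given file paths.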
+func ReadMtlsCerts(certificateFilePath, keyFilePath, CAFilePath string) (certPEM, keyPEM, CACertPEM []byte, err error) {
+ certPEM, err = ReadMtlsFileContents(certificateFilePath)
+ if err != nil {
+ err = fmt.Errorf("error while reading cert file: %w", err)
+ return
+ }
+
+ keyPEM, err = ReadMtlsFileContents(keyFilePath)
+ if err != nil {
+ err = fmt.Errorf("error while reading key file: %w", err)
+ return
+ }
+
+ CACertPEM, err = ReadMtlsFileContents(CAFilePath)
+ if err != nil {
+ err = fmt.Errorf("error while reading CAcert file: %w", err)
+ return
+ }
+
+ return
+
+}
+
+func OpenMtlsCertFile(filepath string) (f *os.File, err error) {
+ f, err = os.Open(filepath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open mtls certificate file: %w", err)
+ }
+ return f, nil
+}
+
+func ReadMtlsFileContents(filePath string) ([]byte, error) {
+ file, err := OpenMtlsCertFile(filePath)
+ if err != nil {
+ return nil, err
+ }
+
+ defer file.Close()
+
+ contents, err := io.ReadAll(file)
+ if err != nil {
+ return nil, fmt.Errorf("error while reading file %s: %w", filePath, err)
+ }
+
+ return contents, nil
+}
+
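+// GetTlsConfig builds a *tls.Config for mTLS NATS connections from the certificate, key, and CA paths supplied via the CERT_FILE, KEY_FILE, and CA_FILE environment variables.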
+func GetTlsConfig() (*tls.Config, error) {
+
+ var cfg MtlsConfig
+ err := envconfig.Process("", &cfg)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read mtls config: %w", err)
+ }
+
+ certPEM, keyPEM, CACertPEM, err := ReadMtlsCerts(cfg.CertificateFilePath, cfg.KeyFilePath, cfg.CAFilePath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read mtls certificates: %w", err)
+ }
+
+ cert, err := tls.X509KeyPair(certPEM, keyPEM)
+ if err != nil {
+ return nil, fmt.Errorf("error loading X509 key pair from PEM: %w", err)
+ }
+
+ caCertPool := x509.NewCertPool()
+ if !caCertPool.AppendCertsFromPEM(CACertPEM) {
+ return nil, fmt.Errorf("failed to append CA certificate to pool")
+ }
+ tlsConfig := &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ RootCAs: caCertPool,
+ InsecureSkipVerify: false,
+ }
+
+ return tlsConfig, nil
+}
diff --git a/pkg/opentelemetry/opentelemetry.go b/pkg/opentelemetry/opentelemetry.go
new file mode 100644
index 00000000..201587b9
--- /dev/null
+++ b/pkg/opentelemetry/opentelemetry.go
@@ -0,0 +1,92 @@
+package opentelemetry
+
+import (
+ "context"
+ "log"
+
+ "github.com/kelseyhightower/envconfig"
+ "github.com/pkg/errors"
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+type Configurations struct {
+ ServiceName string `envconfig:"APPLICATION_NAME" default:"Kubviz"`
+ CollectorURL string `envconfig:"OPTEL_URL" default:"otelcollector.azureagent.optimizor.app:80"`
+ //IsEnabled bool `envconfig:"IS_OPTEL_ENABLED" default:"false"`
+}
+
+func GetConfigurations() (opteConfig *Configurations, err error) {
+ opteConfig = &Configurations{}
+ if err = envconfig.Process("", opteConfig); err != nil {
+ return nil, errors.WithStack(err)
+ }
+ return
+}
+
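+// InitTracer configures an OTLP gRPC trace exporter for the configured collector, wires it into a tracer provider, and registers that provider and a TraceContext/Baggage propagator globally.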
+func InitTracer() (*sdktrace.TracerProvider, error) {
+ ctx := context.Background()
+
+ config, err := GetConfigurations()
+ if err != nil {
+ log.Println("Unable to read open telemetry configurations")
+ return nil, err
+ }
+
+ // if !config.IsEnabled {
+ // return nil, nil
+ // }
+
+ headers := map[string]string{
+ "signoz-service-name": config.ServiceName,
+ }
+
+ client := otlptracegrpc.NewClient(
+ otlptracegrpc.WithEndpoint(config.CollectorURL),
+ otlptracegrpc.WithHeaders(headers),
+ otlptracegrpc.WithInsecure(),
+ )
+
+ exporter, err := otlptrace.New(ctx, client)
+ if err != nil {
+ log.Fatalf("failed to initialize exporter: %v", err)
+ }
+
+ res, err := resource.New(
+ ctx,
+ resource.WithAttributes(
+ attribute.String("service.name", config.ServiceName),
+ attribute.String("library.language", "go"),
+ ),
+ )
+ if err != nil {
+ log.Fatalf("failed to initialize resource: %v", err)
+ }
+
+ // Create the trace provider
+ tp := sdktrace.NewTracerProvider(
+ sdktrace.WithSampler(sdktrace.AlwaysSample()),
+ sdktrace.WithBatcher(exporter),
+ sdktrace.WithResource(res),
+ )
+
+ // Set the global trace provider
+ otel.SetTracerProvider(tp)
+
+ // Set the propagator
+ propagator := propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})
+ otel.SetTextMapPropagator(propagator)
+
+ return tp, nil
+}
+
+func BuildContext(ctx context.Context) context.Context {
+ // Discarding the cancel func returned by context.WithCancel leaks the derived
+ // context (go vet's lostcancel check); since that cancel was never called,
+ // returning the parent context unchanged is equivalent and leak-free.
+ return ctx
+}
diff --git a/script/wait-for-clickhouse.sh b/script/wait-for-clickhouse.sh
new file mode 100644
index 00000000..9d7b4692
--- /dev/null
+++ b/script/wait-for-clickhouse.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+CLICKHOUSE_HOST="${DB_ADDRESS}"
+CLICKHOUSE_PORT="${DB_PORT}"
+RETRY_INTERVAL=5
+MAX_RETRIES=60
+
+retry_count=0
+while [ $retry_count -lt $MAX_RETRIES ]; do
+ if nc -z -v -w5 "$CLICKHOUSE_HOST" "$CLICKHOUSE_PORT"; then
+ echo "ClickHouse is ready!"
+ exit 0
+ else
+ echo "Failed to connect to ClickHouse. Retrying in $RETRY_INTERVAL seconds..."
+ retry_count=$((retry_count + 1))
+ sleep $RETRY_INTERVAL
+ fi
+done
+
+echo "Failed to connect to ClickHouse after $MAX_RETRIES retries. Exiting."
+exit 1
diff --git a/sql/0000010_trivy_misconfig.down.sql b/sql/0000010_trivy_misconfig.down.sql
new file mode 100644
index 00000000..518dab21
--- /dev/null
+++ b/sql/0000010_trivy_misconfig.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS trivy_misconfig;
diff --git a/sql/0000010_trivy_misconfig.up.sql b/sql/0000010_trivy_misconfig.up.sql
new file mode 100644
index 00000000..11767e76
--- /dev/null
+++ b/sql/0000010_trivy_misconfig.up.sql
@@ -0,0 +1,23 @@
+CREATE TABLE IF NOT EXISTS trivy_misconfig (
+ id UUID,
+ cluster_name String,
+ namespace String,
+ kind String,
+ name String,
+ misconfig_id String,
+ misconfig_avdid String,
+ misconfig_type String,
+ misconfig_title String,
+ misconfig_desc String,
+ misconfig_msg String,
+ misconfig_query String,
+ misconfig_resolution String,
+ misconfig_severity String,
+ misconfig_status String,
+ EventTime DateTime('UTC'),
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
+
diff --git a/sql/0000011_trivyimage.down.sql b/sql/0000011_trivyimage.down.sql
new file mode 100644
index 00000000..54ee8f76
--- /dev/null
+++ b/sql/0000011_trivyimage.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS trivyimage;
diff --git a/sql/0000011_trivyimage.up.sql b/sql/0000011_trivyimage.up.sql
new file mode 100644
index 00000000..29e5ad1c
--- /dev/null
+++ b/sql/0000011_trivyimage.up.sql
@@ -0,0 +1,19 @@
+CREATE TABLE IF NOT EXISTS trivyimage (
+ id UUID,
+ cluster_name String,
+ artifact_name String,
+ vul_id String,
+ vul_pkg_id String,
+ vul_pkg_name String,
+ vul_installed_version String,
+ vul_fixed_version String,
+ vul_title String,
+ vul_severity String,
+ vul_published_date DateTime('UTC'),
+ vul_last_modified_date DateTime('UTC'),
+ EventTime DateTime('UTC'),
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
diff --git a/sql/0000012_dockerhubbuild.down.sql b/sql/0000012_dockerhubbuild.down.sql
new file mode 100644
index 00000000..2cc2ee1d
--- /dev/null
+++ b/sql/0000012_dockerhubbuild.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS dockerhubbuild;
diff --git a/sql/0000012_dockerhubbuild.up.sql b/sql/0000012_dockerhubbuild.up.sql
new file mode 100644
index 00000000..9050b61f
--- /dev/null
+++ b/sql/0000012_dockerhubbuild.up.sql
@@ -0,0 +1,13 @@
+CREATE TABLE IF NOT EXISTS dockerhubbuild (
+ PushedBy String,
+ ImageTag String,
+ RepositoryName String,
+ DateCreated String,
+ Owner String,
+ Event String,
+ EventTime DateTime('UTC'),
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
diff --git a/sql/0000013_azurecontainerpush.down.sql b/sql/0000013_azurecontainerpush.down.sql
new file mode 100644
index 00000000..3d043e15
--- /dev/null
+++ b/sql/0000013_azurecontainerpush.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS azurecontainerpush;
diff --git a/sql/0000013_azurecontainerpush.up.sql b/sql/0000013_azurecontainerpush.up.sql
new file mode 100644
index 00000000..e23c03a8
--- /dev/null
+++ b/sql/0000013_azurecontainerpush.up.sql
@@ -0,0 +1,14 @@
+CREATE TABLE IF NOT EXISTS azurecontainerpush (
+ RegistryURL String,
+ RepositoryName String,
+ Tag String,
+ ImageName String,
+ Event String,
+ Size Int32,
+ SHAID String,
+ EventTime DateTime('UTC'),
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
\ No newline at end of file
diff --git a/sql/0000014_quaycontainerpush.down.sql b/sql/0000014_quaycontainerpush.down.sql
new file mode 100644
index 00000000..e157e46a
--- /dev/null
+++ b/sql/0000014_quaycontainerpush.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS quaycontainerpush;
diff --git a/sql/0000014_quaycontainerpush.up.sql b/sql/0000014_quaycontainerpush.up.sql
new file mode 100644
index 00000000..1361d75d
--- /dev/null
+++ b/sql/0000014_quaycontainerpush.up.sql
@@ -0,0 +1,14 @@
+CREATE TABLE IF NOT EXISTS quaycontainerpush (
+ name String,
+ repository String,
+ nameSpace String,
+ dockerURL String,
+ homePage String,
+ tag String,
+ Event String,
+ EventTime DateTime('UTC'),
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
diff --git a/sql/0000015_trivysbom.down.sql b/sql/0000015_trivysbom.down.sql
new file mode 100644
index 00000000..af2515cb
--- /dev/null
+++ b/sql/0000015_trivysbom.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS trivysbom;
diff --git a/sql/0000015_trivysbom.up.sql b/sql/0000015_trivysbom.up.sql
new file mode 100644
index 00000000..cab4007f
--- /dev/null
+++ b/sql/0000015_trivysbom.up.sql
@@ -0,0 +1,16 @@
+CREATE TABLE IF NOT EXISTS trivysbom (
+ id UUID,
+ cluster_name String,
+ bom_format String,
+ serial_number String,
+ bom_ref String,
+ image_name String,
+ component_type String,
+ package_url String,
+ event_time DateTime('UTC'),
+ other_component_name String,
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
diff --git a/sql/0000016_azure_devops.down.sql b/sql/0000016_azure_devops.down.sql
new file mode 100644
index 00000000..e6227a49
--- /dev/null
+++ b/sql/0000016_azure_devops.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS azure_devops;
diff --git a/sql/0000016_azure_devops.up.sql b/sql/0000016_azure_devops.up.sql
new file mode 100644
index 00000000..040a25b1
--- /dev/null
+++ b/sql/0000016_azure_devops.up.sql
@@ -0,0 +1,14 @@
+CREATE TABLE IF NOT EXISTS azure_devops (
+ Author String,
+ Provider String,
+ CommitID String,
+ CommitUrl String,
+ EventType String,
+ RepoName String,
+ TimeStamp DateTime('UTC'),
+ Event String,
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
diff --git a/sql/0000017_github.down.sql b/sql/0000017_github.down.sql
new file mode 100644
index 00000000..dd91d720
--- /dev/null
+++ b/sql/0000017_github.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS github;
diff --git a/sql/0000017_github.up.sql b/sql/0000017_github.up.sql
new file mode 100644
index 00000000..b10088c6
--- /dev/null
+++ b/sql/0000017_github.up.sql
@@ -0,0 +1,14 @@
+CREATE TABLE IF NOT EXISTS github (
+ Author String,
+ Provider String,
+ CommitID String,
+ CommitUrl String,
+ EventType String,
+ RepoName String,
+ TimeStamp DateTime('UTC'),
+ Event String,
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
diff --git a/sql/0000018_gitlab.down.sql b/sql/0000018_gitlab.down.sql
new file mode 100644
index 00000000..769364c9
--- /dev/null
+++ b/sql/0000018_gitlab.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS gitlab;
diff --git a/sql/0000018_gitlab.up.sql b/sql/0000018_gitlab.up.sql
new file mode 100644
index 00000000..3fde07fc
--- /dev/null
+++ b/sql/0000018_gitlab.up.sql
@@ -0,0 +1,14 @@
+CREATE TABLE IF NOT EXISTS gitlab (
+ Author String,
+ Provider String,
+ CommitID String,
+ CommitUrl String,
+ EventType String,
+ RepoName String,
+ TimeStamp DateTime('UTC'),
+ Event String,
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
diff --git a/sql/0000019_bitbucket.down.sql b/sql/0000019_bitbucket.down.sql
new file mode 100644
index 00000000..b0df312d
--- /dev/null
+++ b/sql/0000019_bitbucket.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS bitbucket;
diff --git a/sql/0000019_bitbucket.up.sql b/sql/0000019_bitbucket.up.sql
new file mode 100644
index 00000000..62c2adb6
--- /dev/null
+++ b/sql/0000019_bitbucket.up.sql
@@ -0,0 +1,14 @@
+CREATE TABLE IF NOT EXISTS bitbucket (
+ Author String,
+ Provider String,
+ CommitID String,
+ CommitUrl String,
+ EventType String,
+ RepoName String,
+ TimeStamp DateTime('UTC'),
+ Event String,
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
diff --git a/sql/000001_events.down.sql b/sql/000001_events.down.sql
new file mode 100644
index 00000000..653c7400
--- /dev/null
+++ b/sql/000001_events.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS events;
diff --git a/sql/000001_events.up.sql b/sql/000001_events.up.sql
new file mode 100644
index 00000000..8905854f
--- /dev/null
+++ b/sql/000001_events.up.sql
@@ -0,0 +1,20 @@
+CREATE TABLE IF NOT EXISTS events (
+ ClusterName String,
+ Id String,
+ EventTime DateTime('UTC'),
+ OpType String,
+ Name String,
+ Namespace String,
+ Kind String,
+ Message String,
+ Reason String,
+ Host String,
+ Event String,
+ ImageName String,
+ FirstTime String,
+ LastTime String,
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
diff --git a/sql/0000020_gitea.down.sql b/sql/0000020_gitea.down.sql
new file mode 100644
index 00000000..9c24ca85
--- /dev/null
+++ b/sql/0000020_gitea.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS gitea;
diff --git a/sql/0000020_gitea.up.sql b/sql/0000020_gitea.up.sql
new file mode 100644
index 00000000..6767a05b
--- /dev/null
+++ b/sql/0000020_gitea.up.sql
@@ -0,0 +1,14 @@
+CREATE TABLE IF NOT EXISTS gitea (
+ Author String,
+ Provider String,
+ CommitID String,
+ CommitUrl String,
+ EventType String,
+ RepoName String,
+ TimeStamp DateTime('UTC'),
+ Event String,
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
diff --git a/sql/0000021_kuberhealthy.down.sql b/sql/0000021_kuberhealthy.down.sql
new file mode 100644
index 00000000..e69f5b26
--- /dev/null
+++ b/sql/0000021_kuberhealthy.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS kuberhealthy;
diff --git a/sql/0000021_kuberhealthy.up.sql b/sql/0000021_kuberhealthy.up.sql
new file mode 100644
index 00000000..3462ca6c
--- /dev/null
+++ b/sql/0000021_kuberhealthy.up.sql
@@ -0,0 +1,15 @@
+CREATE TABLE IF NOT EXISTS kuberhealthy (
+ CurrentUUID String,
+ CheckName String,
+ OK UInt8,
+ Errors String,
+ RunDuration String,
+ Namespace String,
+ Node String,
+ LastRun DateTime('UTC'),
+ AuthoritativePod String,
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
diff --git a/sql/000002_rakkess.down.sql b/sql/000002_rakkess.down.sql
new file mode 100644
index 00000000..e182c3cb
--- /dev/null
+++ b/sql/000002_rakkess.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS rakkess;
diff --git a/sql/000002_rakkess.up.sql b/sql/000002_rakkess.up.sql
new file mode 100644
index 00000000..d4b66dc2
--- /dev/null
+++ b/sql/000002_rakkess.up.sql
@@ -0,0 +1,14 @@
+CREATE TABLE IF NOT EXISTS rakkess (
+ ClusterName String,
+ Name String,
+ Create String,
+ Delete String,
+ List String,
+ Update String,
+ EventTime DateTime('UTC'),
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
+
diff --git a/sql/000003_DeprecatedAPIs.down.sql b/sql/000003_DeprecatedAPIs.down.sql
new file mode 100644
index 00000000..af77c59a
--- /dev/null
+++ b/sql/000003_DeprecatedAPIs.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS DeprecatedAPIs;
diff --git a/sql/000003_DeprecatedAPIs.up.sql b/sql/000003_DeprecatedAPIs.up.sql
new file mode 100644
index 00000000..c8aa1f76
--- /dev/null
+++ b/sql/000003_DeprecatedAPIs.up.sql
@@ -0,0 +1,14 @@
+CREATE TABLE IF NOT EXISTS DeprecatedAPIs (
+ ClusterName String,
+ ObjectName String,
+ Description String,
+ Kind String,
+ Deprecated UInt8,
+ Scope String,
+ EventTime DateTime('UTC'),
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
+
diff --git a/sql/000004_DeletedAPIs.down.sql b/sql/000004_DeletedAPIs.down.sql
new file mode 100644
index 00000000..e0157f5e
--- /dev/null
+++ b/sql/000004_DeletedAPIs.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS DeletedAPIs;
diff --git a/sql/000004_DeletedAPIs.up.sql b/sql/000004_DeletedAPIs.up.sql
new file mode 100644
index 00000000..dbfe3360
--- /dev/null
+++ b/sql/000004_DeletedAPIs.up.sql
@@ -0,0 +1,15 @@
+CREATE TABLE IF NOT EXISTS DeletedAPIs (
+ ClusterName String,
+ ObjectName String,
+ Group String,
+ Kind String,
+ Version String,
+ Name String,
+ Deleted UInt8,
+ Scope String,
+ EventTime DateTime('UTC'),
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
diff --git a/sql/000005_jfrogcontainerpush.down.sql b/sql/000005_jfrogcontainerpush.down.sql
new file mode 100644
index 00000000..03c41863
--- /dev/null
+++ b/sql/000005_jfrogcontainerpush.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS jfrogcontainerpush;
diff --git a/sql/000005_jfrogcontainerpush.up.sql b/sql/000005_jfrogcontainerpush.up.sql
new file mode 100644
index 00000000..4f47f001
--- /dev/null
+++ b/sql/000005_jfrogcontainerpush.up.sql
@@ -0,0 +1,17 @@
+CREATE TABLE IF NOT EXISTS jfrogcontainerpush (
+ Domain String,
+ EventType String,
+ RegistryURL String,
+ RepositoryName String,
+ SHAID String,
+ Size Int32,
+ ImageName String,
+ Tag String,
+ Event String,
+ EventTime DateTime('UTC'),
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
+
diff --git a/sql/000006_getall_resources.down.sql b/sql/000006_getall_resources.down.sql
new file mode 100644
index 00000000..866aaa72
--- /dev/null
+++ b/sql/000006_getall_resources.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS getall_resources;
diff --git a/sql/000006_getall_resources.up.sql b/sql/000006_getall_resources.up.sql
new file mode 100644
index 00000000..b77b322c
--- /dev/null
+++ b/sql/000006_getall_resources.up.sql
@@ -0,0 +1,13 @@
+CREATE TABLE IF NOT EXISTS getall_resources (
+ ClusterName String,
+ Namespace String,
+ Kind String,
+ Resource String,
+ Age String,
+ EventTime DateTime('UTC'),
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
+
diff --git a/sql/000007_outdated_images.down.sql b/sql/000007_outdated_images.down.sql
new file mode 100644
index 00000000..4741b3c3
--- /dev/null
+++ b/sql/000007_outdated_images.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS outdated_images;
diff --git a/sql/000007_outdated_images.up.sql b/sql/000007_outdated_images.up.sql
new file mode 100644
index 00000000..f89c6132
--- /dev/null
+++ b/sql/000007_outdated_images.up.sql
@@ -0,0 +1,15 @@
+CREATE TABLE IF NOT EXISTS outdated_images (
+ ClusterName String,
+ Namespace String,
+ Pod String,
+ CurrentImage String,
+ CurrentTag String,
+ LatestVersion String,
+ VersionsBehind Int64,
+ EventTime DateTime('UTC'),
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
+
diff --git a/sql/000008_kubescore.down.sql b/sql/000008_kubescore.down.sql
new file mode 100644
index 00000000..db35ad56
--- /dev/null
+++ b/sql/000008_kubescore.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS kubescore;
diff --git a/sql/000008_kubescore.up.sql b/sql/000008_kubescore.up.sql
new file mode 100644
index 00000000..db4086f1
--- /dev/null
+++ b/sql/000008_kubescore.up.sql
@@ -0,0 +1,21 @@
+CREATE TABLE IF NOT EXISTS kubescore (
+ id UUID,
+ clustername String,
+ object_name String,
+ kind String,
+ apiVersion String,
+ name String,
+ namespace String,
+ target_type String,
+ description String,
+ path String,
+ summary String,
+ file_name String,
+ file_row BIGINT,
+ EventTime DateTime('UTC'),
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
+
diff --git a/sql/000009_trivy_vul.down.sql b/sql/000009_trivy_vul.down.sql
new file mode 100644
index 00000000..52940cbf
--- /dev/null
+++ b/sql/000009_trivy_vul.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS trivy_vul;
diff --git a/sql/000009_trivy_vul.up.sql b/sql/000009_trivy_vul.up.sql
new file mode 100644
index 00000000..808a909f
--- /dev/null
+++ b/sql/000009_trivy_vul.up.sql
@@ -0,0 +1,23 @@
+CREATE TABLE IF NOT EXISTS trivy_vul (
+ id UUID,
+ cluster_name String,
+ namespace String,
+ kind String,
+ name String,
+ vul_id String,
+ vul_vendor_ids String,
+ vul_pkg_id String,
+ vul_pkg_name String,
+ vul_pkg_path String,
+ vul_installed_version String,
+ vul_fixed_version String,
+ vul_title String,
+ vul_severity String,
+ vul_published_date DateTime('UTC'),
+ vul_last_modified_date DateTime('UTC'),
+ ExpiryDate DateTime DEFAULT now() + INTERVAL {{.TTLValue}} {{.TTLUnit}},
+ ExportedAt DateTime DEFAULT NULL
+) ENGINE = MergeTree()
+ORDER BY ExpiryDate
+TTL ExpiryDate;
+
diff --git a/steps-to-test.txt b/steps-to-test.txt
index 66e17f5a..04d60b68 100644
--- a/steps-to-test.txt
+++ b/steps-to-test.txt
@@ -33,4 +33,134 @@ docker tag ubuntu:latest localhost:5001/ubuntu:v1
docker push localhost:5001/ubuntu:v1
-# test commit
+
+Tables that share the Namespace column: outdated_images, kubescore, getall_resources
+
+sample query:
+query {
+ allNamespaceData {
+ namespace
+ outdatedImages {
+ clusterName
+ namespace
+ pod
+ currentImage
+ currentTag
+ latestVersion
+ versionsBehind
+ eventTime
+ }
+ kubeScores {
+ id
+ clusterName
+ objectName
+ kind
+ apiVersion
+ name
+ namespace
+ targetType
+ description
+ path
+ summary
+ fileName
+ fileRow
+ eventTime
+ }
+ resources {
+ clusterName
+ namespace
+ kind
+ resource
+ age
+ eventTime
+ }
+ }
+ }
+
+
+ sample response:
+
+ {
+ "data": {
+ "allNamespaceData": [
+ {
+ "namespace": "namespace1",
+ "outdatedImages": [
+ {
+ "clusterName": "cluster1",
+ "namespace": "namespace1",
+ "pod": "pod1",
+ "currentImage": "image1:v1",
+ "currentTag": "v1",
+ "latestVersion": "v2",
+ "versionsBehind": 1,
+ "eventTime": "2022-01-01T12:00:00Z"
+ },
+ // ... more outdatedImages for namespace1 ...
+ ],
+ "kubeScores": [
+ {
+ "id": "ks1",
+ "clusterName": "cluster1",
+ "objectName": "object1",
+ "kind": "Deployment",
+ "apiVersion": "v1",
+ "name": "deployment1",
+ "namespace": "namespace1",
+ "targetType": "type1",
+ "description": "description1",
+ "path": "path1",
+ "summary": "summary1",
+ "fileName": "file1",
+ "fileRow": 10,
+ "eventTime": "2022-01-01T12:00:00Z"
+ },
+ // ... more kubeScores for namespace1 ...
+ ],
+ "resources": [
+ {
+ "clusterName": "cluster1",
+ "namespace": "namespace1",
+ "kind": "Pod",
+ "resource": "pod1",
+ "age": "10d",
+ "eventTime": "2022-01-01T12:00:00Z"
+ },
+ // ... more resources for namespace1 ...
+ ]
+ },
+ // ... more namespace data objects ...
+ ]
+ }
+ }
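+
+ A quick way to exercise the query above from the command line (a sketch, assuming the
+ GraphQL server from graphqlserver/server.go is running locally on its default port 8085
+ and serving the /query endpoint):
+
+ # assumes the default port 8085 and the /query endpoint configured in graphqlserver/server.go
+ curl -s http://localhost:8085/query \
+   -H 'Content-Type: application/json' \
+   -d '{"query":"{ allNamespaceData { namespace outdatedImages { pod currentTag latestVersion } } }"}'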
+
+ ...
+
+
+
+ resources - clusterName, namespace
+
+
+ MOM:
+ 1. Filter by cluster name in the unique namespaces. - Vijesh
+ 2. Role id is single (it is missing in the create group request) - Anila
+ 3. Total no. of users in the get group response is missing - Nithu
+ 4. Role details missing in the get group users response - Nithu
+ 5. Update group --> combine both APIs - Vijesh (need to discuss with Iyappan)
+
+
+
+ DONE:
+ outdated
+ events
+
+ NOT DONE:
+ trivy_vul -
+ trivy_misconfig
+ trivyimage - only cluster name filter
+ kubescore
+ getall_resources
+ DeletedAPIs - by cluster name
+ DeprecatedAPIs - by cluster name
+
+GraphQL server, Dockerfiles, and related data files are included in this change
\ No newline at end of file