From 1872670a402bc49e7c8648844e25be94910b77a5 Mon Sep 17 00:00:00 2001 From: Massimiliano Ribuoli Date: Mon, 27 Oct 2025 16:57:21 +0100 Subject: [PATCH 1/5] Update vmware_exporter.py --- vmware_exporter/vmware_exporter.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vmware_exporter/vmware_exporter.py b/vmware_exporter/vmware_exporter.py index e497fa2..de3734f 100755 --- a/vmware_exporter/vmware_exporter.py +++ b/vmware_exporter/vmware_exporter.py @@ -738,6 +738,7 @@ def vm_inventory(self): 'runtime.host', 'parent', 'summary.config.vmPathName', + 'guest.ipAddress', ] if self.collect_only['vms'] is True: From 507831edb15eb421509801b94dc4edc03f22384e Mon Sep 17 00:00:00 2001 From: Massimiliano Ribuoli Date: Mon, 27 Oct 2025 17:32:21 +0100 Subject: [PATCH 2/5] Update project --- .github/workflows/docker-push.yml | 2 +- .pre-commit-config.yaml | 2 +- Dockerfile | 2 +- README.md | 18 +++++++++--------- catalog-info.yaml | 2 +- docker-compose.yml | 2 +- kubernetes/vmware-exporter.yml | 2 +- setup.py | 4 ++-- 8 files changed, 17 insertions(+), 17 deletions(-) diff --git a/.github/workflows/docker-push.yml b/.github/workflows/docker-push.yml index 3d0bf4c..4f8f501 100644 --- a/.github/workflows/docker-push.yml +++ b/.github/workflows/docker-push.yml @@ -15,7 +15,7 @@ jobs: id: build-image uses: redhat-actions/buildah-build@v2 with: - image: pryorda/vmware_exporter + image: maxetiqua/vmware_exporter tags: latest ${{ github.sha }} ${{ github.event.release.tag_name }} dockerfiles: | ./Dockerfile diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 37e87b2..8ec9158 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -41,7 +41,7 @@ repos: stages: [commit] args: - --ignore=F705,E123,E402 -- repo: https://github.com/pryorda/dockerfilelint-precommit-hooks +- repo: https://github.com/maxetiqua/dockerfilelint-precommit-hooks rev: v0.1.0 hooks: - id: dockerfilelint diff --git a/Dockerfile b/Dockerfile index 4ade0e9..3fe82a3 100644 --- 
a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ FROM python:3.7-alpine -LABEL MAINTAINER="Daniel Pryor " +LABEL MAINTAINER="Max Etiqua " LABEL NAME=vmware_exporter WORKDIR /opt/vmware_exporter/ diff --git a/README.md b/README.md index 5215eaa..9968275 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Needs new home. Since Broadcom is no longer catering to its opensource/free base, I have decided to move on from maintaining this project. Please create a discussion if you would like to take over managing this project. +# Needs new home. Since Broadcom is no longer catering to its opensource/free base, I have decided to move on from maintaining this project. Please create a discussion if you would like to take over managing this project. # vmware_exporter @@ -12,12 +12,12 @@ Get VMware vCenter information: - Snapshot Unix timestamp creation date ## Badges -![Docker Stars](https://img.shields.io/docker/stars/pryorda/vmware_exporter.svg) -![Docker Pulls](https://img.shields.io/docker/pulls/pryorda/vmware_exporter.svg) -![Docker Automated](https://img.shields.io/docker/automated/pryorda/vmware_exporter.svg) +![Docker Stars](https://img.shields.io/docker/stars/maxetiqua/vmware_exporter.svg) +![Docker Pulls](https://img.shields.io/docker/pulls/maxetiqua/vmware_exporter.svg) +![Docker Automated](https://img.shields.io/docker/automated/maxetiqua/vmware_exporter.svg) -[![Travis Build Status](https://travis-ci.org/pryorda/vmware_exporter.svg?branch=master)](https://travis-ci.org/pryorda/vmware_exporter) -![Docker Build](https://img.shields.io/docker/build/pryorda/vmware_exporter.svg) +[![Travis Build Status](https://travis-ci.org/maxetiqua/vmware_exporter.svg?branch=master)](https://travis-ci.org/maxetiqua/vmware_exporter) +![Docker Build](https://img.shields.io/docker/build/maxetiqua/vmware_exporter.svg) [![Join the chat at 
https://gitter.im/vmware_exporter/community](https://badges.gitter.im/vmware_exporter/community.svg)](https://gitter.im/vmware_exporter/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) ## Usage @@ -32,13 +32,13 @@ Get VMware vCenter information: Alternatively, if you don't wish to install the package, run it using `$ vmware_exporter/vmware_exporter.py` or use the following docker command: ``` -docker run -it --rm -p 9272:9272 -e VSPHERE_USER=${VSPHERE_USERNAME} -e VSPHERE_PASSWORD=${VSPHERE_PASSWORD} -e VSPHERE_HOST=${VSPHERE_HOST} -e VSPHERE_IGNORE_SSL=True -e VSPHERE_SPECS_SIZE=2000 --name vmware_exporter pryorda/vmware_exporter +docker run -it --rm -p 9272:9272 -e VSPHERE_USER=${VSPHERE_USERNAME} -e VSPHERE_PASSWORD=${VSPHERE_PASSWORD} -e VSPHERE_HOST=${VSPHERE_HOST} -e VSPHERE_IGNORE_SSL=True -e VSPHERE_SPECS_SIZE=2000 --name vmware_exporter maxetiqua/vmware_exporter ``` When using containers combined with `--env-file` flag, please use capital letters to set bolleans, for example: ``` -$ podman run -it --rm -p 9272:9272 --name vmware_exporter --env-file config.env pryorda/vmware_exporter +$ podman run -it --rm -p 9272:9272 --name vmware_exporter --env-file config.env maxetiqua/vmware_exporter $ cat config.env VSPHERE_USER=administrator@vsphere.my.domain.com VSPHERE_PASSWORD=Secure-Pass @@ -269,7 +269,7 @@ Forked from https://github.com/rverchere/vmware_exporter. 
I removed the fork so ## Maintainer -Daniel Pryor [pryorda](https://github.com/pryorda) +Max Etiqua [maxetiqua](https://github.com/maxetiqua) ## License diff --git a/catalog-info.yaml b/catalog-info.yaml index 3aa0124..f732615 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -3,7 +3,7 @@ kind: Component metadata: name: vmware_exporter annotations: - github.com/project-slug: pryorda/vmware_exporter + github.com/project-slug: maxetiqua/vmware_exporter spec: type: service lifecycle: unknown diff --git a/docker-compose.yml b/docker-compose.yml index 64d4dc3..99064bf 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,7 +2,7 @@ version: '2' services: vmware_exporter: # Using the latest tag, but you can use vers(v0.9.5 for example - image: pryorda/vmware_exporter:latest + image: maxetiqua/vmware_exporter:latest ports: - "9275:9272" environment: diff --git a/kubernetes/vmware-exporter.yml b/kubernetes/vmware-exporter.yml index b3b8831..e8b85b3 100644 --- a/kubernetes/vmware-exporter.yml +++ b/kubernetes/vmware-exporter.yml @@ -18,7 +18,7 @@ spec: spec: containers: - name: vmware-exporter - image: "pryorda/vmware_exporter:latest" + image: "maxetiqua/vmware_exporter:latest" imagePullPolicy: Always ports: - containerPort: 9272 diff --git a/setup.py b/setup.py index e7f2ecd..e2a4697 100644 --- a/setup.py +++ b/setup.py @@ -8,8 +8,8 @@ description='VMWare VCenter Exporter for Prometheus', long_description=open('README.md').read(), long_description_content_type="text/markdown", - url='https://github.com/pryorda/vmware_exporter', - download_url=("https://github.com/pryorda/vmware_exporter/tarball/%s" % + url='https://github.com/maxetiqua/vmware_exporter', + download_url=("https://github.com/maxetiqua/vmware_exporter/tarball/%s" % vmware_exporter.__version__), keywords=['VMWare', 'VCenter', 'Prometheus'], license=vmware_exporter.__license__, From 53c09c37877137570293e141f0b517702971115f Mon Sep 17 00:00:00 2001 From: Massimiliano Ribuoli Date: Mon, 27 Oct 2025 
19:37:44 +0100 Subject: [PATCH 3/5] Fallback to 0.11.1 --- .flake8 | 1 - .github/workflows/docker-push.yml | 36 ----------- .github/workflows/python-package.yml | 84 ------------------------ .pre-commit-config.yaml | 13 +--- .travis.yml | 56 ++++++++++++++++ CHANGELOG.md | 50 --------------- Dockerfile | 6 +- README.md | 51 +++------------ catalog-info.yaml | 10 --- docker-compose.yml | 2 +- kubernetes/vmware-exporter.yml | 2 +- openshift/README.md | 27 -------- openshift/configmap.yaml | 19 ------ openshift/deployment.yaml | 47 -------------- openshift/rolebinding.yaml | 30 --------- openshift/service.yaml | 17 ----- openshift/servicemonitor.yaml | 19 ------ requirements-tests.txt | 11 ++-- requirements.txt | 2 +- setup.cfg | 1 - setup.py | 4 +- tests/unit/test_helpers.py | 15 ----- tests/unit/test_vmware_exporter.py | 40 ++---------- validate-signature.rb | 19 ------ vmware_exporter/__init__.py | 2 +- vmware_exporter/defer.py | 3 +- vmware_exporter/helpers.py | 95 +--------------------------- vmware_exporter/vmware_exporter.py | 2 +- 28 files changed, 90 insertions(+), 574 deletions(-) create mode 100644 .travis.yml diff --git a/.flake8 b/.flake8 index e7c5cb3..6deafc2 100644 --- a/.flake8 +++ b/.flake8 @@ -1,3 +1,2 @@ [flake8] -ignore = E402 max-line-length = 120 diff --git a/.github/workflows/docker-push.yml b/.github/workflows/docker-push.yml index 4f8f501..e69de29 100644 --- a/.github/workflows/docker-push.yml +++ b/.github/workflows/docker-push.yml @@ -1,36 +0,0 @@ -name: Build and Push Image -on: - release: - types: [released] - -jobs: - build: - name: Build and push image - runs-on: ubuntu-20.04 - - steps: - - uses: actions/checkout@v2 - - - name: Build Image - id: build-image - uses: redhat-actions/buildah-build@v2 - with: - image: maxetiqua/vmware_exporter - tags: latest ${{ github.sha }} ${{ github.event.release.tag_name }} - dockerfiles: | - ./Dockerfile - - # Podman Login action (https://github.com/redhat-actions/podman-login) also be used to log in, 
- # in which case 'username' and 'password' can be omitted. - - name: Push To dhub - id: push-to-dhub - uses: redhat-actions/push-to-registry@v2 - with: - image: ${{ steps.build-image.outputs.image }} - tags: ${{ steps.build-image.outputs.tags }} - registry: docker.io - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - - name: Print image url - run: echo "Image pushed to ${{ steps.push-to-dhub.outputs.registry-paths }}" diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 5c5a328..e69de29 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -1,84 +0,0 @@ -# This workflow will upload a Python Package using Twine when a release is created -# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries - -name: Python Package - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] - -jobs: - build: - if: github.event_name == 'pull_request' - runs-on: ubuntu-latest - - strategy: - matrix: - python-version: [3.7] - - steps: - - uses: actions/checkout@v2 - with: - fetch-depth: 0 - ref: ${{ github.event.pull_request.head.sha }} - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -e . -r requirements.txt -r requirements-tests.txt python-semantic-release - sudo apt-get update && sudo apt-get install -y nodejs npm && sudo npm install -g dockerfilelint - - name: Lint with flake8 - run: | - # stop the build if there are Python syntax errors or undefined names - flake8 vmware_exporter tests --statistics - - name: Lint dockerfile - run: dockerfilelint ./Dockerfile - - name: Test with pytest - run: | - pytest --cov=. 
-v tests/unit - - uses: codecov/codecov-action@v1 - - name: integration tests - run: | - pytest tests/integration - - name: Setup ruby env - uses: actions/setup-ruby@v1 - with: - ruby-version: 2.7 - - run: | - gem install pre-commit-sign - export COMMIT_MESSAGE=$(git log -1) - ruby validate-signature.rb "${COMMIT_MESSAGE}" - - deploy: - strategy: - matrix: - python-version: [3.7] - - runs-on: ubuntu-latest - if: ${{ github.ref == 'refs/heads/main' && github.event_name == 'push' }} - - steps: - - uses: actions/checkout@v2 - with: - token: ${{ secrets.GH_TOKEN }} - fetch-depth: 0 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - name: install semantic-release - run: | - pip install python-semantic-release - - name: deploy pip - run: | - git config --global user.name "semantic-release (via github actions)" - git config --global user.email "semantic-release@github-actions" - semantic-release publish - env: # Or as an environment variable - GH_TOKEN: ${{ secrets.GH_TOKEN }} - PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8ec9158..e736368 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,8 +1,4 @@ repos: -- repo: https://github.com/pre-commit/mirrors-autopep8 - rev: v1.5.6 # Use the sha / tag you want to point at - hooks: - - id: autopep8 - repo: https://github.com/pre-commit/pre-commit-hooks rev: v1.3.0 hooks: @@ -16,8 +12,6 @@ repos: stages: [commit] - id: detect-aws-credentials stages: [commit] - args: - - --allow-missing-credentials # Generic file state - id: trailing-whitespace stages: [commit] @@ -40,14 +34,13 @@ repos: - id: flake8 stages: [commit] args: - - --ignore=F705,E123,E402 -- repo: https://github.com/maxetiqua/dockerfilelint-precommit-hooks + - --ignore=F705,E123 +- repo: https://github.com/pryorda/dockerfilelint-precommit-hooks rev: v0.1.0 hooks: - id: dockerfilelint stages: 
[commit] - repo: https://github.com/mattlqx/pre-commit-sign - rev: v1.1.3 + rev: v1.1.1 hooks: - id: sign-commit - stages: [commit-msg] diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..c7328df --- /dev/null +++ b/.travis.yml @@ -0,0 +1,56 @@ +--- + +language: python +dist: xenial # required for Python >= 3.7 +sudo: required +cache: + directories: + - "$HOME/.cache/pip" + - "$HOME/.pyenv" +matrix: + include: + - name: "Python 3.6 Unit-test" + python: 3.6 + - name: "Python 3.7 Unit-test" + python: 3.7 + - name: "Python 3.7 Docker Unit and Integration-test" + python: 3.7 + services: + - docker + env: + - BUILD_SDIST=true + - INTEGRATION_TEST=true + +install: + - pip install -e . -r requirements.txt -r requirements-tests.txt + - grep -Rl packagecloud /etc/apt/ |sudo xargs rm -vf + - sudo apt-get update && sudo apt-get install -y nodejs npm + - npm install -g dockerfilelint + - if [ $INTEGRATION_TEST == "true" ]; then + pip install python-semantic-release; + fi + +script: + - dockerfilelint ./Dockerfile + - flake8 vmware_exporter tests + - pytest --cov=. 
-v tests/unit + - if [ $INTEGRATION_TEST == "true" ]; then + pytest tests/integration; + else + echo "Skipped Integration-Tests"; + fi + +after_success: + - codecov + - if [ $INTEGRATION_TEST == "true" ]; then + git config --global user.name "semantic-release (via TravisCI)"; + git config --global user.email "semantic-release@travis"; + semantic-release publish; + semantic-release changelog --post; + fi + +env: + global: + - secure: qC8DzqyxBiBBBUGLuNOO2O6XgmUaLhTi1ArfhYxVDjv5vTJvCNO1DpXfFqXJGR3wrRKe1DOddXildOEL7d7xX6Y3b+k3urnPfqnAM/Hvr0djqDWRPuZl89Fg1mrAvBV3LL/ZsrWKHDc3keD/vTcPAsrnJl3MdS5fMKBwEuDmhpnNdGk0Wty357WxWBaz1JYBoGaqbw0JWAAA5UdfyF3aOybRONXcVqnxqSeR43drBLSdAwBJVVhyv3IYX3HRMCP8vXz43LqWK16z8KVPyrwwzE2WXlq+2FypLtM4A/XOssx4M77MefIMC6ydAimLh33gYtaiuO7+fYipLlOlChNO/pxX43FM+7UuKWaL9MWaUvypmOGo+jp/y6PzbIDz/RH4T8mjaswIe22nyTR/Gbtph/58CISGGEtRNKXjE4/OD1gjodcNS2NkwNQimxjHv30rH4X4Z+JYU+I3iobpmWJ5WJsyMnS3jWkRluRgA+8djuRviK4sdIBwk2qC2AGuZ1c6EJbEN7f3sRIgtQMYd1JAiZVwYmRa08hpulvrZAzxqx/PfkaEOBFQrBwISkD2Oq3MGlDSV+sUe53TmRM1KO9y5fC8XnJHRJv5mw+iWaEIImP2muXufdm101oXNzHbs3hC2kYIFD+u1BoVDgWACUIkj2oRxMJ4p3ynlbccALUc8y0= + - secure: GWTsOe1CGvEi/n7k+c91UisQAf7UcofZKeO8EI6n0abRPQii0rfMIQ7fS2KFUyR7I/nk02YrZcd1etAGTVthc9K3YrUhM68eu+ztlUX6Sr5SM6PseYWkgf+G0I+IIoCpqSjwL/9J4jTP1hvNlP7nofUbo43OyEEX7ndNr1r8Ab0Zu48VYFj3sb3whMovakEVH5SIUe001Jix4wIZ9TYUNnSReNzMb8M+s/xK+RUnT3SYYutBbw0Dc+wTaaW/A5g1kSK35VZZ7OWN+l1Sc0xCa9tfJYKuxXFuyKmBRe3z1WUk+4yQxbGKQ0s47LpICo2qKm2iYoKVoWAGXBtWKCbLDoVy1tRTE3bcBMFbNsHXHT4+1F6djJjfr8ndFAoLpXyVBPsD3/ik8k7GzC7Gvam1fzCisjPDZFbdUpUCXU5HDrknB6yKNvekkltgkGajS0rzY6UPPiUdGccoXMngrS8FhD7zPxqfCtbK9WYBM5yXZ/kCgCqy0+dMu0+1iUR8h4hoQxonp2c7cAEnHMcd90o0IK8NEZMNpu5b/F+34/wj3cy+QYpE99pF5ztnPQfzUPZzR7rAl/TzesDLFVQceykYNczoD1YDMUFareV+nyzZp/W4gECaRkqgZxxfYcSUXQCaQfHWSvSdHHLaoIMzQi7lL1c6YxmWx8UireQEChbKfyw= + - secure: 
sbOF44qJH2JyblAVIgbVZ/xmIuF9+ypzmyB6EOvbMagAdcySxYEz7BIadIMtP8wWGM2TZQKFxUTJhJblSBbKAKdC/6o6RLUy/MxyCShJafr9NkBhhbLIzYdVGOC5TjswLBXtvNdTbyUgEu6NQnkBjVoBm8FXfhWxRWzJ7SJHSJYMOB8Fnr7gjxiMbenQUJYfRtVowmExrHq0WxyvNMxqKEW4qWFU3AwAZEc7M5hvf+Upju/rnHsBRZrm/RvfRdhQnAXPVaSJXMzy5Msa6KheqswczSSlLTgpUxCF412EXD2rZsjG3sRFMLnokVw983z2Kw4ov3Y7eYHBpmyz7wg9ICD4TdlbbAPJJIgY7lZzYYtt2sQ6DhLwsNZyCCXBGn6b8tmNiOXdRkZJU7aAW6yuXDSx6RZCRNfn4JFFt1o9yzEbea6oKfovB7cy/3d3ty4+3ud9ozXB9G/m7UzsjCvry7gIxEv2rVYtdNeOvslyftXXXzPl4IRJv99WcSmum0nuOpQrlvOzq+1yQNa3ImCRBkFmqUUa34FObzIaGegq4fD3kMKz9N+impQRvNI9WiDqtRvBJTCaP28ibUainxslW4Ep4rcmWo6synf2q6aWTrUPxXl0jvd6eIuCCvWPH7uLXZhL0dYm5ID/9BOCd3TZcvVjBKW0xWkFafNyt2USJto= diff --git a/CHANGELOG.md b/CHANGELOG.md index 5c351d0..e3af55b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,56 +2,6 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/) and [Keep a changelog](https://github.com/olivierlacan/keep-a-changelog). 
- - -## v0.18.4 (2022-10-11) -### Fix -* **update_dashboards:** Updating datasource and adding prefix ([`a42968f`](https://github.com/pryorda/vmware_exporter/commit/a42968f0cb87598558f48f99d5341a36ab1175f1)) - -## v0.18.3 (2022-03-25) -### Fix -* **empty_string:** #294 ([`a806b1d`](https://github.com/pryorda/vmware_exporter/commit/a806b1da9f65c965769903ad5691ec1449965ddd)) - -## v0.18.2 (2021-09-26) -### Fix -* **fix_image:** Adding dhub automation - fix image ([#293](https://github.com/pryorda/vmware_exporter/issues/293)) ([`1b8bd18`](https://github.com/pryorda/vmware_exporter/commit/1b8bd18c22613582bdcbd1a5f488ca2f63b1e364)) - -## v0.18.1 (2021-09-26) -### Fix -* **fix_tag:** Adding dhub automation - fix tag ([#292](https://github.com/pryorda/vmware_exporter/issues/292)) ([`c3d7830`](https://github.com/pryorda/vmware_exporter/commit/c3d7830ea92567c21b5e5db51ead6ad3983c4082)) - -## v0.18.0 (2021-09-26) -### Feature -* **adding_dhub_automation:** Adding dhub automation ([#291](https://github.com/pryorda/vmware_exporter/issues/291)) ([`ba56f30`](https://github.com/pryorda/vmware_exporter/commit/ba56f300d1d2c2e7439e1f3406aada1e0111ed34)) - -## v0.17.1 (2021-08-19) -### Fix -* **adding_version:** Adding version cli ([`f83b058`](https://github.com/pryorda/vmware_exporter/commit/f83b0580f58bc2d3c7d53f99194d03ef02a02758)) - -## v0.17.0 (2021-08-19) -### Feature -* **add_vm_ds:** Adding vm datastore. ([`16c8604`](https://github.com/pryorda/vmware_exporter/commit/16c8604ef4e6c77d1eb5f1876ead544fde540967)) - -## v0.16.1 (2021-06-10) -### Fix -* **fixing_sensor:** Fix for badly behaving super-micro sensor #271 ([`2d5c196`](https://github.com/pryorda/vmware_exporter/commit/2d5c1965ec21ee6afc1d9ff3063bea3ca93bd99d)) - -## v0.16.0 (2021-03-30) -### Feature -* **adding_signature_validation:** Adding Validation for signatures. 
([`72430d9`](https://github.com/pryorda/vmware_exporter/commit/72430d91f181b17c977aecb9b1fda90ef83bd4ee)) - -## v0.15.1 (2021-03-30) -### Fix -* **fix_sensor_lookup:** Fixing sensor lookup ([#262](https://github.com/pryorda/vmware_exporter/issues/262)) ([`e97c855`](https://github.com/pryorda/vmware_exporter/commit/e97c855581a4e8db8804c542aaece62b3d85081b)) - -## v0.15.0 (2021-03-29) -### Feature -* **sensors:** Adding sensor metrics ([`da2f489`](https://github.com/pryorda/vmware_exporter/commit/da2f48929fc8e377202c4e193d2d4836e4d90a38)) - -## v0.14.3 (2021-03-11) -### Fix -* **optimize_build:** Remove travis, add ifs to action ([#254](https://github.com/pryorda/vmware_exporter/issues/254)) ([`43d6556`](https://github.com/pryorda/vmware_exporter/commit/43d6556556171b3ada6804a29aaff4710e511094)) - ## [0.4.2] - 2018-01-02 ## Fixed - [#60](https://github.com/pryorda/vmware_exporter/pull/60) Typo Fix diff --git a/Dockerfile b/Dockerfile index 3fe82a3..a0e7951 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,12 +1,12 @@ -FROM python:3.7-alpine +FROM python:3.6-alpine -LABEL MAINTAINER="Max Etiqua " +LABEL MAINTAINER="Daniel Pryor " LABEL NAME=vmware_exporter WORKDIR /opt/vmware_exporter/ COPY . /opt/vmware_exporter/ -RUN set -x; buildDeps="gcc python3-dev musl-dev libffi-dev openssl openssl-dev rust cargo" \ +RUN set -x; buildDeps="gcc python-dev musl-dev libffi-dev openssl openssl-dev" \ && apk add --no-cache --update $buildDeps \ && pip install -r requirements.txt . \ && apk del $buildDeps diff --git a/README.md b/README.md index 9968275..436818b 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,3 @@ -# Needs new home. Since Broadcom is no longer catering to its opensource/free base, I have decided to move on from maintaining this project. Please create a discussion if you would like to take over managing this project. - - # vmware_exporter VMware vCenter Exporter for Prometheus. 
@@ -12,12 +9,12 @@ Get VMware vCenter information: - Snapshot Unix timestamp creation date ## Badges -![Docker Stars](https://img.shields.io/docker/stars/maxetiqua/vmware_exporter.svg) -![Docker Pulls](https://img.shields.io/docker/pulls/maxetiqua/vmware_exporter.svg) -![Docker Automated](https://img.shields.io/docker/automated/maxetiqua/vmware_exporter.svg) +![Docker Stars](https://img.shields.io/docker/stars/pryorda/vmware_exporter.svg) +![Docker Pulls](https://img.shields.io/docker/pulls/pryorda/vmware_exporter.svg) +![Docker Automated](https://img.shields.io/docker/automated/pryorda/vmware_exporter.svg) -[![Travis Build Status](https://travis-ci.org/maxetiqua/vmware_exporter.svg?branch=master)](https://travis-ci.org/maxetiqua/vmware_exporter) -![Docker Build](https://img.shields.io/docker/build/maxetiqua/vmware_exporter.svg) +[![Travis Build Status](https://travis-ci.org/pryorda/vmware_exporter.svg?branch=master)](https://travis-ci.org/pryorda/vmware_exporter) +![Docker Build](https://img.shields.io/docker/build/pryorda/vmware_exporter.svg) [![Join the chat at https://gitter.im/vmware_exporter/community](https://badges.gitter.im/vmware_exporter/community.svg)](https://gitter.im/vmware_exporter/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) ## Usage @@ -32,22 +29,9 @@ Get VMware vCenter information: Alternatively, if you don't wish to install the package, run it using `$ vmware_exporter/vmware_exporter.py` or use the following docker command: ``` -docker run -it --rm -p 9272:9272 -e VSPHERE_USER=${VSPHERE_USERNAME} -e VSPHERE_PASSWORD=${VSPHERE_PASSWORD} -e VSPHERE_HOST=${VSPHERE_HOST} -e VSPHERE_IGNORE_SSL=True -e VSPHERE_SPECS_SIZE=2000 --name vmware_exporter maxetiqua/vmware_exporter -``` - -When using containers combined with `--env-file` flag, please use capital letters to set bolleans, for example: - -``` -$ podman run -it --rm -p 9272:9272 --name vmware_exporter --env-file config.env maxetiqua/vmware_exporter -$ cat 
config.env -VSPHERE_USER=administrator@vsphere.my.domain.com -VSPHERE_PASSWORD=Secure-Pass -VSPHERE_HOST=192.168.0.1 -VSPHERE_IGNORE_SSL=TRUE -VSPHERE_SPECS_SIZE=2000 +docker run -it --rm -p 9272:9272 -e VSPHERE_USER=${VSPHERE_USERNAME} -e VSPHERE_PASSWORD=${VSPHERE_PASSWORD} -e VSPHERE_HOST=${VSPHERE_HOST} -e VSPHERE_IGNORE_SSL=True -e VSPHERE_SPECS_SIZE=2000 --name vmware_exporter pryorda/vmware_exporter ``` - ### Configuration and limiting data collection Only provide a configuration file if enviroment variables are not used. If you do plan to use a configuration file, be sure to override the container entrypoint or add -c config.yml to the command arguments. @@ -71,9 +55,6 @@ default: vsphere_password: "password" ignore_ssl: False specs_size: 5000 - fetch_custom_attributes: True - fetch_tags: True - fetch_alarms: True collect_only: vms: True vmguests: True @@ -87,9 +68,6 @@ esx: vsphere_password: 'password' ignore_ssl: True specs_size: 5000 - fetch_custom_attributes: True - fetch_tags: True - fetch_alarms: True collect_only: vms: False vmguests: True @@ -103,9 +81,6 @@ limited: vsphere_password: 'password' ignore_ssl: True specs_size: 5000 - fetch_custom_attributes: True - fetch_tags: True - fetch_alarms: False collect_only: vms: False vmguests: False @@ -118,15 +93,12 @@ Switching sections can be done by adding ?section=limited to the URL. 
#### Environment Variables | Variable | Precedence | Defaults | Description | -| --------------------------------------| ---------------------- | -------- | --------------------------------------------------------------------------| +| ---------------------------- | ---------------------- | -------- | --------------------------------------- | | `VSPHERE_HOST` | config, env, get_param | n/a | vsphere server to connect to | | `VSPHERE_USER` | config, env | n/a | User for connecting to vsphere | | `VSPHERE_PASSWORD` | config, env | n/a | Password for connecting to vsphere | | `VSPHERE_SPECS_SIZE` | config, env | 5000 | Size of specs list for query stats function | | `VSPHERE_IGNORE_SSL` | config, env | False | Ignore the ssl cert on the connection to vsphere host | -| `VSPHERE_FETCH_CUSTOM_ATTRIBUTES` | config, env | False | Set to true to collect objects custom attributes as metric labels | -| `VSPHERE_FETCH_TAGS` | config, env | False | Set to true to collect objects tags as metric labels | -| `VSPHERE_FETCH_ALARMS` | config, env | False | Fetch objects triggered alarms, and in case of hosts hdw alarms as well | | `VSPHERE_COLLECT_HOSTS` | config, env | True | Set to false to disable collection of host metrics | | `VSPHERE_COLLECT_DATASTORES` | config, env | True | Set to false to disable collection of datastore metrics | | `VSPHERE_COLLECT_VMS` | config, env | True | Set to false to disable collection of virtual machine metrics | @@ -136,15 +108,12 @@ Switching sections can be done by adding ?section=limited to the URL. You can create new sections as well, with very similiar variables. 
For example, to create a `limited` section you can set: | Variable | Precedence | Defaults | Description | -| ----------------------------------------------| ---------------------- | -------- | --------------------------------------------------------------------------| +| ---------------------------- | ---------------------- | -------- | --------------------------------------- | | `VSPHERE_LIMITED_HOST` | config, env, get_param | n/a | vsphere server to connect to | | `VSPHERE_LIMITED_USER` | config, env | n/a | User for connecting to vsphere | | `VSPHERE_LIMITED_PASSWORD` | config, env | n/a | Password for connecting to vsphere | | `VSPHERE_LIMITED_SPECS_SIZE` | config, env | 5000 | Size of specs list for query stats function | | `VSPHERE_LIMITED_IGNORE_SSL` | config, env | False | Ignore the ssl cert on the connection to vsphere host | -| `VSPHERE_LIMITED_FETCH_CUSTOM_ATTRIBUTES` | config, env | False | Set to true to collect objects custom attributes as metric labels | -| `VSPHERE_LIMITED_FETCH_TAGS` | config, env | False | Set to true to collect objects tags as metric labels | -| `VSPHERE_LIMITED_FETCH_ALARMS` | config, env | False | Fetch objects triggered alarms, and in case of hosts hdw alarms as well | | `VSPHERE_LIMITED_COLLECT_HOSTS` | config, env | True | Set to false to disable collection of host metrics | | `VSPHERE_LIMITED_COLLECT_DATASTORES` | config, env | True | Set to false to disable collection of datastore metrics | | `VSPHERE_LIMITED_COLLECT_VMS` | config, env | True | Set to false to disable collection of virtual machine metrics | @@ -269,10 +238,8 @@ Forked from https://github.com/rverchere/vmware_exporter. 
I removed the fork so ## Maintainer -Max Etiqua [maxetiqua](https://github.com/maxetiqua) +Daniel Pryor [pryorda](https://github.com/pryorda) ## License See LICENSE file - -[![Known Vulnerabilities](https://snyk.io/test/github/rmontenegroo/vmware_exporter/badge.svg?targetFile=requirements.txt)](https://snyk.io/test/github/rmontenegroo/vmware_exporter?targetFile=requirements.txt) diff --git a/catalog-info.yaml b/catalog-info.yaml index f732615..e69de29 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -1,10 +0,0 @@ -apiVersion: backstage.io/v1alpha1 -kind: Component -metadata: - name: vmware_exporter - annotations: - github.com/project-slug: maxetiqua/vmware_exporter -spec: - type: service - lifecycle: unknown - owner: production-engineering diff --git a/docker-compose.yml b/docker-compose.yml index 99064bf..64d4dc3 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,7 +2,7 @@ version: '2' services: vmware_exporter: # Using the latest tag, but you can use vers(v0.9.5 for example - image: maxetiqua/vmware_exporter:latest + image: pryorda/vmware_exporter:latest ports: - "9275:9272" environment: diff --git a/kubernetes/vmware-exporter.yml b/kubernetes/vmware-exporter.yml index e8b85b3..b3b8831 100644 --- a/kubernetes/vmware-exporter.yml +++ b/kubernetes/vmware-exporter.yml @@ -18,7 +18,7 @@ spec: spec: containers: - name: vmware-exporter - image: "maxetiqua/vmware_exporter:latest" + image: "pryorda/vmware_exporter:latest" imagePullPolicy: Always ports: - containerPort: 9272 diff --git a/openshift/README.md b/openshift/README.md index fed5ca0..e69de29 100644 --- a/openshift/README.md +++ b/openshift/README.md @@ -1,27 +0,0 @@ -### Installing vmware_exporter in OpenShift - -Create the secret as described in the kubernetes documentation - -TODO: Use existing secret -``` -read -s VSPHERE_PASSWORD -oc create secret generic -n openshift-vsphere-infra vmware-exporter-password --from-literal=VSPHERE_PASSWORD=$VSPHERE_PASSWORD -``` - -Modify the 
`configmap.yaml` for your configuration and apply. - -``` -oc apply -f configmap.yaml -``` - -Apply the role, rolebinding, service, deployment and ServiceMonitor - -``` -oc apply -f rolebinding.yaml -oc apply -f service.yaml -oc apply -f deployment.yaml -oc apply -f servicemonitor.yaml -``` - - - diff --git a/openshift/configmap.yaml b/openshift/configmap.yaml index ec2b137..e69de29 100644 --- a/openshift/configmap.yaml +++ b/openshift/configmap.yaml @@ -1,19 +0,0 @@ -apiVersion: v1 -data: - VSPHERE_COLLECT_DATASTORES: "True" - VSPHERE_COLLECT_HOSTS: "True" - VSPHERE_COLLECT_SNAPSHOTS: "False" - VSPHERE_COLLECT_VMGUESTS: "True" - VSPHERE_COLLECT_VMS: "True" - VSPHERE_FETCH_ALARMS: "True" - VSPHERE_FETCH_CUSTOM_ATTRIBUTES: "True" - VSPHERE_FETCH_TAGS: "True" - VSPHERE_HOST: vcenter - VSPHERE_IGNORE_SSL: "True" - VSPHERE_USER: user -kind: ConfigMap -metadata: - labels: - app: vmware-exporter - name: vmware-exporter-config - namespace: openshift-vsphere-infra diff --git a/openshift/deployment.yaml b/openshift/deployment.yaml index 8ea7b20..e69de29 100644 --- a/openshift/deployment.yaml +++ b/openshift/deployment.yaml @@ -1,47 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: vmware-exporter - namespace: openshift-vsphere-infra -spec: - progressDeadlineSeconds: 600 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - app: vmware-exporter - k8s-app: vmware-exporter - strategy: - rollingUpdate: - maxSurge: 25% - maxUnavailable: 25% - type: RollingUpdate - template: - metadata: - creationTimestamp: null - labels: - app: vmware-exporter - k8s-app: vmware-exporter - release: vmware-exporter - spec: - containers: - - envFrom: - - configMapRef: - name: vmware-exporter-config - - secretRef: - name: vmware-exporter-password - image: quay.io/jcallen/vmware_exporter:add_metrics - imagePullPolicy: Always - name: vmware-exporter - ports: - - containerPort: 9272 - name: http - protocol: TCP - resources: {} - terminationMessagePath: 
/dev/termination-log - terminationMessagePolicy: File - dnsPolicy: ClusterFirst - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - terminationGracePeriodSeconds: 30 diff --git a/openshift/rolebinding.yaml b/openshift/rolebinding.yaml index 4e47a04..e69de29 100644 --- a/openshift/rolebinding.yaml +++ b/openshift/rolebinding.yaml @@ -1,30 +0,0 @@ -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: prometheus-k8s - namespace: openshift-vsphere-infra -rules: - - verbs: - - get - - list - - watch - apiGroups: - - '' - resources: - - services - - endpoints - - pods ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: prometheus-k8s - namespace: openshift-vsphere-infra -subjects: - - kind: ServiceAccount - name: prometheus-k8s - namespace: openshift-monitoring -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: prometheus-k8s diff --git a/openshift/service.yaml b/openshift/service.yaml index 23a1de1..e69de29 100644 --- a/openshift/service.yaml +++ b/openshift/service.yaml @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - k8s-app: vmware-exporter - name: metrics - namespace: openshift-vsphere-infra -spec: - ports: - - name: metrics - port: 9272 - protocol: TCP - targetPort: 9272 - selector: - k8s-app: vmware-exporter - sessionAffinity: None - type: ClusterIP diff --git a/openshift/servicemonitor.yaml b/openshift/servicemonitor.yaml index f8cdf20..e69de29 100644 --- a/openshift/servicemonitor.yaml +++ b/openshift/servicemonitor.yaml @@ -1,19 +0,0 @@ -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - k8s-app: vmware-exporter - name: vmware - namespace: openshift-monitoring -spec: - endpoints: - - interval: 30s - port: metrics - scheme: http - jobLabel: app - namespaceSelector: - matchNames: - - openshift-vsphere-infra - selector: - matchLabels: - k8s-app: vmware-exporter diff --git a/requirements-tests.txt 
b/requirements-tests.txt index 3e3cc7a..7b75bfc 100644 --- a/requirements-tests.txt +++ b/requirements-tests.txt @@ -1,7 +1,6 @@ pytest_docker_tools==0.2.0 -pytest==5.4.1 -pytest-cov==2.8.1 -pytest-twisted==1.12 -codecov==2.0.17 -flake8>=3.6.0 -pyflakes>=1.5.0 +pytest==3.3 +pytest-cov==2.6.0 +pytest-twisted==1.8 +codecov==2.0.15 +flake8==3.6.0 diff --git a/requirements.txt b/requirements.txt index d924920..a5b7bd8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,5 +2,5 @@ prometheus-client==0.0.19 pytz pyvmomi>=6.5 twisted>=14.0.2 -pyyaml>=5.1 +yamlconfig service-identity diff --git a/setup.cfg b/setup.cfg index 41a4f67..9e6033b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,3 +1,2 @@ [semantic_release] version_variable = vmware_exporter/__init__.py:__version__ -branch = main diff --git a/setup.py b/setup.py index e2a4697..e7f2ecd 100644 --- a/setup.py +++ b/setup.py @@ -8,8 +8,8 @@ description='VMWare VCenter Exporter for Prometheus', long_description=open('README.md').read(), long_description_content_type="text/markdown", - url='https://github.com/maxetiqua/vmware_exporter', - download_url=("https://github.com/maxetiqua/vmware_exporter/tarball/%s" % + url='https://github.com/pryorda/vmware_exporter', + download_url=("https://github.com/pryorda/vmware_exporter/tarball/%s" % vmware_exporter.__version__), keywords=['VMWare', 'VCenter', 'Prometheus'], license=vmware_exporter.__license__, diff --git a/tests/unit/test_helpers.py b/tests/unit/test_helpers.py index 14d98ba..87c4ece 100644 --- a/tests/unit/test_helpers.py +++ b/tests/unit/test_helpers.py @@ -45,21 +45,6 @@ def test_batch_fetch_properties(): # but the real return value has methods with side effects. So we need to use a fake. 
content.viewManager.CreateContainerView.return_value = FakeView() - mockCustomField1 = mock.Mock() - mockCustomField1.key = 1 - mockCustomField1.name = 'customAttribute1' - mockCustomField1.managedObjectType = vim.Datastore - - mockCustomField2 = mock.Mock() - mockCustomField2.key = 2 - mockCustomField2.name = 'customAttribute2' - mockCustomField1.managedObjectType = vim.VirtualMachine - - content.customFieldsManager.field = [ - mockCustomField1, - mockCustomField2, - ] - prop1 = mock.Mock() prop1.name = 'someprop' prop1.val = 1 diff --git a/tests/unit/test_vmware_exporter.py b/tests/unit/test_vmware_exporter.py index 9aa54a9..6dcb0d0 100644 --- a/tests/unit/test_vmware_exporter.py +++ b/tests/unit/test_vmware_exporter.py @@ -72,9 +72,6 @@ def test_collect_vms(): 'password', collect_only, 5000, - False, - True, - False ) collector.content = _succeed(mock.Mock()) @@ -90,7 +87,6 @@ def test_collect_vms(): 'summary.config.memorySizeMB': 1024, 'runtime.maxCpuUsage': 2400, 'summary.config.template': False, - 'summary.config.vmPathName': '[datastore-1] vm-1/vm-1.vmx', 'runtime.bootTime': boot_time, 'snapshot': snapshot, 'guest.disk': [disk], @@ -99,7 +95,7 @@ def test_collect_vms(): 'guest.toolsVersionStatus2': 'guestToolsUnmanaged', } }) - assert collector.vm_labels.result == {'vm-1': ['vm-1', 'datastore-1', 'n/a', 'n/a', 'n/a']} + assert collector.vm_labels.result == {'vm-1': ['vm-1']} # Test template True @@ -109,9 +105,6 @@ def test_collect_vms(): 'password', collect_only, 5000, - False, - True, - False ) collector.content = _succeed(mock.Mock()) @@ -130,7 +123,6 @@ def test_collect_vms(): 'summary.config.numCpu': 1, 'summary.config.memorySizeMB': 1024, 'summary.config.template': True, - 'summary.config.vmPathName': '[datastore-1] vm-1/vm-1.vmx', 'runtime.bootTime': boot_time, 'snapshot': snapshot, 'guest.disk': [disk], @@ -142,7 +134,7 @@ def test_collect_vms(): yield collector._vmware_get_vms(metrics) assert _check_properties(batch_fetch_properties.call_args[0][1]) 
assert collector.vm_labels.result == { - 'vm-1': ['vm-1', 'datastore-1', 'host-1', 'dc', 'cluster-1'], + 'vm-1': ['vm-1', 'host-1', 'dc', 'cluster-1'], } assert metrics['vmware_vm_template'].samples[0][2] == 1.0 @@ -155,9 +147,6 @@ def test_collect_vms(): 'password', collect_only, 5000, - False, - True, - False ) collector.content = _succeed(mock.Mock()) @@ -177,7 +166,6 @@ def test_collect_vms(): 'summary.config.memorySizeMB': 1024, 'runtime.maxCpuUsage': 2400, 'summary.config.template': False, - 'summary.config.vmPathName': '[datastore-1] vm-1/vm-1.vmx', 'runtime.bootTime': boot_time, 'snapshot': snapshot, 'guest.disk': [disk], @@ -192,7 +180,6 @@ def test_collect_vms(): 'summary.config.memorySizeMB': 1024, 'runtime.maxCpuUsage': 2400, 'summary.config.template': False, - 'summary.config.vmPathName': '[datastore-1] vm-2/vm-2.vmx', 'runtime.bootTime': boot_time, 'snapshot': snapshot, 'guest.disk': [disk], @@ -208,7 +195,6 @@ def test_collect_vms(): 'summary.config.memorySizeMB': 1024, 'runtime.maxCpuUsage': 2400, 'summary.config.template': False, - 'summary.config.vmPathName': '[datastore-1] vm-3/vm-3.vmx', 'runtime.bootTime': boot_time, 'snapshot': snapshot, 'guest.disk': [disk], @@ -220,15 +206,14 @@ def test_collect_vms(): yield collector._vmware_get_vms(metrics) assert _check_properties(batch_fetch_properties.call_args[0][1]) assert collector.vm_labels.result == { - 'vm-1': ['vm-1', 'datastore-1', 'host-1', 'dc', 'cluster-1'], - 'vm-2': ['vm-2', 'datastore-1', 'n/a', 'n/a', 'n/a'], - 'vm-3': ['vm-3', 'datastore-1', 'host-1', 'dc', 'cluster-1'], + 'vm-1': ['vm-1', 'host-1', 'dc', 'cluster-1'], + 'vm-2': ['vm-2'], + 'vm-3': ['vm-3', 'host-1', 'dc', 'cluster-1'], } # Assert that vm-3 skipped #69/#70 assert metrics['vmware_vm_power_state'].samples[1][1] == { 'vm_name': 'vm-3', - 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -237,7 +222,6 @@ def test_collect_vms(): # General VM metrics assert 
metrics['vmware_vm_power_state'].samples[0][1] == { 'vm_name': 'vm-1', - 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -246,7 +230,6 @@ def test_collect_vms(): assert metrics['vmware_vm_boot_timestamp_seconds'].samples[0][1] == { 'vm_name': 'vm-1', - 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -256,7 +239,6 @@ def test_collect_vms(): # Disk info (vmguest) assert metrics['vmware_vm_guest_disk_capacity'].samples[0][1] == { 'vm_name': 'vm-1', - 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -267,7 +249,6 @@ def test_collect_vms(): # VM tools info (vmguest) assert metrics['vmware_vm_guest_tools_running_status'].samples[0][1] == { 'vm_name': 'vm-1', - 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -277,7 +258,6 @@ def test_collect_vms(): assert metrics['vmware_vm_guest_tools_version'].samples[0][1] == { 'vm_name': 'vm-1', - 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -287,7 +267,6 @@ def test_collect_vms(): assert metrics['vmware_vm_guest_tools_version_status'].samples[0][1] == { 'vm_name': 'vm-1', - 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -298,7 +277,6 @@ def test_collect_vms(): # Snapshots assert metrics['vmware_vm_snapshots'].samples[0][1] == { 'vm_name': 'vm-1', - 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -307,7 +285,6 @@ def test_collect_vms(): assert metrics['vmware_vm_snapshot_timestamp_seconds'].samples[0][1] == { 'vm_name': 'vm-1', - 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -317,7 +294,6 @@ def test_collect_vms(): assert metrics['vmware_vm_snapshot_timestamp_seconds'].samples[1][1] == { 'vm_name': 'vm-1', - 'ds_name': 'datastore-1', 
'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -328,7 +304,6 @@ def test_collect_vms(): # Max Memory assert metrics['vmware_vm_memory_max'].samples[0][1] == { 'vm_name': 'vm-1', - 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -338,7 +313,6 @@ def test_collect_vms(): # Max Cpu assert metrics['vmware_vm_max_cpu_usage'].samples[0][1] == { 'vm_name': 'vm-1', - 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -370,9 +344,6 @@ def test_metrics_without_hostaccess(): 'password', collect_only, 5000, - False, - True, - False ) metrics = collector._create_metric_containers() collector.content = _succeed(mock.Mock()) @@ -388,7 +359,6 @@ def test_metrics_without_hostaccess(): 'summary.config.memorySizeMB': 1024, 'runtime.maxCpuUsage': 2400, 'summary.config.template': False, - 'summary.config.vmPathName': '[datastore-1] vm-x/vm-x.vmx', 'runtime.bootTime': boot_time, 'guest.disk': [disk], 'guest.toolsStatus': 'toolsOk', diff --git a/validate-signature.rb b/validate-signature.rb index 07a420e..e69de29 100644 --- a/validate-signature.rb +++ b/validate-signature.rb @@ -1,19 +0,0 @@ -#!/bin/ruby -require 'pre-commit-sign' -if ARGV.length >= 1 - puts 'Validating signature' - commit_message = ARGV[0] - message_body = commit_message.split("\n").select { |l| l.start_with?(' ') }.join("\n").gsub(/^ /, '') - pcs = PrecommitSign.from_message(message_body) - pcs.date = DateTime.strptime(/^Date:\s+(.*)$/.match(commit_message).captures.first, '%a %b %d %T %Y %z').to_time - puts "Commit Message: #{message_body}" - if pcs.valid_signature? - puts 'Perfect' - else - puts 'Not valid' - exit 1 - end -else - puts "Need a commit message to validate signature from. Try pre-commit install -f && pre-commit install --install-hooks -t commit-msg -f before commiting your code." 
- exit 1 -end diff --git a/vmware_exporter/__init__.py b/vmware_exporter/__init__.py index 361892d..0fa1a3f 100644 --- a/vmware_exporter/__init__.py +++ b/vmware_exporter/__init__.py @@ -1,3 +1,3 @@ -__version__ = '0.18.4' +__version__ = '0.11.1' __author__ = "Daniel Pryor" __license__ = "BSD 3-Clause License" diff --git a/vmware_exporter/defer.py b/vmware_exporter/defer.py index 724bde3..05ecffc 100644 --- a/vmware_exporter/defer.py +++ b/vmware_exporter/defer.py @@ -1,7 +1,6 @@ ''' Helpers for writing efficient twisted code, optimized for coroutine scheduling efficiency ''' -# autopep8'd from twisted.internet import defer from twisted.python import failure @@ -57,7 +56,7 @@ def errback(self, err): self.callbacks.pop(0).errback(err) def addCallbacks(self, *args, **kwargs): - if self.result is None: + if not self.result: d = defer.Deferred() d.addCallbacks(*args, **kwargs) self.callbacks.append(d) diff --git a/vmware_exporter/helpers.py b/vmware_exporter/helpers.py index cb5fc98..30c08d2 100644 --- a/vmware_exporter/helpers.py +++ b/vmware_exporter/helpers.py @@ -1,5 +1,5 @@ -# autopep8'd import os + from pyVmomi import vmodl @@ -15,26 +15,6 @@ def batch_fetch_properties(content, obj_type, properties): recursive=True ) - """ - Gathering all custom attibutes names are stored as key (integer) in CustomFieldsManager - We do not want those keys, but the names. 
So here the names and keys are gathered to - be translated later - """ - if ('customValue' in properties) or ('summary.customValue' in properties): - - allCustomAttributesNames = {} - - if content.customFieldsManager and content.customFieldsManager.field: - allCustomAttributesNames.update( - dict( - [ - (f.key, f.name) - for f in content.customFieldsManager.field - if f.managedObjectType in (obj_type, None) - ] - ) - ) - try: PropertyCollector = vmodl.query.PropertyCollector @@ -60,7 +40,6 @@ def batch_fetch_properties(content, obj_type, properties): filter_spec.propSet = [property_spec] props = content.propertyCollector.RetrieveContents([filter_spec]) - finally: view_ref.Destroy() @@ -71,78 +50,6 @@ def batch_fetch_properties(content, obj_type, properties): properties['id'] = obj.obj._moId for prop in obj.propSet: - - """ - if it's a custom value property for vms (summary.customValue), hosts (summary.customValue) - or datastores (customValue) - we store all attributes together in a python dict and - translate its name key to name - """ - if 'customValue' in prop.name: - - properties[prop.name] = {} - - if allCustomAttributesNames: - - properties[prop.name] = dict( - [ - (allCustomAttributesNames[attribute.key], attribute.value) - for attribute in prop.val - if attribute.key in allCustomAttributesNames - ] - ) - - elif 'triggeredAlarmState' == prop.name: - """ - triggered alarms - """ - try: - alarms = list( - 'triggeredAlarm:{}:{}'.format(item.alarm.info.systemName.split('.')[1], item.overallStatus) - for item in prop.val - ) - except Exception: - alarms = ['triggeredAlarm:AlarmsUnavailable:yellow'] - - properties[prop.name] = ','.join(alarms) - - elif 'runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo' == prop.name: - """ - handle numericSensorInfo - """ - sensors = list( - 'numericSensorInfo:name={}:type={}:sensorStatus={}:value={}:unitModifier={}:unit={}'.format( - item.name, - item.sensorType, - item.healthState.key, - item.currentReading, - 
item.unitModifier, - item.baseUnits.lower() - ) - for item in prop.val - ) - properties[prop.name] = ','.join(sensors) - - elif prop.name in [ - 'runtime.healthSystemRuntime.hardwareStatusInfo.cpuStatusInfo', - 'runtime.healthSystemRuntime.hardwareStatusInfo.memoryStatusInfo', - ]: - """ - handle hardwareStatusInfo - """ - sensors = list( - 'numericSensorInfo:name={}:type={}:sensorStatus={}:value={}:unitModifier={}:unit={}'.format( - item.name, - "n/a", - item.status.key, - "n/a", - "n/a", - "n/a", - ) - for item in prop.val - ) - properties[prop.name] = ','.join(sensors) - - else: properties[prop.name] = prop.val results[obj.obj._moId] = properties diff --git a/vmware_exporter/vmware_exporter.py b/vmware_exporter/vmware_exporter.py index de3734f..70e4ae6 100755 --- a/vmware_exporter/vmware_exporter.py +++ b/vmware_exporter/vmware_exporter.py @@ -1,10 +1,10 @@ #!/usr/bin/env python # -*- python -*- # -*- coding: utf-8 -*- -# autopep8'd """ Handles collection of metrics for vmware. """ + from __future__ import print_function # Generic imports From 504d36b6a33626ceb22faa08a1a61f68b5e35719 Mon Sep 17 00:00:00 2001 From: Massimiliano Ribuoli Date: Mon, 27 Oct 2025 19:39:23 +0100 Subject: [PATCH 4/5] Final changes to 0.11.1 --- .github/workflows/python-package.yml | 0 README.md | 44 +- catalog-info.yaml | 0 dashboards/cluster.json | 52 +- dashboards/esx.json | 41 +- dashboards/esxi.json | 41 +- dashboards/virtualmachine.json | 50 +- openshift/README.md | 0 openshift/configmap.yaml | 0 openshift/deployment.yaml | 0 openshift/rolebinding.yaml | 0 openshift/service.yaml | 0 openshift/servicemonitor.yaml | 0 tests/unit/test_vmware_exporter.py | 371 +-------- validate-signature.rb | 0 vmware_exporter/helpers.py | 2 +- vmware_exporter/vmware_exporter.py | 1086 ++------------------------ 17 files changed, 185 insertions(+), 1502 deletions(-) delete mode 100644 .github/workflows/python-package.yml delete mode 100644 catalog-info.yaml delete mode 100644 openshift/README.md 
delete mode 100644 openshift/configmap.yaml delete mode 100644 openshift/deployment.yaml delete mode 100644 openshift/rolebinding.yaml delete mode 100644 openshift/service.yaml delete mode 100644 openshift/servicemonitor.yaml delete mode 100644 validate-signature.rb diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml deleted file mode 100644 index e69de29..0000000 diff --git a/README.md b/README.md index 436818b..24debc0 100644 --- a/README.md +++ b/README.md @@ -92,33 +92,33 @@ limited: Switching sections can be done by adding ?section=limited to the URL. #### Environment Variables -| Variable | Precedence | Defaults | Description | +| Variable | Precedence | Defaults | Description | | ---------------------------- | ---------------------- | -------- | --------------------------------------- | -| `VSPHERE_HOST` | config, env, get_param | n/a | vsphere server to connect to | -| `VSPHERE_USER` | config, env | n/a | User for connecting to vsphere | -| `VSPHERE_PASSWORD` | config, env | n/a | Password for connecting to vsphere | -| `VSPHERE_SPECS_SIZE` | config, env | 5000 | Size of specs list for query stats function | -| `VSPHERE_IGNORE_SSL` | config, env | False | Ignore the ssl cert on the connection to vsphere host | -| `VSPHERE_COLLECT_HOSTS` | config, env | True | Set to false to disable collection of host metrics | -| `VSPHERE_COLLECT_DATASTORES` | config, env | True | Set to false to disable collection of datastore metrics | -| `VSPHERE_COLLECT_VMS` | config, env | True | Set to false to disable collection of virtual machine metrics | -| `VSPHERE_COLLECT_VMGUESTS` | config, env | True | Set to false to disable collection of virtual machine guest metrics | -| `VSPHERE_COLLECT_SNAPSHOTS` | config, env | True | Set to false to disable collection of snapshot metrics | +| `VSPHERE_HOST` | config, env, get_param | n/a | vsphere server to connect to | +| `VSPHERE_USER` | config, env | n/a | User for connecting to vsphere | +| 
`VSPHERE_PASSWORD` | config, env | n/a | Password for connecting to vsphere | +| `VSPHERE_SPECS_SIZE` | config, env | 5000 | Size of specs list for query stats function | +| `VSPHERE_IGNORE_SSL` | config, env | False | Ignore the ssl cert on the connection to vsphere host | +| `VSPHERE_COLLECT_HOSTS` | config, env | True | Set to false to disable collection of host metrics | +| `VSPHERE_COLLECT_DATASTORES` | config, env | True | Set to false to disable collection of datastore metrics | +| `VSPHERE_COLLECT_VMS` | config, env | True | Set to false to disable collection of virtual machine metrics | +| `VSPHERE_COLLECT_VMGUESTS` | config, env | True | Set to false to disable collection of virtual machine guest metrics | +| `VSPHERE_COLLECT_SNAPSHOTS` | config, env | True | Set to false to disable collection of snapshot metrics | You can create new sections as well, with very similiar variables. For example, to create a `limited` section you can set: -| Variable | Precedence | Defaults | Description | +| Variable | Precedence | Defaults | Description | | ---------------------------- | ---------------------- | -------- | --------------------------------------- | -| `VSPHERE_LIMITED_HOST` | config, env, get_param | n/a | vsphere server to connect to | -| `VSPHERE_LIMITED_USER` | config, env | n/a | User for connecting to vsphere | -| `VSPHERE_LIMITED_PASSWORD` | config, env | n/a | Password for connecting to vsphere | -| `VSPHERE_LIMITED_SPECS_SIZE` | config, env | 5000 | Size of specs list for query stats function | -| `VSPHERE_LIMITED_IGNORE_SSL` | config, env | False | Ignore the ssl cert on the connection to vsphere host | -| `VSPHERE_LIMITED_COLLECT_HOSTS` | config, env | True | Set to false to disable collection of host metrics | -| `VSPHERE_LIMITED_COLLECT_DATASTORES` | config, env | True | Set to false to disable collection of datastore metrics | -| `VSPHERE_LIMITED_COLLECT_VMS` | config, env | True | Set to false to disable collection of virtual machine metrics | 
-| `VSPHERE_LIMITED_COLLECT_VMGUESTS` | config, env | True | Set to false to disable collection of virtual machine guest metrics | -| `VSPHERE_LIMITED_COLLECT_SNAPSHOTS` | config, env | True | Set to false to disable collection of snapshot metrics | +| `VSPHERE_LIMITED_HOST` | config, env, get_param | n/a | vsphere server to connect to | +| `VSPHERE_LIMITED_USER` | config, env | n/a | User for connecting to vsphere | +| `VSPHERE_LIMITED_PASSWORD` | config, env | n/a | Password for connecting to vsphere | +| `VSPHERE_LIMITED_SPECS_SIZE` | config, env | 5000 | Size of specs list for query stats function | +| `VSPHERE_LIMITED_IGNORE_SSL` | config, env | False | Ignore the ssl cert on the connection to vsphere host | +| `VSPHERE_LIMITED_COLLECT_HOSTS` | config, env | True | Set to false to disable collection of host metrics | +| `VSPHERE_LIMITED_COLLECT_DATASTORES` | config, env | True | Set to false to disable collection of datastore metrics | +| `VSPHERE_LIMITED_COLLECT_VMS` | config, env | True | Set to false to disable collection of virtual machine metrics | +| `VSPHERE_LIMITED_COLLECT_VMGUESTS` | config, env | True | Set to false to disable collection of virtual machine guest metrics | +| `VSPHERE_LIMITED_COLLECT_SNAPSHOTS` | config, env | True | Set to false to disable collection of snapshot metrics | You need to set at least `VSPHERE_SECTIONNAME_USER` for the section to be detected. 
diff --git a/catalog-info.yaml b/catalog-info.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/dashboards/cluster.json b/dashboards/cluster.json index d978f2e..b51a28a 100644 --- a/dashboards/cluster.json +++ b/dashboards/cluster.json @@ -31,7 +31,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "short", "gauge": { "maxValue": 100, @@ -112,7 +112,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "hertz", "gauge": { "maxValue": 100, @@ -193,7 +193,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "decmbytes", "gauge": { "maxValue": 100, @@ -274,7 +274,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "none", "gauge": { "maxValue": 100, @@ -355,7 +355,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "none", "gauge": { "maxValue": 100, @@ -439,7 +439,7 @@ }, { "cacheTimeout": null, - "datasource": "$datasource", + "datasource": "Prometheus", "gridPos": { "h": 5, "w": 4, @@ -504,7 +504,7 @@ }, { "cacheTimeout": null, - "datasource": "$datasource", + "datasource": "Prometheus", "gridPos": { "h": 5, "w": 4, @@ -569,7 +569,7 @@ }, { "cacheTimeout": null, - "datasource": "$datasource", + "datasource": "Prometheus", "gridPos": { "h": 5, "w": 4, @@ -638,7 +638,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -732,7 +732,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "percent", "gauge": { "maxValue": 100, @@ -807,7 +807,7 @@ }, { "cacheTimeout": null, - "datasource": "$datasource", + "datasource": "Prometheus", "gridPos": { "h": 5, "w": 4, @@ 
-872,7 +872,7 @@ }, { "cacheTimeout": null, - "datasource": "$datasource", + "datasource": "Prometheus", "gridPos": { "h": 5, "w": 4, @@ -941,7 +941,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1032,7 +1032,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1124,7 +1124,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1216,7 +1216,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1307,7 +1307,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1399,7 +1399,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1490,7 +1490,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1582,7 +1582,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1674,7 +1674,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1765,7 +1765,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1856,7 +1856,7 @@ 
"cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1944,9 +1944,7 @@ ], "schemaVersion": 20, "style": "dark", - "tags": [ - "vmware" - ], + "tags": [], "templating": { "list": [ { @@ -1974,7 +1972,7 @@ "cluster1" ] }, - "datasource": "$datasource", + "datasource": "Prometheus", "definition": "label_values(cluster_name)", "hide": 0, "includeAll": false, diff --git a/dashboards/esx.json b/dashboards/esx.json index 505b93f..7493592 100644 --- a/dashboards/esx.json +++ b/dashboards/esx.json @@ -1,7 +1,7 @@ { "__inputs": [ { - "name": "datasource", + "name": "DS_PROMETHEUS", "label": "prometheus", "description": "", "type": "datasource", @@ -59,7 +59,7 @@ "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], - "datasource": "${datasource}", + "datasource": "${DS_PROMETHEUS}", "decimals": 1, "description": "System uptime", "format": "s", @@ -139,7 +139,7 @@ "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], - "datasource": "${datasource}", + "datasource": "${DS_PROMETHEUS}", "format": "percent", "gauge": { "maxValue": 100, @@ -218,7 +218,7 @@ "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], - "datasource": "${datasource}", + "datasource": "${DS_PROMETHEUS}", "format": "percent", "gauge": { "maxValue": 100, @@ -296,7 +296,7 @@ "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], - "datasource": "${datasource}", + "datasource": "${DS_PROMETHEUS}", "format": "none", "gauge": { "maxValue": 100, @@ -385,7 +385,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${datasource}", + "datasource": "${DS_PROMETHEUS}", "decimals": 1, "fill": 0, "id": 1, @@ -468,7 +468,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${datasource}", + "datasource": "${DS_PROMETHEUS}", "decimals": 1, "fill": 1, "id": 2, @@ -557,33 +557,13 @@ ], "schemaVersion": 14, "style": "dark", - "tags": [ - "vmware", - "esx" - ], + "tags": 
[], "templating": { "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "includeAll": false, - "label": "Datasource", - "multi": false, - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - }, { "allValue": null, "current": {}, - "datasource": "$datasource", + "datasource": "${DS_PROMETHEUS}", "hide": 0, "includeAll": false, "label": "Host:", @@ -632,7 +612,6 @@ ] }, "timezone": "browser", - "title": "VMware ESX Hosts Information", - "uid": "ed9d4bbf8801a8f79194b2ce6ead0ffcb8f9952a", + "title": "ESX Hosts Information", "version": 17 } \ No newline at end of file diff --git a/dashboards/esxi.json b/dashboards/esxi.json index 6c5e527..0baa91f 100644 --- a/dashboards/esxi.json +++ b/dashboards/esxi.json @@ -31,7 +31,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "short", "gauge": { "maxValue": 100, @@ -114,7 +114,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "none", "gauge": { "maxValue": 100, @@ -197,7 +197,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "none", "gauge": { "maxValue": 100, @@ -280,7 +280,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "none", "gauge": { "maxValue": 100, @@ -363,7 +363,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "none", "gauge": { "maxValue": 100, @@ -446,7 +446,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "s", "gauge": { "maxValue": 100, @@ -528,7 +528,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "short", 
"gauge": { "maxValue": 100, @@ -610,7 +610,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "decmbytes", "gauge": { "maxValue": 100, @@ -685,7 +685,7 @@ }, { "cacheTimeout": null, - "datasource": "$datasource", + "datasource": "Prometheus", "gridPos": { "h": 4, "w": 4, @@ -750,7 +750,7 @@ }, { "cacheTimeout": null, - "datasource": "$datasource", + "datasource": "Prometheus", "gridPos": { "h": 4, "w": 4, @@ -813,7 +813,7 @@ }, { "cacheTimeout": null, - "datasource": "$datasource", + "datasource": "Prometheus", "gridPos": { "h": 4, "w": 4, @@ -885,7 +885,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "percent", "gauge": { "maxValue": 100, @@ -964,7 +964,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1060,7 +1060,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1163,7 +1163,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1261,7 +1261,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1367,7 +1367,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1491,7 +1491,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1600,7 +1600,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + 
"datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1701,6 +1701,7 @@ "schemaVersion": 20, "style": "dark", "tags": [ + "prometheus", "vmware", "esxi" ], @@ -1729,7 +1730,7 @@ "text": "192.168.0.27", "value": "192.168.0.27" }, - "datasource": "$datasource", + "datasource": "Prometheus", "definition": "", "hide": 0, "includeAll": false, diff --git a/dashboards/virtualmachine.json b/dashboards/virtualmachine.json index 94dc1f1..d3b51c5 100644 --- a/dashboards/virtualmachine.json +++ b/dashboards/virtualmachine.json @@ -31,7 +31,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "none", "gauge": { "maxValue": 100, @@ -114,7 +114,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "none", "gauge": { "maxValue": 100, @@ -197,7 +197,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "none", "gauge": { "maxValue": 100, @@ -284,7 +284,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "none", "gauge": { "maxValue": 100, @@ -371,7 +371,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "short", "gauge": { "maxValue": 100, @@ -459,7 +459,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "none", "gauge": { "maxValue": 100, @@ -542,7 +542,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "short", "gauge": { "maxValue": 100, @@ -617,7 +617,7 @@ }, { "cacheTimeout": null, - "datasource": "$datasource", + "datasource": "Prometheus", "gridPos": { "h": 5, "w": 3, @@ -682,7 +682,7 @@ }, { "cacheTimeout": null, - "datasource": "$datasource", + "datasource": "Prometheus", "gridPos": { "h": 5, "w": 3, @@ 
-747,7 +747,7 @@ }, { "cacheTimeout": null, - "datasource": "$datasource", + "datasource": "Prometheus", "gridPos": { "h": 5, "w": 3, @@ -819,7 +819,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "decmbytes", "gauge": { "maxValue": 100, @@ -901,7 +901,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "$datasource", + "datasource": "Prometheus", "format": "percent", "gauge": { "maxValue": 100, @@ -976,7 +976,7 @@ }, { "cacheTimeout": null, - "datasource": "$datasource", + "datasource": "Prometheus", "gridPos": { "h": 5, "w": 3, @@ -1042,7 +1042,7 @@ }, { "cacheTimeout": null, - "datasource": "$datasource", + "datasource": "Prometheus", "gridPos": { "h": 5, "w": 3, @@ -1112,7 +1112,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1208,7 +1208,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1304,7 +1304,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1395,7 +1395,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1491,7 +1491,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1613,7 +1613,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1710,7 +1710,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", 
"fill": 1, "fillGradient": 0, "gridPos": { @@ -1811,7 +1811,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "$datasource", + "datasource": "Prometheus", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1899,9 +1899,7 @@ ], "schemaVersion": 20, "style": "dark", - "tags": [ - "vmware" - ], + "tags": [], "templating": { "list": [ { @@ -1927,7 +1925,7 @@ "text": "centos-dhcp", "value": "centos-dhcp" }, - "datasource": "$datasource", + "datasource": "Prometheus", "definition": "label_values(vm_name)", "hide": 0, "includeAll": false, diff --git a/openshift/README.md b/openshift/README.md deleted file mode 100644 index e69de29..0000000 diff --git a/openshift/configmap.yaml b/openshift/configmap.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/openshift/deployment.yaml b/openshift/deployment.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/openshift/rolebinding.yaml b/openshift/rolebinding.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/openshift/service.yaml b/openshift/service.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/openshift/servicemonitor.yaml b/openshift/servicemonitor.yaml deleted file mode 100644 index e69de29..0000000 diff --git a/tests/unit/test_vmware_exporter.py b/tests/unit/test_vmware_exporter.py index 6dcb0d0..d26dca8 100644 --- a/tests/unit/test_vmware_exporter.py +++ b/tests/unit/test_vmware_exporter.py @@ -135,7 +135,7 @@ def test_collect_vms(): assert _check_properties(batch_fetch_properties.call_args[0][1]) assert collector.vm_labels.result == { 'vm-1': ['vm-1', 'host-1', 'dc', 'cluster-1'], - } + } assert metrics['vmware_vm_template'].samples[0][2] == 1.0 @@ -209,7 +209,7 @@ def test_collect_vms(): 'vm-1': ['vm-1', 'host-1', 'dc', 'cluster-1'], 'vm-2': ['vm-2'], 'vm-3': ['vm-3', 'host-1', 'dc', 'cluster-1'], - } + } # Assert that vm-3 skipped #69/#70 assert metrics['vmware_vm_power_state'].samples[1][1] == { @@ -366,14 +366,13 @@ def 
test_metrics_without_hostaccess(): 'guest.toolsVersionStatus2': 'guestToolsUnmanaged', } }) - assert collector.vm_labels.result == {'vm-1': ['vm-x', 'datastore-1', 'n/a', 'n/a', 'n/a']} + assert collector.vm_labels.result == {'vm-1': ['vm-x']} yield collector._vmware_get_vms(metrics) # 113 AssertionError {'partition': '/boot'} vs {'host_name': '/boot'} assert metrics['vmware_vm_guest_disk_capacity'].samples[0][1] == { 'vm_name': 'vm-x', 'partition': '/boot', - 'ds_name': 'datastore-1', 'host_name': 'n/a', 'cluster_name': 'n/a', 'dc_name': 'n/a', @@ -383,7 +382,6 @@ def test_metrics_without_hostaccess(): # but found ['vm-1'] assert metrics['vmware_vm_power_state'].samples[0][1] == { 'vm_name': 'vm-x', - 'ds_name': 'datastore-1', 'host_name': 'n/a', 'cluster_name': 'n/a', 'dc_name': 'n/a', @@ -543,7 +541,7 @@ def test_collect_vm_perf(): }) collector.__dict__['vm_labels'] = _succeed({ - 'vm:1': ['vm-1', 'datastore-1', 'host-1', 'dc', 'cluster-1'], + 'vm:1': ['vm-1', 'host-1', 'dc', 'cluster-1'], }) collector.__dict__['vm_inventory'] = _succeed({ @@ -564,7 +562,6 @@ def test_collect_vm_perf(): # General VM metrics assert metrics['vmware_vm_net_transmitted_average'].samples[0][1] == { 'vm_name': 'vm-1', - 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -573,7 +570,6 @@ def test_collect_vm_perf(): assert metrics['vmware_vm_cpu_demand_average'].samples[0][1] == { 'vm_name': 'vm-1', - 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -582,7 +578,6 @@ def test_collect_vm_perf(): assert metrics['vmware_vm_disk_maxTotalLatency_latest'].samples[0][1] == { 'vm_name': 'vm-1', - 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -607,19 +602,12 @@ def test_collect_hosts(): 'password', collect_only, 5000, - True, - False, - False, - True ) collector.content = _succeed(mock.Mock()) collector.__dict__['host_labels'] = _succeed({ 'host:1': 
['host-1', 'dc', 'cluster'], - 'host:2': ['host-2', 'dc', 'cluster'], - 'host:3': ['host-3', 'dc', 'cluster'], - 'host:4': ['host-4', 'dc', 'cluster'], - 'host:5': ['host-5', 'dc', 'cluster'], + 'host:2': ['host-1', 'dc', 'cluster'], }) metrics = collector._create_metric_containers() @@ -632,7 +620,6 @@ def test_collect_hosts(): 'runtime.powerState': 'poweredOn', 'runtime.bootTime': boot_time, 'runtime.connectionState': 'connected', - 'runtime.standbyMode': 'none', 'runtime.inMaintenanceMode': True, 'summary.quickStats.overallCpuUsage': 100, 'summary.hardware.numCpuCores': 12, @@ -643,110 +630,12 @@ def test_collect_hosts(): 'summary.config.product.build': '6765062', 'summary.hardware.cpuModel': 'cpu_model1', 'summary.hardware.model': 'model1', - 'summary.customValue': { - 'customValue1': 'value1', - 'customValue2': 'value2', - }, - 'triggeredAlarmState': '', - 'runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo': '', }, 'host:2': { 'id': 'host:2', 'name': 'host-2', 'runtime.powerState': 'poweredOff', - 'runtime.standbyMode': 'none', - 'summary.customValue': {}, - 'triggeredAlarmState': '', - 'runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo': '', - }, - 'host:3': { - 'id': 'host:3', - 'name': 'host-3', - 'runtime.powerState': 'poweredOn', - 'runtime.bootTime': boot_time, - 'runtime.connectionState': 'connected', - 'runtime.standbyMode': 'in', - 'runtime.inMaintenanceMode': True, - 'summary.quickStats.overallCpuUsage': 100, - 'summary.hardware.numCpuCores': 8, - 'summary.hardware.cpuMhz': 1000, - 'summary.quickStats.overallMemoryUsage': 1024, - 'summary.hardware.memorySize': 2048 * 1024 * 1024, - 'summary.config.product.version': '6.0.0', - 'summary.config.product.build': '6765063', - 'summary.hardware.cpuModel': 'cpu_model1', - 'summary.hardware.model': 'model1', - 'summary.customValue': {}, - 'triggeredAlarmState': '', - 'runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo': '', - }, - 'host:4': { - 'id': 'host:4', - 'name': 
'host-4', - 'runtime.powerState': 'poweredOn', - 'runtime.bootTime': boot_time, - 'runtime.connectionState': 'connected', - 'runtime.standbyMode': 'entering', - 'runtime.inMaintenanceMode': True, - 'summary.quickStats.overallCpuUsage': 100, - 'summary.hardware.numCpuCores': 6, - 'summary.hardware.cpuMhz': 1000, - 'summary.quickStats.overallMemoryUsage': 1024, - 'summary.hardware.memorySize': 2048 * 1024 * 1024, - 'summary.config.product.version': '6.0.0', - 'summary.config.product.build': '6765064', - 'summary.hardware.cpuModel': 'cpu_model1', - 'summary.hardware.model': 'model1', - 'summary.customValue': {}, - 'triggeredAlarmState': '', - 'runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo': '', - }, - 'host:5': { - 'id': 'host:5', - 'name': 'host-5', - 'runtime.powerState': 'poweredOn', - 'runtime.bootTime': boot_time, - 'runtime.connectionState': 'connected', - 'runtime.standbyMode': 'exiting', - 'runtime.inMaintenanceMode': True, - 'summary.quickStats.overallCpuUsage': 100, - 'summary.hardware.numCpuCores': 4, - 'summary.hardware.cpuMhz': 1000, - 'summary.quickStats.overallMemoryUsage': 1024, - 'summary.hardware.memorySize': 2048 * 1024 * 1024, - 'summary.config.product.version': '6.0.0', - 'summary.config.product.build': '6765065', - 'summary.hardware.cpuModel': 'cpu_model1', - 'summary.hardware.model': 'model1', - 'summary.customValue': {}, - 'triggeredAlarmState': ','.join( - ( - 'triggeredAlarm:HostMemoryUsageAlarm:red', - 'triggeredAlarm:HostCPUUsageAlarm:yellow' - ) - ), - 'runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo': ','.join( - ( - 'numericSensorInfo:name=Fan Device 12 System Fan ' - '6B:type=fan:sensorStatus=yellow:value=821700:unitModifier=-2:unit=rpm', - 'numericSensorInfo:name=Power Supply 2 PS2 ' - 'Temperature:type=temperature:sensorStatus=green:value=2900:unitModifier=-2:unit=degrees c', - 'numericSensorInfo:name=System Board 1 VR Watchdog ' - '0:type=voltage:sensorStatus=red:value=2000:unitModifier=0:unit=volts', 
- 'numericSensorInfo:name=Power Supply 2 Current ' - '2:type=power:sensorStatus=green:value=20:unitModifier=-2:unit=amps', - 'numericSensorInfo:name=System Board 1 Pwr ' - 'Consumption:type=power:sensorStatus=green:value=7000:unitModifier=-2:unit=watts', - 'numericSensorInfo:name=Cooling Unit 1 Fan Redundancy ' - '0:type=power:sensorStatus=green:value=1:unitModifier=0:unit=redundancy-discrete', - 'numericSensorInfo:name=Management Controller Firmware 2 NM ' - 'Capabilities:type=other:sensorStatus=unknown:value=5:unitModifier=0:unit=unspecified', - 'cpuStatusInfo:name=CPU 1:type=n/a:sensorStatus=green:value=n/a:unitModifier=n/a:unit=n/a', - 'memoryStatusInfo:name=Memory 12:type=n/a:sensorStatus=yellow:value=n/a:unitModifier=n/a' - ':unit=n/a', - ) - ), - }, + } }) yield collector._vmware_get_hosts(metrics) assert _check_properties(batch_fetch_properties.call_args[0][1]) @@ -754,9 +643,7 @@ def test_collect_hosts(): assert metrics['vmware_host_memory_max'].samples[0][1] == { 'host_name': 'host-1', 'dc_name': 'dc', - 'cluster_name': 'cluster', - 'customValue1': 'value1', - 'customValue2': 'value2' + 'cluster_name': 'cluster' } assert metrics['vmware_host_memory_max'].samples[0][2] == 2048 assert metrics['vmware_host_num_cpu'].samples[0][2] == 12 @@ -767,15 +654,13 @@ def test_collect_hosts(): 'cluster_name': 'cluster', 'version': '6.0.0', 'build': '6765062', - 'customValue1': 'value1', - 'customValue2': 'value2', } assert metrics['vmware_host_product_info'].samples[0][2] == 1 # In our test data we hava a host that is powered down - we should have its # power_state metric but not any others. 
- assert len(metrics['vmware_host_power_state'].samples) == 5 - assert len(metrics['vmware_host_memory_max'].samples) == 4 + assert len(metrics['vmware_host_power_state'].samples) == 2 + assert len(metrics['vmware_host_memory_max'].samples) == 1 assert metrics['vmware_host_hardware_info'].samples[0][1] == { 'host_name': 'host-1', @@ -783,175 +668,9 @@ def test_collect_hosts(): 'cluster_name': 'cluster', 'hardware_model': 'model1', 'hardware_cpu_model': 'cpu_model1', - 'customValue1': 'value1', - 'customValue2': 'value2', } assert metrics['vmware_host_hardware_info'].samples[0][2] == 1 - # Host:1 is not on Standby Mode - assert metrics['vmware_host_standby_mode'].samples[0][2] == 0 - assert metrics['vmware_host_standby_mode'].samples[0][1] == { - 'host_name': 'host-1', - 'dc_name': 'dc', - 'cluster_name': 'cluster', - 'standby_mode_state': 'none', - 'customValue1': 'value1', - 'customValue2': 'value2', - } - - # Host:2 is Powered down and Standby Mode and not set - assert metrics['vmware_host_standby_mode'].samples[1][2] == 0 - assert metrics['vmware_host_standby_mode'].samples[1][1] == { - 'host_name': 'host-2', - 'dc_name': 'dc', - 'cluster_name': 'cluster', - 'standby_mode_state': 'none', - 'customValue1': 'n/a', - 'customValue2': 'n/a', - } - - # Host:3 is on Standby Mode - assert metrics['vmware_host_standby_mode'].samples[2][2] == 1 - assert metrics['vmware_host_standby_mode'].samples[2][1] == { - 'host_name': 'host-3', - 'dc_name': 'dc', - 'cluster_name': 'cluster', - 'standby_mode_state': 'in', - 'customValue1': 'n/a', - 'customValue2': 'n/a', - } - - # Host:4 is not on Standby Mode - assert metrics['vmware_host_standby_mode'].samples[3][2] == 0 - assert metrics['vmware_host_standby_mode'].samples[3][1] == { - 'host_name': 'host-4', - 'dc_name': 'dc', - 'cluster_name': 'cluster', - 'standby_mode_state': 'entering', - 'customValue1': 'n/a', - 'customValue2': 'n/a', - } - - # Host:4 no alarms found - assert metrics['vmware_host_yellow_alarms'].samples[3][2] == 
0 - assert metrics['vmware_host_red_alarms'].samples[3][2] == 0 - - # Host:5 is not on Standby Mode - assert metrics['vmware_host_standby_mode'].samples[4][2] == 0 - assert metrics['vmware_host_standby_mode'].samples[4][1] == { - 'host_name': 'host-5', - 'dc_name': 'dc', - 'cluster_name': 'cluster', - 'standby_mode_state': 'exiting', - 'customValue1': 'n/a', - 'customValue2': 'n/a', - } - - # Host:5 testing alarms - assert metrics['vmware_host_yellow_alarms'].samples[4][2] == 1 - assert metrics['vmware_host_red_alarms'].samples[4][2] == 1 - - assert metrics['vmware_host_yellow_alarms'].samples[4][1] == { - 'cluster_name': 'cluster', - 'customValue1': 'n/a', - 'customValue2': 'n/a', - 'dc_name': 'dc', - 'host_name': 'host-5', - 'alarms': 'triggeredAlarm:HostCPUUsageAlarm' - } - - # Host:5 testing sensors - assert len(metrics['vmware_host_sensor_state'].samples) == 9 - assert metrics['vmware_host_sensor_state'].samples[3][1] == { - 'cluster_name': 'cluster', - 'customValue1': 'n/a', - 'customValue2': 'n/a', - 'dc_name': 'dc', - 'host_name': 'host-5', - 'name': 'Power Supply 2 Current 2', - 'type': 'power' - } - - assert metrics['vmware_host_sensor_fan'].samples[0][2] == 8217 - assert metrics['vmware_host_sensor_fan'].samples[0][1] == { - 'cluster_name': 'cluster', - 'customValue1': 'n/a', - 'customValue2': 'n/a', - 'dc_name': 'dc', - 'host_name': 'host-5', - 'name': 'Fan Device 12 System Fan 6B', - } - - assert metrics['vmware_host_sensor_temperature'].samples[0][2] == 29 - assert metrics['vmware_host_sensor_temperature'].samples[0][1] == { - 'cluster_name': 'cluster', - 'customValue1': 'n/a', - 'customValue2': 'n/a', - 'dc_name': 'dc', - 'host_name': 'host-5', - 'name': 'Power Supply 2 PS2 Temperature', - } - - assert metrics['vmware_host_sensor_power_voltage'].samples[0][2] == 2000 - assert metrics['vmware_host_sensor_power_voltage'].samples[0][1] == { - 'cluster_name': 'cluster', - 'customValue1': 'n/a', - 'customValue2': 'n/a', - 'dc_name': 'dc', - 'host_name': 
'host-5', - 'name': 'System Board 1 VR Watchdog 0', - } - - assert metrics['vmware_host_sensor_power_current'].samples[0][2] == 0.2 - assert metrics['vmware_host_sensor_power_current'].samples[0][1] == { - 'cluster_name': 'cluster', - 'customValue1': 'n/a', - 'customValue2': 'n/a', - 'dc_name': 'dc', - 'host_name': 'host-5', - 'name': 'Power Supply 2 Current 2', - } - - assert metrics['vmware_host_sensor_power_watt'].samples[0][2] == 70 - assert metrics['vmware_host_sensor_power_watt'].samples[0][1] == { - 'cluster_name': 'cluster', - 'customValue1': 'n/a', - 'customValue2': 'n/a', - 'dc_name': 'dc', - 'host_name': 'host-5', - 'name': 'System Board 1 Pwr Consumption', - } - - assert metrics['vmware_host_sensor_redundancy'].samples[0][2] == 1 - assert metrics['vmware_host_sensor_redundancy'].samples[0][1] == { - 'cluster_name': 'cluster', - 'customValue1': 'n/a', - 'customValue2': 'n/a', - 'dc_name': 'dc', - 'host_name': 'host-5', - 'name': 'Cooling Unit 1 Fan Redundancy 0', - } - - assert metrics['vmware_host_sensor_state'].samples[7][1] == { - 'cluster_name': 'cluster', - 'customValue1': 'n/a', - 'customValue2': 'n/a', - 'dc_name': 'dc', - 'host_name': 'host-5', - 'name': 'CPU 1', - 'type': 'n/a' - } - - assert metrics['vmware_host_sensor_state'].samples[8][1] == { - 'cluster_name': 'cluster', - 'customValue1': 'n/a', - 'customValue2': 'n/a', - 'dc_name': 'dc', - 'host_name': 'host-5', - 'name': 'Memory 12', - 'type': 'n/a' - } - @pytest_twisted.inlineCallbacks def test_collect_host_perf(): @@ -1082,22 +801,8 @@ def test_collect_datastore(): 'password', collect_only, 5000, - True, - True, - True, - True ) collector.content = _succeed(mock.Mock()) - collector.client = _succeed(mock.Mock()) - collector._tagNames = { - 'datastores': ['ds_name', 'dc_name', 'ds_cluster'], - } - - collector.tags = { - 'datastores': { - 'datastore-1': ['tag1'] - } - } collector.__dict__['datastore_labels'] = _succeed({ 'datastore-1': ['datastore-1', 'dc', 'ds_cluster'], @@ -1115,38 
+820,16 @@ def test_collect_datastore(): 'vm': ['vm-1'], 'summary.accessible': True, 'summary.maintenanceMode': 'normal', - 'triggeredAlarmState': 'triggeredAlarm:DatastoreDiskUsageAlarm:yellow,triggeredAlarm:OtherAlarm:red' } }) yield collector._vmware_get_datastores(metrics) assert _check_properties(batch_fetch_properties.call_args[0][1]) - assert metrics['vmware_datastore_yellow_alarms'].samples[0][2] == 1 - - assert metrics['vmware_datastore_yellow_alarms'].samples[0][1] == { - 'ds_name': 'datastore-1', - 'dc_name': 'dc', - 'ds_cluster': 'ds_cluster', - 'tags': 'tag1', - 'alarms': 'triggeredAlarm:DatastoreDiskUsageAlarm' - } - - assert metrics['vmware_datastore_red_alarms'].samples[0][2] == 1 - - assert metrics['vmware_datastore_red_alarms'].samples[0][1] == { - 'ds_name': 'datastore-1', - 'dc_name': 'dc', - 'ds_cluster': 'ds_cluster', - 'tags': 'tag1', - 'alarms': 'triggeredAlarm:OtherAlarm' - } - assert metrics['vmware_datastore_capacity_size'].samples[0][1] == { 'ds_name': 'datastore-1', 'dc_name': 'dc', - 'ds_cluster': 'ds_cluster', - 'tags': 'tag1' + 'ds_cluster': 'ds_cluster' } assert metrics['vmware_datastore_capacity_size'].samples[0][2] == 0.0 @@ -1154,16 +837,14 @@ def test_collect_datastore(): 'ds_name': 'datastore-1', 'dc_name': 'dc', 'ds_cluster': 'ds_cluster', - 'mode': 'normal', - 'tags': 'tag1' + 'mode': 'normal' } assert metrics['vmware_datastore_maintenance_mode'].samples[0][2] == 1.0 assert metrics['vmware_datastore_accessible'].samples[0][1] == { 'ds_name': 'datastore-1', 'dc_name': 'dc', - 'ds_cluster': 'ds_cluster', - 'tags': 'tag1' + 'ds_cluster': 'ds_cluster' } assert metrics['vmware_datastore_accessible'].samples[0][2] == 1.0 @@ -1568,9 +1249,6 @@ def test_vmware_resource_async_render_GET_section(): 'vsphere_user': 'username1', 'vsphere_password': 'password1', 'specs_size': 5000, - 'fetch_custom_attributes': True, - 'fetch_tags': True, - 'fetch_alarms': True, 'collect_only': { 'datastores': True, 'hosts': True, @@ -1585,9 +1263,6 @@ def 
test_vmware_resource_async_render_GET_section(): 'vsphere_user': 'username2', 'vsphere_password': 'password2', 'specs_size': 5000, - 'fetch_custom_attributes': True, - 'fetch_tags': True, - 'fetch_alarms': True, 'collect_only': { 'datastores': True, 'hosts': True, @@ -1608,10 +1283,7 @@ def test_vmware_resource_async_render_GET_section(): 'password2', resource.config['mysection']['collect_only'], 5000, - True, - 'On', - True, - True + 'On' ) request.setResponseCode.assert_called_with(200) @@ -1625,9 +1297,6 @@ def test_config_env_multiple_sections(): 'VSPHERE_USER': 'username1', 'VSPHERE_PASSWORD': 'password1', 'VSPHERE_SPECS_SIZE': 5000, - 'VSPHERE_FETCH_CUSTOM_ATTRIBUTES': True, - 'VSPHERE_FETCH_TAGS': True, - 'VSPHERE_FETCH_ALARMS': True, 'VSPHERE_MYSECTION_HOST': '127.0.0.11', 'VSPHERE_MYSECTION_USER': 'username2', 'VSPHERE_MYSECTION_PASSWORD': 'password2', @@ -1647,9 +1316,6 @@ def test_config_env_multiple_sections(): 'vsphere_user': 'username1', 'vsphere_password': 'password1', 'specs_size': 5000, - 'fetch_custom_attributes': True, - 'fetch_tags': True, - 'fetch_alarms': True, 'collect_only': { 'datastores': True, 'hosts': True, @@ -1664,9 +1330,6 @@ def test_config_env_multiple_sections(): 'vsphere_user': 'username2', 'vsphere_password': 'password2', 'specs_size': 5000, - 'fetch_custom_attributes': False, - 'fetch_tags': False, - 'fetch_alarms': False, 'collect_only': { 'datastores': True, 'hosts': True, @@ -1691,11 +1354,3 @@ def test_valid_loglevel_cli_argument(): def test_main(): with pytest.raises(SystemExit): main(['-h', '-l debug']) - - -def test_version(capsys): - with pytest.raises(SystemExit): - main(['-v']) - captured = capsys.readouterr() - assert captured.out.startswith("vmware_exporter") - assert captured.err == "" diff --git a/validate-signature.rb b/validate-signature.rb deleted file mode 100644 index e69de29..0000000 diff --git a/vmware_exporter/helpers.py b/vmware_exporter/helpers.py index 30c08d2..afcf205 100644 --- 
a/vmware_exporter/helpers.py +++ b/vmware_exporter/helpers.py @@ -50,7 +50,7 @@ def batch_fetch_properties(content, obj_type, properties): properties['id'] = obj.obj._moId for prop in obj.propSet: - properties[prop.name] = prop.val + properties[prop.name] = prop.val results[obj.obj._moId] = properties diff --git a/vmware_exporter/vmware_exporter.py b/vmware_exporter/vmware_exporter.py index 70e4ae6..be2bb77 100755 --- a/vmware_exporter/vmware_exporter.py +++ b/vmware_exporter/vmware_exporter.py @@ -6,32 +6,18 @@ """ from __future__ import print_function +import datetime # Generic imports import argparse import os -import re import ssl import sys import traceback import pytz import logging -import datetime -import yaml -import requests - -""" -disable annoying urllib3 warning messages for connecting to servers with non verified certificate Doh! -""" -from requests.packages.urllib3.exceptions import InsecureRequestWarning -requests.packages.urllib3.disable_warnings(InsecureRequestWarning) - -""" -For custom attributes -used to plain some list of lists in a single one -""" -from itertools import chain +from yamlconfig import YamlConfig # Twisted from twisted.web.server import Site, NOT_DONE_YET @@ -49,24 +35,10 @@ from .helpers import batch_fetch_properties, get_bool_env from .defer import parallelize, run_once_property -from .__init__ import __version__ - class VmwareCollector(): - def __init__( - self, - host, - username, - password, - collect_only, - specs_size, - fetch_custom_attributes=False, - ignore_ssl=False, - fetch_tags=False, - fetch_alarms=False - ): - + def __init__(self, host, username, password, collect_only, specs_size, ignore_ssl=False): self.host = host self.username = username self.password = password @@ -74,325 +46,154 @@ def __init__( self.collect_only = collect_only self.specs_size = int(specs_size) - self._session = None - - # Custom Attributes - # flag to wheter fetch custom attributes or not - self.fetch_custom_attributes = 
fetch_custom_attributes - # vms, hosts and datastores custom attributes must be stored by their moid - self._vmsCustomAttributes = {} - self._hostsCustomAttributes = {} - self._datastoresCustomAttributes = {} - - # Tags - # flag to wheter fetch tags or not - self.fetch_tags = fetch_tags - - # Alarms - # flag wheter to fetch alarms or not - self.fetch_alarms = fetch_alarms - - # label names and ammount will be needed later to insert labels from custom attributes - self._labelNames = { - 'vms': ['vm_name', 'ds_name', 'host_name', 'dc_name', 'cluster_name'], - 'vm_perf': ['vm_name', 'ds_name', 'host_name', 'dc_name', 'cluster_name'], - 'vmguests': ['vm_name', 'ds_name', 'host_name', 'dc_name', 'cluster_name'], - 'snapshots': ['vm_name', 'ds_name', 'host_name', 'dc_name', 'cluster_name'], - 'datastores': ['ds_name', 'dc_name', 'ds_cluster'], - 'hosts': ['host_name', 'dc_name', 'cluster_name'], - 'host_perf': ['host_name', 'dc_name', 'cluster_name'], - } - - # if tags are gonna be fetched 'tags' will be a label too - if self.fetch_tags: - for section in self._labelNames.keys(): - self._labelNames[section] = self._labelNames[section] + ['tags'] - - # as label names, metric are going to be used modified later - # as labels from custom attributes are going to be inserted - self._metricNames = { - 'vms': [], - 'vm_perf': [], - 'hosts': [], - 'host_perf': [], - 'datastores': [], - } - def _create_metric_containers(self): metric_list = {} metric_list['vms'] = { 'vmware_vm_power_state': GaugeMetricFamily( 'vmware_vm_power_state', 'VMWare VM Power state (On / Off)', - labels=self._labelNames['vms']), + labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']), 'vmware_vm_boot_timestamp_seconds': GaugeMetricFamily( 'vmware_vm_boot_timestamp_seconds', 'VMWare VM boot time in seconds', - labels=self._labelNames['vms']), + labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']), 'vmware_vm_num_cpu': GaugeMetricFamily( 'vmware_vm_num_cpu', 'VMWare Number of processors in the 
virtual machine', - labels=self._labelNames['vms']), + labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']), 'vmware_vm_memory_max': GaugeMetricFamily( 'vmware_vm_memory_max', 'VMWare VM Memory Max availability in Mbytes', - labels=self._labelNames['vms']), + labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']), 'vmware_vm_max_cpu_usage': GaugeMetricFamily( 'vmware_vm_max_cpu_usage', 'VMWare VM Cpu Max availability in hz', - labels=self._labelNames['vms']), + labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']), 'vmware_vm_template': GaugeMetricFamily( 'vmware_vm_template', 'VMWare VM Template (true / false)', - labels=self._labelNames['vms']), - } + labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']), + } metric_list['vmguests'] = { 'vmware_vm_guest_disk_free': GaugeMetricFamily( 'vmware_vm_guest_disk_free', 'Disk metric per partition', - labels=self._labelNames['vmguests'] + ['partition', ]), + labels=['vm_name', 'host_name', 'dc_name', 'cluster_name', 'partition', ]), 'vmware_vm_guest_disk_capacity': GaugeMetricFamily( 'vmware_vm_guest_disk_capacity', 'Disk capacity metric per partition', - labels=self._labelNames['vmguests'] + ['partition', ]), + labels=['vm_name', 'host_name', 'dc_name', 'cluster_name', 'partition', ]), 'vmware_vm_guest_tools_running_status': GaugeMetricFamily( 'vmware_vm_guest_tools_running_status', 'VM tools running status', - labels=self._labelNames['vmguests'] + ['tools_status', ]), + labels=['vm_name', 'host_name', 'dc_name', 'cluster_name', 'tools_status', ]), 'vmware_vm_guest_tools_version': GaugeMetricFamily( 'vmware_vm_guest_tools_version', 'VM tools version', - labels=self._labelNames['vmguests'] + ['tools_version', ]), + labels=['vm_name', 'host_name', 'dc_name', 'cluster_name', 'tools_version', ]), 'vmware_vm_guest_tools_version_status': GaugeMetricFamily( 'vmware_vm_guest_tools_version_status', 'VM tools version status', - labels=self._labelNames['vmguests'] + ['tools_version_status', ]), - } + 
labels=['vm_name', 'host_name', 'dc_name', 'cluster_name', 'tools_version_status', ]), + } metric_list['snapshots'] = { 'vmware_vm_snapshots': GaugeMetricFamily( 'vmware_vm_snapshots', 'VMWare current number of existing snapshots', - labels=self._labelNames['snapshots']), + labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']), 'vmware_vm_snapshot_timestamp_seconds': GaugeMetricFamily( 'vmware_vm_snapshot_timestamp_seconds', 'VMWare Snapshot creation time in seconds', - labels=self._labelNames['snapshots'] + ['vm_snapshot_name']), - } + labels=['vm_name', 'host_name', 'dc_name', 'cluster_name', 'vm_snapshot_name']), + } metric_list['datastores'] = { 'vmware_datastore_capacity_size': GaugeMetricFamily( 'vmware_datastore_capacity_size', 'VMWare Datasore capacity in bytes', - labels=self._labelNames['datastores']), + labels=['ds_name', 'dc_name', 'ds_cluster']), 'vmware_datastore_freespace_size': GaugeMetricFamily( 'vmware_datastore_freespace_size', 'VMWare Datastore freespace in bytes', - labels=self._labelNames['datastores']), + labels=['ds_name', 'dc_name', 'ds_cluster']), 'vmware_datastore_uncommited_size': GaugeMetricFamily( 'vmware_datastore_uncommited_size', 'VMWare Datastore uncommitted in bytes', - labels=self._labelNames['datastores']), + labels=['ds_name', 'dc_name', 'ds_cluster']), 'vmware_datastore_provisoned_size': GaugeMetricFamily( 'vmware_datastore_provisoned_size', 'VMWare Datastore provisoned in bytes', - labels=self._labelNames['datastores']), + labels=['ds_name', 'dc_name', 'ds_cluster']), 'vmware_datastore_hosts': GaugeMetricFamily( 'vmware_datastore_hosts', 'VMWare Hosts number using this datastore', - labels=self._labelNames['datastores']), + labels=['ds_name', 'dc_name', 'ds_cluster']), 'vmware_datastore_vms': GaugeMetricFamily( 'vmware_datastore_vms', 'VMWare Virtual Machines count per datastore', - labels=self._labelNames['datastores']), + labels=['ds_name', 'dc_name', 'ds_cluster']), 'vmware_datastore_maintenance_mode': 
GaugeMetricFamily( 'vmware_datastore_maintenance_mode', 'VMWare datastore maintenance mode (normal / inMaintenance / enteringMaintenance)', - labels=self._labelNames['datastores'] + ['mode']), + labels=['ds_name', 'dc_name', 'ds_cluster', 'mode']), 'vmware_datastore_type': GaugeMetricFamily( 'vmware_datastore_type', 'VMWare datastore type (VMFS, NetworkFileSystem, NetworkFileSystem41, CIFS, VFAT, VSAN, VFFS)', - labels=self._labelNames['datastores'] + ['ds_type']), + labels=['ds_name', 'dc_name', 'ds_cluster', 'ds_type']), 'vmware_datastore_accessible': GaugeMetricFamily( 'vmware_datastore_accessible', 'VMWare datastore accessible (true / false)', - labels=self._labelNames['datastores']) - } + labels=['ds_name', 'dc_name', 'ds_cluster']) + } metric_list['hosts'] = { 'vmware_host_power_state': GaugeMetricFamily( 'vmware_host_power_state', 'VMWare Host Power state (On / Off)', - labels=self._labelNames['hosts']), - 'vmware_host_standby_mode': GaugeMetricFamily( - 'vmware_host_standby_mode', - 'VMWare Host Standby Mode (entering / exiting / in / none)', - labels=self._labelNames['hosts'] + ['standby_mode_state']), + labels=['host_name', 'dc_name', 'cluster_name']), 'vmware_host_connection_state': GaugeMetricFamily( 'vmware_host_connection_state', 'VMWare Host connection state (connected / disconnected / notResponding)', - labels=self._labelNames['hosts'] + ['state']), + labels=['host_name', 'dc_name', 'cluster_name', 'state']), 'vmware_host_maintenance_mode': GaugeMetricFamily( 'vmware_host_maintenance_mode', 'VMWare Host maintenance mode (true / false)', - labels=self._labelNames['hosts']), + labels=['host_name', 'dc_name', 'cluster_name']), 'vmware_host_boot_timestamp_seconds': GaugeMetricFamily( 'vmware_host_boot_timestamp_seconds', 'VMWare Host boot time in seconds', - labels=self._labelNames['hosts']), + labels=['host_name', 'dc_name', 'cluster_name']), 'vmware_host_cpu_usage': GaugeMetricFamily( 'vmware_host_cpu_usage', 'VMWare Host CPU usage in Mhz', - 
labels=self._labelNames['hosts']), + labels=['host_name', 'dc_name', 'cluster_name']), 'vmware_host_cpu_max': GaugeMetricFamily( 'vmware_host_cpu_max', 'VMWare Host CPU max availability in Mhz', - labels=self._labelNames['hosts']), + labels=['host_name', 'dc_name', 'cluster_name']), 'vmware_host_num_cpu': GaugeMetricFamily( 'vmware_host_num_cpu', 'VMWare Number of processors in the Host', - labels=self._labelNames['hosts']), + labels=['host_name', 'dc_name', 'cluster_name']), 'vmware_host_memory_usage': GaugeMetricFamily( 'vmware_host_memory_usage', 'VMWare Host Memory usage in Mbytes', - labels=self._labelNames['hosts']), + labels=['host_name', 'dc_name', 'cluster_name']), 'vmware_host_memory_max': GaugeMetricFamily( 'vmware_host_memory_max', 'VMWare Host Memory Max availability in Mbytes', - labels=self._labelNames['hosts']), + labels=['host_name', 'dc_name', 'cluster_name']), 'vmware_host_product_info': GaugeMetricFamily( 'vmware_host_product_info', 'A metric with a constant "1" value labeled by version and build from os the host.', - labels=self._labelNames['hosts'] + ['version', 'build']), + labels=['host_name', 'dc_name', 'cluster_name', 'version', 'build']), 'vmware_host_hardware_info': GaugeMetricFamily( 'vmware_host_hardware_info', 'A metric with a constant "1" value labeled by model and cpu model from the host.', - labels=self._labelNames['hosts'] + ['hardware_model', 'hardware_cpu_model']), - 'vmware_host_sensor_state': GaugeMetricFamily( - 'vmware_host_sensor_state', - 'VMWare sensor state value (0=red / 1=yellow / 2=green / 3=unknown) labeled by sensor name and type ' - 'from the host.', - labels=self._labelNames['hosts'] + ['name', 'type']), - 'vmware_host_sensor_fan': GaugeMetricFamily( - 'vmware_host_sensor_fan', - 'VMWare sensor fan speed value in RPM labeled by sensor name from the host.', - labels=self._labelNames['hosts'] + ['name']), - 'vmware_host_sensor_temperature': GaugeMetricFamily( - 'vmware_host_sensor_temperature', - 'VMWare sensor 
temperature value in degree C labeled by sensor name from the host.', - labels=self._labelNames['hosts'] + ['name']), - 'vmware_host_sensor_power_voltage': GaugeMetricFamily( - 'vmware_host_sensor_power_voltage', - 'VMWare sensor power voltage value in volt labeled by sensor name from the host.', - labels=self._labelNames['hosts'] + ['name']), - 'vmware_host_sensor_power_current': GaugeMetricFamily( - 'vmware_host_sensor_power_current', - 'VMWare sensor power current value in amp labeled by sensor name from the host.', - labels=self._labelNames['hosts'] + ['name']), - 'vmware_host_sensor_power_watt': GaugeMetricFamily( - 'vmware_host_sensor_power_watt', - 'VMWare sensor power watt value in watt labeled by sensor name from the host.', - labels=self._labelNames['hosts'] + ['name']), - 'vmware_host_sensor_redundancy': GaugeMetricFamily( - 'vmware_host_sensor_redundancy', - 'VMWare sensor redundancy value (1=ok / 0=ko) labeled by sensor name from the host.', - labels=self._labelNames['hosts'] + ['name']), - } - - """ - if alarms are being retrieved, metrics have to been created here - """ - if self.fetch_alarms: - """ - for hosts - """ - metric_list['hosts'].update( - { - 'vmware_host_yellow_alarms': GaugeMetricFamily( - 'vmware_host_yellow_alarms', - 'A metric with the amount of host yellow alarms and labeled with the list of alarm names', - labels=self._labelNames['hosts'] + ['alarms'] - ), - 'vmware_host_red_alarms': GaugeMetricFamily( - 'vmware_host_red_alarms', - 'A metric with the amount of host red alarms and labeled with the list of alarm names', - labels=self._labelNames['hosts'] + ['alarms'] - ) - } - ) - - """ - for datastores - """ - metric_list['datastores'].update( - { - 'vmware_datastore_yellow_alarms': GaugeMetricFamily( - 'vmware_datastore_yellow_alarms', - 'A metric with the amount of datastore yellow alarms and labeled with the list of alarm names', - labels=self._labelNames['datastores'] + ['alarms'] - ), - 'vmware_datastore_red_alarms': 
GaugeMetricFamily( - 'vmware_datastore_red_alarms', - 'A metric with the amount of datastore red alarms and labeled with the list of alarm names', - labels=self._labelNames['datastores'] + ['alarms'] - ) - } - ) - - """ - for vms - """ - metric_list['vms'].update( - { - 'vmware_vm_yellow_alarms': GaugeMetricFamily( - 'vmware_vm_yellow_alarms', - 'A metric with the amount of virtual machine yellow alarms and \ - labeled with the list of alarm names', - labels=self._labelNames['vms'] + ['alarms'] - ), - 'vmware_vm_red_alarms': GaugeMetricFamily( - 'vmware_vm_red_alarms', - 'A metric with the amount of virtual machine red alarms and \ - labeled with the list of alarm names', - labels=self._labelNames['vms'] + ['alarms'] - ) - } - ) - metric_list['vmguests'].update( - { - 'vmware_vm_yellow_alarms': GaugeMetricFamily( - 'vmware_vm_yellow_alarms', - 'A metric with the amount of virtual machine yellow alarms and \ - labeled with the list of alarm names', - labels=self._labelNames['vms'] + ['alarms'] - ), - 'vmware_vm_red_alarms': GaugeMetricFamily( - 'vmware_vm_red_alarms', - 'A metric with the amount of virtual machine red alarms and \ - labeled with the list of alarm names', - labels=self._labelNames['vms'] + ['alarms'] - ) - } - ) - metric_list['snapshots'].update( - { - 'vmware_vm_yellow_alarms': GaugeMetricFamily( - 'vmware_vm_yellow_alarms', - 'A metric with the amount of virtual machine yellow alarms and \ - labeled with the list of alarm names', - labels=self._labelNames['vms'] + ['alarms'] - ), - 'vmware_vm_red_alarms': GaugeMetricFamily( - 'vmware_vm_red_alarms', - 'A metric with the amount of virtual machine red alarms and \ - labeled with the list of alarm names', - labels=self._labelNames['vms'] + ['alarms'] - ) - } - ) + labels=['host_name', 'dc_name', 'cluster_name', 'hardware_model', 'hardware_cpu_model']), + } metrics = {} for key, value in self.collect_only.items(): if value is True: - """ storing metric names to be used later """ - 
self._metricNames[key] = list(metric_list[key].keys()) metrics.update(metric_list[key]) return metrics @@ -421,7 +222,7 @@ def collect(self): # Collect Datastore metrics if collect_only['datastores'] is True: - tasks.append(self._vmware_get_datastores(metrics, )) + tasks.append(self._vmware_get_datastores(metrics,)) if collect_only['hosts'] is True: tasks.append(self._vmware_get_hosts(metrics)) @@ -433,134 +234,12 @@ def collect(self): logging.info("Finished collecting metrics from {vsphere_host}".format(vsphere_host=vsphere_host)) - return list(metrics.values()) # noqa: F705 + return list(metrics.values()) # noqa: F705 def _to_epoch(self, my_date): """ convert to epoch time """ return (my_date - datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds() - @run_once_property - @defer.inlineCallbacks - def session(self): - - if self._session is None: - self._session = requests.Session() - self._session.verify = not self.ignore_ssl - self._session.auth = (self.username, self.password) - - try: - yield threads.deferToThread( - self._session.post, - 'https://{host}/rest/com/vmware/cis/session'.format(host=self.host) - ) - except Exception as e: - logging.error('Error creating vcenter API session ({})'.format(e)) - self._session = None - - return self._session - - @run_once_property - @defer.inlineCallbacks - def _tagIDs(self): - """ - fetch a list of all tags ids - """ - session = yield self.session - response = yield threads.deferToThread( - session.get, - 'https://{host}/rest/com/vmware/cis/tagging/tag'.format(host=self.host) - ) - output = [] - try: - output = response.json().get('value') - except Exception as e: - logging.error('Unable to fetch tag IDs from vcenter {} ({})'.format(self.host, e)) - - return output - - @run_once_property - @defer.inlineCallbacks - def _attachedObjectsOnTags(self): - """ - retrieve a dict with all objects which have a tag attached - """ - session = yield self.session - tagIDs = yield self._tagIDs - jsonBody = { - 'tag_ids': 
tagIDs - } - response = yield threads.deferToThread( - session.post, - 'https://{host}/rest/com/vmware/cis/tagging/tag-association?~action=list-attached-objects-on-tags' - .format(host=self.host), - json=jsonBody - ) - - output = {} - - try: - output = response.json().get('value', output) - except Exception as e: - logging.error('Unable to fetch list of attached objects on tags on vcenter {} ({})'.format(self.host, e)) - - return output - - @run_once_property - @defer.inlineCallbacks - def _tagNames(self): - """ - tag IDs are useless to enduser, so they have to be translated - to the tag text - """ - session = yield self.session - tagIDs = yield self._tagIDs - tagNames = {} - for tagID in tagIDs: - response = yield threads.deferToThread( - session.get, - 'https://{host}/rest/com/vmware/cis/tagging/tag/id:{tag_id}'.format(host=self.host, tag_id=tagID) - ) - tagObj = response.json().get('value', {}) - if tagObj: - tagNames[tagObj.get('id')] = tagObj.get('name') - - return tagNames - - @run_once_property - @defer.inlineCallbacks - def tags(self): - """ - tags are finally stored by category: vms, hosts, and datastores - and linked to object moid - """ - logging.info("Fetching tags") - start = datetime.datetime.utcnow() - - attachedObjs = yield self._attachedObjectsOnTags - tagNames = yield self._tagNames - tags = { - 'vms': {}, - 'hosts': {}, - 'datastores': {}, - 'others': {}, - } - - sections = {'VirtualMachine': 'vms', 'Datastore': 'datastores', 'HostSystem': 'hosts'} - - for attachedObj in attachedObjs: - tagName = tagNames.get(attachedObj.get('tag_id')) - for obj in attachedObj.get('object_ids'): - section = sections.get(obj.get('type'), 'others') - if obj.get('id') not in tags[section]: - tags[section][obj.get('id')] = [tagName] - else: - tags[section][obj.get('id')].append(tagName) - - fetch_time = datetime.datetime.utcnow() - start - logging.info("Fetched tags ({fetch_time})".format(fetch_time=fetch_time)) - - return tags - @run_once_property 
@defer.inlineCallbacks def connection(self): @@ -625,41 +304,12 @@ def datastore_inventory(self): 'vm', ] - """ - are custom attributes going to be retrieved? - """ - if self.fetch_custom_attributes: - """ yep! """ - properties.append('customValue') - - """ - triggeredAlarmState must be fetched to get datastore alarms list - """ - if self.fetch_alarms: - properties.append('triggeredAlarmState') - datastores = yield self.batch_fetch_properties( vim.Datastore, properties ) - - """ - once custom attributes are fetched, - store'em linked to their moid - if no customValue found for an object - it get an empty dict - """ - if self.fetch_custom_attributes: - self._datastoresCustomAttributes = dict( - [ - (ds_moId, ds.get('customValue', {})) - for ds_moId, ds in datastores.items() - ] - ) - fetch_time = datetime.datetime.utcnow() - start logging.info("Fetched vim.Datastore inventory ({fetch_time})".format(fetch_time=fetch_time)) - return datastores @run_once_property @@ -676,7 +326,6 @@ def host_system_inventory(self): 'summary.config.product.version', 'summary.config.product.build', 'runtime.powerState', - 'runtime.standbyMode', 'runtime.bootTime', 'runtime.connectionState', 'runtime.inMaintenanceMode', @@ -684,48 +333,14 @@ def host_system_inventory(self): 'summary.quickStats.overallMemoryUsage', 'summary.hardware.cpuModel', 'summary.hardware.model', - 'runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo', - 'runtime.healthSystemRuntime.hardwareStatusInfo.cpuStatusInfo', - 'runtime.healthSystemRuntime.hardwareStatusInfo.memoryStatusInfo', ] - """ - signal to fetch hosts custom attributes - yay! 
- """ - if self.fetch_custom_attributes: - properties.append('summary.customValue') - - """ - triggeredAlarmState must be fetched to get host alarms list - in case of hosts, sensors, cpu and memory status alarms - are going to be retrieved as well - """ - if self.fetch_alarms: - properties.append('triggeredAlarmState') - host_systems = yield self.batch_fetch_properties( vim.HostSystem, properties, ) - - """ - once custom attributes are fetched, - store'em linked to their moid - if no customValue found for an object - it get an empty dict - """ - if self.fetch_custom_attributes: - self._hostsCustomAttributes = dict( - [ - (host_moId, host.get('summary.customValue', {})) - for host_moId, host in host_systems.items() - ] - ) - fetch_time = datetime.datetime.utcnow() - start logging.info("Fetched vim.HostSystem inventory ({fetch_time})".format(fetch_time=fetch_time)) - return host_systems @run_once_property @@ -737,8 +352,6 @@ def vm_inventory(self): 'name', 'runtime.host', 'parent', - 'summary.config.vmPathName', - 'guest.ipAddress', ] if self.collect_only['vms'] is True: @@ -762,209 +375,14 @@ def vm_inventory(self): if self.collect_only['snapshots'] is True: properties.append('snapshot') - """ - papa smurf, are we collecting custom attributes? 
- """ - if self.fetch_custom_attributes: - properties.append('summary.customValue') - - """ - triggeredAlarmState must be fetched to get vm alarms list - """ - if self.fetch_alarms: - properties.append('triggeredAlarmState') - virtual_machines = yield self.batch_fetch_properties( vim.VirtualMachine, properties, ) - - """ - once custom attributes are fetched, - store'em linked to their moid - if no customValue found for an object - it get an empty dict - """ - if self.fetch_custom_attributes: - self._vmsCustomAttributes = dict( - [ - (vm_moId, vm.get('summary.customValue', {})) - for vm_moId, vm in virtual_machines.items() - ] - ) - fetch_time = datetime.datetime.utcnow() - start logging.info("Fetched vim.VirtualMachine inventory ({fetch_time})".format(fetch_time=fetch_time)) - return virtual_machines - @defer.inlineCallbacks - def customAttributesLabelNames(self, metric_type): - """ - vm perf, vms, vmguestes and snapshots metrics share the same custom attributes - as they re related to virtual machine objects - - host perf and hosts metrics share the same custom attributes - as they re related to host system objects - """ - - labelNames = [] - - if metric_type in ('datastores',): - labelNames = yield self.datastoresCustomAttributesLabelNames - - if metric_type in ('vms', 'vm_perf', 'snapshots', 'vmguests'): - labelNames = yield self.vmsCustomAttributesLabelNames - - if metric_type in ('hosts', 'host_perf'): - labelNames = yield self.hostsCustomAttributesLabelNames - - return labelNames - - @run_once_property - @defer.inlineCallbacks - def datastoresCustomAttributesLabelNames(self): - """ - normalizes custom attributes to all objects of the same type - it means - all objects of type datastore will share the same set of custom attributes - but these custom attributes can be filled or not, depending on - what has been gathered (of course) - """ - customAttributesLabelNames = [] - - if self.fetch_custom_attributes: - customAttributes = yield 
self._datastoresCustomAttributes - customAttributesLabelNames = list( - set( - chain( - *[ - attributes.keys() - for attributes in customAttributes.values() - ] - ) - ) - ) - - return customAttributesLabelNames - - @run_once_property - @defer.inlineCallbacks - def hostsCustomAttributesLabelNames(self): - """ - normalizes custom attributes to all objects of the same type - it means - all objects of type host system will share the same set of custom attributes - but these custom attributes can be filled or not, depending on - what has been gathered (of course) - """ - customAttributesLabelNames = [] - - if self.fetch_custom_attributes: - customAttributes = yield self._hostsCustomAttributes - customAttributesLabelNames = list( - set( - chain( - *[ - attributes.keys() - for attributes in customAttributes.values() - ] - ) - ) - ) - - return customAttributesLabelNames - - @run_once_property - @defer.inlineCallbacks - def vmsCustomAttributesLabelNames(self): - """ - normalizes custom attributes to all objects of the same type - it means - all objects of type virtual machine will share the same set of custom attributes - but these custom attributes can be filled or not, depending on - what has been gathered (of course) - """ - customAttributesLabelNames = [] - - if self.fetch_custom_attributes: - customAttributes = yield self._vmsCustomAttributes - customAttributesLabelNames = list( - set( - chain( - *[ - attributes.keys() - for attributes in customAttributes.values() - ] - ) - ) - ) - - return customAttributesLabelNames - - @run_once_property - @defer.inlineCallbacks - def datastoresCustomAttributes(self): - """ - creates a list of the custom attributes values, - in order their labels re gonna be inserted - when no value was found for that custom attribute - 'n/a' is inserted - """ - customAttributes = {} - - if self.fetch_custom_attributes: - customAttributes = yield self._datastoresCustomAttributes - datastoresCustomAttributesLabelNames = yield 
self.datastoresCustomAttributesLabelNames - for labelName in datastoresCustomAttributesLabelNames: - for ds in customAttributes.keys(): - if labelName not in customAttributes[ds].keys(): - customAttributes[ds][labelName] = 'n/a' - - return customAttributes - - @run_once_property - @defer.inlineCallbacks - def hostsCustomAttributes(self): - """ - creates a list of the custom attributes values, - in order their labels re gonna be inserted - when no value was found for that custom attribute - 'n/a' is inserted - """ - customAttributes = {} - - if self.fetch_custom_attributes: - customAttributes = yield self._hostsCustomAttributes - hostsCustomAttributesLabelNames = yield self.hostsCustomAttributesLabelNames - for labelName in hostsCustomAttributesLabelNames: - for host in customAttributes.keys(): - if labelName not in customAttributes[host].keys(): - customAttributes[host][labelName] = 'n/a' - - return customAttributes - - @run_once_property - @defer.inlineCallbacks - def vmsCustomAttributes(self): - """ - creates a list of the custom attributes values, - in order their labels re gonna be inserted - when no value was found for that custom attribute - 'n/a' is inserted - """ - customAttributes = {} - - if self.fetch_custom_attributes: - customAttributes = yield self._vmsCustomAttributes - vmsCustomAttributesLabelNames = yield self.customAttributesLabelNames('vms') - for labelName in vmsCustomAttributesLabelNames: - for vm in customAttributes.keys(): - if labelName not in customAttributes[vm].keys(): - customAttributes[vm][labelName] = 'n/a' - - return customAttributes - @run_once_property @defer.inlineCallbacks def datacenter_inventory(self): @@ -1004,7 +422,6 @@ def _collect(node, level=1, dc=None, storagePod=""): for dc in dcs: result = yield threads.deferToThread(lambda: _collect(dc)) labels.update(result) - return labels @run_once_property @@ -1030,7 +447,7 @@ def _collect(node, level=1, dc=None, folder=None): node.summary.config.name.rstrip('.'), dc, folder.name 
if isinstance(folder, vim.ClusterComputeResource) else '' - ] + ] else: logging.debug("[? ] {level} {node}".format(level=('-' * level).ljust(7), node=node)) return inventory @@ -1042,46 +459,9 @@ def _collect(node, level=1, dc=None, folder=None): labels.update(result) return labels - @run_once_property - @defer.inlineCallbacks - def vm_tags(self): - """ - return a dict that links vms moid to its tags - """ - tags = {} - if self.fetch_tags: - tags = yield self.tags - tags = tags['vms'] - return tags - - @run_once_property - @defer.inlineCallbacks - def host_tags(self): - """ - return a dict that links hosts moid to its tags - """ - tags = {} - if self.fetch_tags: - tags = yield self.tags - tags = tags['hosts'] - return tags - - @run_once_property - @defer.inlineCallbacks - def datastore_tags(self): - """ - return a dict that links datastore moid to its tags - """ - tags = {} - if self.fetch_tags: - tags = yield self.tags - tags = tags['datastores'] - return tags - @run_once_property @defer.inlineCallbacks def vm_labels(self): - virtual_machines, host_labels = yield parallelize(self.vm_inventory, self.host_labels) labels = {} @@ -1093,39 +473,9 @@ def vm_labels(self): labels[moid] = [row['name']] - if 'summary.config.vmPathName' in row: - p = row['summary.config.vmPathName'] - if p.startswith('['): - p = p[1:p.find("]")] - else: - p = 'n/a' - - labels[moid] = labels[moid] + [p] - if host_moid in host_labels: labels[moid] = labels[moid] + host_labels[host_moid] - """ - this code was in vm_inventory before - but I have the feeling it is best placed here where - vms label values are handled - """ - labels_cnt = len(labels[moid]) - if self.fetch_tags: - labels_cnt += 1 - - if labels_cnt < len(self._labelNames['vms']): - logging.info( - "Only ${cnt}/{expected} labels (vm, host, dc, cluster) found, filling n/a" - .format( - cnt=labels_cnt, - expected=len(self._labelNames['vms']) - ) - ) - - for i in range(labels_cnt, len(self._labelNames['vms'])): - 
labels[moid].append('n/a') - return labels @run_once_property @@ -1169,89 +519,18 @@ def _vmware_full_snapshots_list(self, snapshots): snapshot.childSnapshotList) return snapshot_data - @defer.inlineCallbacks - def updateMetricsLabelNames(self, metrics, metric_types): - """ - by the time metrics are created, we have no clue what are gonna be the custom attributes - or even if they re gonna be fetched. - so after custom attributes are finally retrieved from the datacenter, - their labels need to be inserted inside the already defined metric labels. - to be possible, we previously had to store metric names and map'em by object type, vms, - hosts and datastores, and so its metrics, so as to gather everything here - """ - # Insert custom attributes names as metric labels - if self.fetch_custom_attributes: - - for metric_type in metric_types: - - customAttributesLabelNames = yield self.customAttributesLabelNames(metric_type) - - for metric_name in self._metricNames.get(metric_type, []): - metric = metrics.get(metric_name) - labelnames = metric._labelnames - metric._labelnames = labelnames[0:len(self._labelNames[metric_type])] - metric._labelnames += customAttributesLabelNames - metric._labelnames += labelnames[len(self._labelNames[metric_type]):] - metric._labelnames = list(map(lambda x: re.sub('[^a-zA-Z0-9_]', '_', x), metric._labelnames)) - @defer.inlineCallbacks def _vmware_get_datastores(self, ds_metrics): """ Get Datastore information """ - if self.fetch_tags: - """ - if we need the tags, we fetch'em here - """ - results, datastore_labels, datastore_tags = yield parallelize( - self.datastore_inventory, - self.datastore_labels, - self.datastore_tags - ) - else: - results, datastore_labels = yield parallelize(self.datastore_inventory, self.datastore_labels) - - """ - fetch custom attributes - """ - customAttributes = {} - customAttributesLabelNames = {} - if self.fetch_custom_attributes: - customAttributes = yield self.datastoresCustomAttributes - 
customAttributesLabelNames = yield self.datastoresCustomAttributesLabelNames - - """ - updates the datastore metric label names with custom attributes names - """ - self.updateMetricsLabelNames(ds_metrics, ['datastores']) + results, datastore_labels = yield parallelize(self.datastore_inventory, self.datastore_labels) for datastore_id, datastore in results.items(): try: name = datastore['name'] labels = datastore_labels[name] - - """ - insert the tags values if needed - if tags are empty they receive a 'n/a' - """ - if self.fetch_tags: - tags = datastore_tags.get(datastore_id, []) - tags = ','.join(tags) - if not tags: - tags = 'n/a' - - labels += [tags] - - """ - time to insert the custom attributes values in order - """ - customLabels = [] - for labelName in customAttributesLabelNames: - customLabels.append(customAttributes[datastore_id].get(labelName)) - - labels += customLabels - except KeyError as e: logging.info( "Key error, unable to register datastore {error}, datastores are {datastore_labels}".format( @@ -1260,29 +539,6 @@ def _vmware_get_datastores(self, ds_metrics): ) continue - """ - filter red and yellow alarms - """ - if self.fetch_alarms: - alarms = datastore.get('triggeredAlarmState').split(',') - alarms = [a for a in alarms if ':' in a] - - # Red alarms - red_alarms = [':'.join(a.split(':')[:-1]) for a in alarms if a.split(':')[-1] == 'red'] - red_alarms_label = ','.join(red_alarms) if red_alarms else 'n/a' - ds_metrics['vmware_datastore_red_alarms'].add_metric( - labels + [red_alarms_label], - len(red_alarms) - ) - - # Yellow alarms - yellow_alarms = [':'.join(a.split(':')[:-1]) for a in alarms if a.split(':')[-1] == 'yellow'] - yellow_alarms_label = ','.join(yellow_alarms) if yellow_alarms else 'n/a' - ds_metrics['vmware_datastore_yellow_alarms'].add_metric( - labels + [yellow_alarms_label], - len(yellow_alarms) - ) - ds_capacity = float(datastore.get('summary.capacity', 0)) ds_freespace = float(datastore.get('summary.freeSpace', 0)) 
ds_uncommitted = float(datastore.get('summary.uncommitted', 0)) @@ -1354,11 +610,7 @@ def _vmware_get_vm_perf_manager_metrics(self, vm_metrics): vm_metrics[p_metric] = GaugeMetricFamily( p_metric, p_metric, - labels=self._labelNames['vm_perf']) - """ - store perf metric name for later ;) - """ - self._metricNames['vm_perf'].append(p_metric) + labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']) metrics = [] metric_names = {} @@ -1371,11 +623,6 @@ def _vmware_get_vm_perf_manager_metrics(self, vm_metrics): )) metric_names[counter_key] = perf_metric_name - """ - updates vm perf metrics label names with vms custom attributes names - """ - self.updateMetricsLabelNames(vm_metrics, ['vm_perf']) - specs = [] for vm in virtual_machines.values(): if vm.get('runtime.powerState') != 'poweredOn': @@ -1390,7 +637,7 @@ def _vmware_get_vm_perf_manager_metrics(self, vm_metrics): content = yield self.content if len(specs) > 0: - chunks = [specs[x:x + self.specs_size] for x in range(0, len(specs), self.specs_size)] + chunks = [specs[x:x+self.specs_size] for x in range(0, len(specs), self.specs_size)] for list_specs in chunks: results, labels = yield parallelize( threads.deferToThread(content.perfManager.QueryStats, querySpec=list_specs), @@ -1446,8 +693,7 @@ def _vmware_get_host_perf_manager_metrics(self, host_metrics): host_metrics[p_metric] = GaugeMetricFamily( p_metric, p_metric, - labels=self._labelNames['host_perf']) - self._metricNames['host_perf'].append(p_metric) + labels=['host_name', 'dc_name', 'cluster_name']) metrics = [] metric_names = {} @@ -1460,9 +706,6 @@ def _vmware_get_host_perf_manager_metrics(self, host_metrics): )) metric_names[counter_key] = perf_metric_name - # Insert custom attributes names as metric labels - self.updateMetricsLabelNames(host_metrics, ['host_perf']) - specs = [] for host in host_systems.values(): if host.get('runtime.powerState') != 'poweredOn': @@ -1487,7 +730,7 @@ def _vmware_get_host_perf_manager_metrics(self, host_metrics): 
host_metrics[metric_names[metric.id.counterId]].add_metric( labels[ent.entity._moId], float(sum(metric.value)), - ) + ) logging.info('FIN: _vmware_get_host_perf_manager_metrics') @@ -1498,24 +741,7 @@ def _vmware_get_vms(self, metrics): """ logging.info("Starting vm metrics collection") - if self.fetch_tags: - virtual_machines, vm_labels, vm_tags = yield parallelize( - self.vm_inventory, - self.vm_labels, - self.vm_tags - ) - else: - virtual_machines, vm_labels = yield parallelize(self.vm_inventory, self.vm_labels) - - # fetch Custom Attributes Labels ("values") - customAttributes = {} - customAttributesLabelNames = {} - if self.fetch_custom_attributes: - customAttributes = yield self.vmsCustomAttributes - customAttributesLabelNames = yield self.customAttributesLabelNames('vms') - - # Insert custom attributes names as metric labels - self.updateMetricsLabelNames(metrics, ['vms', 'vmguests', 'snapshots']) + virtual_machines, vm_labels = yield parallelize(self.vm_inventory, self.vm_labels) for moid, row in virtual_machines.items(): # Ignore vm if field "runtime.host" does not exist @@ -1524,44 +750,13 @@ def _vmware_get_vms(self, metrics): continue labels = vm_labels[moid] + labels_cnt = len(labels) - customLabels = [] - for labelName in customAttributesLabelNames: - customLabels.append(customAttributes[moid].get(labelName)) - - if self.fetch_tags: - tags = vm_tags.get(moid, []) - tags = ','.join(tags) - if not tags: - tags = 'n/a' - - vm_labels[moid] += [tags] + customLabels - - else: - vm_labels[moid] += customLabels - - """ - filter red and yellow alarms - """ - if self.fetch_alarms and ('triggeredAlarmState' in row): - alarms = row.get('triggeredAlarmState').split(',') - alarms = [a for a in alarms if ':' in a] - - # Red alarms - red_alarms = [':'.join(a.split(':')[:-1]) for a in alarms if a.split(':')[-1] == 'red'] - red_alarms_label = ','.join(red_alarms) if red_alarms else 'n/a' - metrics['vmware_vm_red_alarms'].add_metric( - labels + [red_alarms_label], - 
len(red_alarms) - ) + if labels_cnt < 4: + logging.info("Only ${cnt}/4 labels (vm, host, dc, cluster) found, filling n/a".format(cnt=labels_cnt)) - # Yellow alarms - yellow_alarms = [':'.join(a.split(':')[:-1]) for a in alarms if a.split(':')[-1] == 'yellow'] - yellow_alarms_label = ','.join(yellow_alarms) if yellow_alarms else 'n/a' - metrics['vmware_vm_yellow_alarms'].add_metric( - labels + [yellow_alarms_label], - len(yellow_alarms) - ) + for i in range(labels_cnt, 4): + labels.append('n/a') if 'runtime.powerState' in row: power_state = 1 if row['runtime.powerState'] == 'poweredOn' else 0 @@ -1632,44 +827,11 @@ def _vmware_get_hosts(self, host_metrics): """ logging.info("Starting host metrics collection") - if self.fetch_tags: - results, host_labels, host_tags = yield parallelize( - self.host_system_inventory, - self.host_labels, - self.host_tags - ) - - else: - results, host_labels = yield parallelize(self.host_system_inventory, self.host_labels) - - # fetch Custom Attributes Labels ("values") - customAttributes = {} - customAttributesLabelNames = {} - if self.fetch_custom_attributes: - customAttributes = yield self.hostsCustomAttributes - customAttributesLabelNames = yield self.hostsCustomAttributesLabelNames - - # Insert custom attributes names as metric labels - self.updateMetricsLabelNames(host_metrics, ['hosts']) + results, host_labels = yield parallelize(self.host_system_inventory, self.host_labels) for host_id, host in results.items(): try: labels = host_labels[host_id] - - if self.fetch_tags: - tags = host_tags.get(host_id, []) - tags = ','.join(tags) - if not tags: - tags = 'n/a' - - labels += [tags] - - customLabels = [] - for labelName in customAttributesLabelNames: - customLabels.append(customAttributes[host_id].get(labelName)) - - labels += customLabels - except KeyError as e: logging.info( "Key error, unable to register host {error}, host labels are {host_labels}".format( @@ -1678,103 +840,6 @@ def _vmware_get_hosts(self, host_metrics): ) continue 
- """ - filter red and yellow alarms - """ - if self.fetch_alarms: - alarms = [a for a in host.get('triggeredAlarmState', '').split(',') if ':' in a] - - # Red alarms - red_alarms = [':'.join(a.split(':')[:-1]) for a in alarms if a.split(':')[-1] == 'red'] - red_alarms_label = ','.join(red_alarms) if red_alarms else 'n/a' - host_metrics['vmware_host_red_alarms'].add_metric( - labels + [red_alarms_label], - len(red_alarms) - ) - - # Yellow alarms - yellow_alarms = [':'.join(a.split(':')[:-1]) for a in alarms if a.split(':')[-1] == 'yellow'] - yellow_alarms_label = ','.join(yellow_alarms) if yellow_alarms else 'n/a' - host_metrics['vmware_host_yellow_alarms'].add_metric( - labels + [yellow_alarms_label], - len(yellow_alarms) - ) - - # Numeric Sensor Info - sensors = host.get('runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo', '').split(',') + \ - host.get('runtime.healthSystemRuntime.hardwareStatusInfo.cpuStatusInfo', '').split(',') + \ - host.get('runtime.healthSystemRuntime.hardwareStatusInfo.memoryStatusInfo', '').split(',') - - sensors = [s for s in sensors if ':' in s] - - for s in sensors: - sensor = dict(item.split("=") for item in re.split(r':(?=\w+=)', s)[1:]) - - if not all(key in sensor for key in ['sensorStatus', 'name', 'type', 'unit', 'value']): - continue - - sensor_status = { - 'red': 0, - 'yellow': 1, - 'green': 2, - 'unknown': 3, - }[sensor['sensorStatus'].lower()] - - host_metrics['vmware_host_sensor_state'].add_metric( - labels + [sensor['name'], sensor['type']], - sensor_status - ) - - # FAN speed - if sensor["unit"] == 'rpm': - host_metrics['vmware_host_sensor_fan'].add_metric( - labels + [sensor['name']], - int(sensor['value']) * (10 ** (int(sensor['unitModifier']))) - ) - - # Temperature - if sensor["unit"] == 'degrees c': - host_metrics['vmware_host_sensor_temperature'].add_metric( - labels + [sensor['name']], - int(sensor['value']) * (10 ** (int(sensor['unitModifier']))) - ) - - # Power Voltage - if sensor["unit"] == 'volts': - 
host_metrics['vmware_host_sensor_power_voltage'].add_metric( - labels + [sensor['name']], - int(sensor['value']) * (10 ** (int(sensor['unitModifier']))) - ) - - # Power Current - if sensor["unit"] == 'amps': - host_metrics['vmware_host_sensor_power_current'].add_metric( - labels + [sensor['name']], - int(sensor['value']) * (10 ** (int(sensor['unitModifier']))) - ) - - # Power Watt - if sensor["unit"] == 'watts': - host_metrics['vmware_host_sensor_power_watt'].add_metric( - labels + [sensor['name']], - int(sensor['value']) * (10 ** (int(sensor['unitModifier']))) - ) - - # Redundancy - if sensor["unit"] == 'redundancy-discrete': - host_metrics['vmware_host_sensor_redundancy'].add_metric( - labels + [sensor['name']], - int(sensor['value']) - ) - - # Standby Mode - standby_mode = 1 if host.get('runtime.standbyMode') == 'in' else 0 - standby_mode_state = host.get('runtime.standbyMode', 'unknown') - host_metrics['vmware_host_standby_mode'].add_metric( - labels + [standby_mode_state], - standby_mode - ) - # Power state power_state = 1 if host['runtime.powerState'] == 'poweredOn' else 0 host_metrics['vmware_host_power_state'].add_metric(labels, power_state) @@ -1786,23 +851,24 @@ def _vmware_get_hosts(self, host_metrics): 1 ) - # Host in maintenance mode? - if 'runtime.inMaintenanceMode' in host: - host_metrics['vmware_host_maintenance_mode'].add_metric( - labels, - host['runtime.inMaintenanceMode'] * 1, - ) - if not power_state: continue if host.get('runtime.bootTime'): + # Host uptime host_metrics['vmware_host_boot_timestamp_seconds'].add_metric( labels, self._to_epoch(host['runtime.bootTime']) ) + # Host in maintenance mode? 
+ if 'runtime.inMaintenanceMode' in host: + host_metrics['vmware_host_maintenance_mode'].add_metric( + labels, + host['runtime.inMaintenanceMode'] * 1, + ) + # CPU Usage (in Mhz) if 'summary.quickStats.overallCpuUsage' in host: host_metrics['vmware_host_cpu_usage'].add_metric( @@ -1859,6 +925,7 @@ def collect(self): class VMWareMetricsResource(Resource): + isLeaf = True def __init__(self, args): @@ -1871,9 +938,7 @@ def __init__(self, args): def configure(self, args): if args.config_file: try: - with open(args.config_file) as cf: - self.config = yaml.load(cf, Loader=yaml.FullLoader) - + self.config = YamlConfig(args.config_file) if 'default' not in self.config.keys(): logging.error("Error, you must have a default section in config file (for now)") exit(1) @@ -1888,9 +953,6 @@ def configure(self, args): 'vsphere_password': os.environ.get('VSPHERE_PASSWORD'), 'ignore_ssl': get_bool_env('VSPHERE_IGNORE_SSL', False), 'specs_size': os.environ.get('VSPHERE_SPECS_SIZE', 5000), - 'fetch_custom_attributes': get_bool_env('VSPHERE_FETCH_CUSTOM_ATTRIBUTES', False), - 'fetch_tags': get_bool_env('VSPHERE_FETCH_TAGS', False), - 'fetch_alarms': get_bool_env('VSPHERE_FETCH_ALARMS', False), 'collect_only': { 'vms': get_bool_env('VSPHERE_COLLECT_VMS', True), 'vmguests': get_bool_env('VSPHERE_COLLECT_VMGUESTS', True), @@ -1915,9 +977,6 @@ def configure(self, args): 'vsphere_password': os.environ.get('VSPHERE_{}_PASSWORD'.format(section)), 'ignore_ssl': get_bool_env('VSPHERE_{}_IGNORE_SSL'.format(section), False), 'specs_size': os.environ.get('VSPHERE_{}_SPECS_SIZE'.format(section), 5000), - 'fetch_custom_attributes': get_bool_env('VSPHERE_{}_FETCH_CUSTOM_ATTRIBUTES'.format(section), False), - 'fetch_tags': get_bool_env('VSPHERE_{}_FETCH_TAGS'.format(section), False), - 'fetch_alarms': get_bool_env('VSPHERE_{}_FETCH_ALARMS'.format(section), False), 'collect_only': { 'vms': get_bool_env('VSPHERE_{}_COLLECT_VMS'.format(section), True), 'vmguests': 
get_bool_env('VSPHERE_{}_COLLECT_VMGUESTS'.format(section), True), @@ -1973,10 +1032,7 @@ def generate_latest_metrics(self, request): self.config[section]['vsphere_password'], self.config[section]['collect_only'], self.config[section]['specs_size'], - self.config[section]['fetch_custom_attributes'], self.config[section]['ignore_ssl'], - self.config[section]['fetch_tags'], - self.config[section]['fetch_alarms'], ) metrics = yield collector.collect() @@ -1991,6 +1047,7 @@ def generate_latest_metrics(self, request): class HealthzResource(Resource): + isLeaf = True def render_GET(self, request): @@ -2034,15 +1091,10 @@ def main(argv=None): parser = argparse.ArgumentParser(description='VMWare metrics exporter for Prometheus') parser.add_argument('-c', '--config', dest='config_file', default=None, help="configuration file") - parser.add_argument('-a', '--address', dest='address', type=str, - default='', help="HTTP address to expose metrics") parser.add_argument('-p', '--port', dest='port', type=int, default=9272, help="HTTP port to expose metrics") parser.add_argument('-l', '--loglevel', dest='loglevel', default="INFO", help="Set application loglevel INFO, DEBUG") - parser.add_argument('-v', '--version', action="version", - version='vmware_exporter {version}'.format(version=__version__), - help='Print version and exit') args = parser.parse_args(argv or sys.argv[1:]) @@ -2054,8 +1106,8 @@ def main(argv=None): reactor.suggestThreadPoolSize(25) factory = Site(registerEndpoints(args)) - logging.info("Starting web server on port {address}:{port}".format(address=args.address, port=args.port)) - endpoint = endpoints.TCP4ServerEndpoint(reactor, args.port, interface=args.address) + logging.info("Starting web server on port {port}".format(port=args.port)) + endpoint = endpoints.TCP4ServerEndpoint(reactor, args.port) endpoint.listen(factory) reactor.run() From 0f268133ffa173260302b0855aee23ee9fbe9936 Mon Sep 17 00:00:00 2001 From: Massimiliano Ribuoli Date: Mon, 27 Oct 2025 
19:48:55 +0100 Subject: [PATCH 5/5] Move to 0.18.4 --- .flake8 | 1 + .pre-commit-config.yaml | 11 +- CHANGELOG.md | 50 ++ Dockerfile | 4 +- README.md | 78 +- catalog-info.yaml | 10 + dashboards/cluster.json | 52 +- dashboards/esx.json | 41 +- dashboards/esxi.json | 41 +- dashboards/virtualmachine.json | 50 +- openshift/README.md | 27 + openshift/configmap.yaml | 19 + openshift/deployment.yaml | 47 ++ openshift/rolebinding.yaml | 30 + openshift/service.yaml | 17 + openshift/servicemonitor.yaml | 19 + requirements-tests.txt | 11 +- requirements.txt | 2 +- setup.cfg | 1 + tests/unit/test_helpers.py | 15 + tests/unit/test_vmware_exporter.py | 411 ++++++++++- validate-signature.rb | 19 + vmware_exporter/__init__.py | 2 +- vmware_exporter/defer.py | 3 +- vmware_exporter/helpers.py | 97 ++- vmware_exporter/vmware_exporter.py | 1087 ++++++++++++++++++++++++++-- 26 files changed, 1939 insertions(+), 206 deletions(-) create mode 100644 catalog-info.yaml create mode 100644 openshift/README.md create mode 100644 openshift/configmap.yaml create mode 100644 openshift/deployment.yaml create mode 100644 openshift/rolebinding.yaml create mode 100644 openshift/service.yaml create mode 100644 openshift/servicemonitor.yaml create mode 100644 validate-signature.rb diff --git a/.flake8 b/.flake8 index 6deafc2..e7c5cb3 100644 --- a/.flake8 +++ b/.flake8 @@ -1,2 +1,3 @@ [flake8] +ignore = E402 max-line-length = 120 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e736368..37e87b2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,8 @@ repos: +- repo: https://github.com/pre-commit/mirrors-autopep8 + rev: v1.5.6 # Use the sha / tag you want to point at + hooks: + - id: autopep8 - repo: https://github.com/pre-commit/pre-commit-hooks rev: v1.3.0 hooks: @@ -12,6 +16,8 @@ repos: stages: [commit] - id: detect-aws-credentials stages: [commit] + args: + - --allow-missing-credentials # Generic file state - id: trailing-whitespace stages: [commit] @@ 
-34,13 +40,14 @@ repos: - id: flake8 stages: [commit] args: - - --ignore=F705,E123 + - --ignore=F705,E123,E402 - repo: https://github.com/pryorda/dockerfilelint-precommit-hooks rev: v0.1.0 hooks: - id: dockerfilelint stages: [commit] - repo: https://github.com/mattlqx/pre-commit-sign - rev: v1.1.1 + rev: v1.1.3 hooks: - id: sign-commit + stages: [commit-msg] diff --git a/CHANGELOG.md b/CHANGELOG.md index e3af55b..5c351d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,56 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/) and [Keep a changelog](https://github.com/olivierlacan/keep-a-changelog). + + +## v0.18.4 (2022-10-11) +### Fix +* **update_dashboards:** Updating datasource and adding prefix ([`a42968f`](https://github.com/pryorda/vmware_exporter/commit/a42968f0cb87598558f48f99d5341a36ab1175f1)) + +## v0.18.3 (2022-03-25) +### Fix +* **empty_string:** #294 ([`a806b1d`](https://github.com/pryorda/vmware_exporter/commit/a806b1da9f65c965769903ad5691ec1449965ddd)) + +## v0.18.2 (2021-09-26) +### Fix +* **fix_image:** Adding dhub automation - fix image ([#293](https://github.com/pryorda/vmware_exporter/issues/293)) ([`1b8bd18`](https://github.com/pryorda/vmware_exporter/commit/1b8bd18c22613582bdcbd1a5f488ca2f63b1e364)) + +## v0.18.1 (2021-09-26) +### Fix +* **fix_tag:** Adding dhub automation - fix tag ([#292](https://github.com/pryorda/vmware_exporter/issues/292)) ([`c3d7830`](https://github.com/pryorda/vmware_exporter/commit/c3d7830ea92567c21b5e5db51ead6ad3983c4082)) + +## v0.18.0 (2021-09-26) +### Feature +* **adding_dhub_automation:** Adding dhub automation ([#291](https://github.com/pryorda/vmware_exporter/issues/291)) ([`ba56f30`](https://github.com/pryorda/vmware_exporter/commit/ba56f300d1d2c2e7439e1f3406aada1e0111ed34)) + +## v0.17.1 (2021-08-19) +### Fix +* **adding_version:** Adding version cli 
([`f83b058`](https://github.com/pryorda/vmware_exporter/commit/f83b0580f58bc2d3c7d53f99194d03ef02a02758)) + +## v0.17.0 (2021-08-19) +### Feature +* **add_vm_ds:** Adding vm datastore. ([`16c8604`](https://github.com/pryorda/vmware_exporter/commit/16c8604ef4e6c77d1eb5f1876ead544fde540967)) + +## v0.16.1 (2021-06-10) +### Fix +* **fixing_sensor:** Fix for badly behaving super-micro sensor #271 ([`2d5c196`](https://github.com/pryorda/vmware_exporter/commit/2d5c1965ec21ee6afc1d9ff3063bea3ca93bd99d)) + +## v0.16.0 (2021-03-30) +### Feature +* **adding_signature_validation:** Adding Validation for signatures. ([`72430d9`](https://github.com/pryorda/vmware_exporter/commit/72430d91f181b17c977aecb9b1fda90ef83bd4ee)) + +## v0.15.1 (2021-03-30) +### Fix +* **fix_sensor_lookup:** Fixing sensor lookup ([#262](https://github.com/pryorda/vmware_exporter/issues/262)) ([`e97c855`](https://github.com/pryorda/vmware_exporter/commit/e97c855581a4e8db8804c542aaece62b3d85081b)) + +## v0.15.0 (2021-03-29) +### Feature +* **sensors:** Adding sensor metrics ([`da2f489`](https://github.com/pryorda/vmware_exporter/commit/da2f48929fc8e377202c4e193d2d4836e4d90a38)) + +## v0.14.3 (2021-03-11) +### Fix +* **optimize_build:** Remove travis, add ifs to action ([#254](https://github.com/pryorda/vmware_exporter/issues/254)) ([`43d6556`](https://github.com/pryorda/vmware_exporter/commit/43d6556556171b3ada6804a29aaff4710e511094)) + ## [0.4.2] - 2018-01-02 ## Fixed - [#60](https://github.com/pryorda/vmware_exporter/pull/60) Typo Fix diff --git a/Dockerfile b/Dockerfile index a0e7951..4ade0e9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.6-alpine +FROM python:3.7-alpine LABEL MAINTAINER="Daniel Pryor " LABEL NAME=vmware_exporter @@ -6,7 +6,7 @@ LABEL NAME=vmware_exporter WORKDIR /opt/vmware_exporter/ COPY . 
/opt/vmware_exporter/ -RUN set -x; buildDeps="gcc python-dev musl-dev libffi-dev openssl openssl-dev" \ +RUN set -x; buildDeps="gcc python3-dev musl-dev libffi-dev openssl openssl-dev rust cargo" \ && apk add --no-cache --update $buildDeps \ && pip install -r requirements.txt . \ && apk del $buildDeps diff --git a/README.md b/README.md index 24debc0..4ae89b3 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,19 @@ Alternatively, if you don't wish to install the package, run it using `$ vmware_ docker run -it --rm -p 9272:9272 -e VSPHERE_USER=${VSPHERE_USERNAME} -e VSPHERE_PASSWORD=${VSPHERE_PASSWORD} -e VSPHERE_HOST=${VSPHERE_HOST} -e VSPHERE_IGNORE_SSL=True -e VSPHERE_SPECS_SIZE=2000 --name vmware_exporter pryorda/vmware_exporter ``` +When using containers combined with `--env-file` flag, please use capital letters to set bolleans, for example: + +``` +$ podman run -it --rm -p 9272:9272 --name vmware_exporter --env-file config.env pryorda/vmware_exporter +$ cat config.env +VSPHERE_USER=administrator@vsphere.my.domain.com +VSPHERE_PASSWORD=Secure-Pass +VSPHERE_HOST=192.168.0.1 +VSPHERE_IGNORE_SSL=TRUE +VSPHERE_SPECS_SIZE=2000 +``` + + ### Configuration and limiting data collection Only provide a configuration file if enviroment variables are not used. If you do plan to use a configuration file, be sure to override the container entrypoint or add -c config.yml to the command arguments. 
@@ -55,6 +68,9 @@ default: vsphere_password: "password" ignore_ssl: False specs_size: 5000 + fetch_custom_attributes: True + fetch_tags: True + fetch_alarms: True collect_only: vms: True vmguests: True @@ -68,6 +84,9 @@ esx: vsphere_password: 'password' ignore_ssl: True specs_size: 5000 + fetch_custom_attributes: True + fetch_tags: True + fetch_alarms: True collect_only: vms: False vmguests: True @@ -81,6 +100,9 @@ limited: vsphere_password: 'password' ignore_ssl: True specs_size: 5000 + fetch_custom_attributes: True + fetch_tags: True + fetch_alarms: False collect_only: vms: False vmguests: False @@ -92,33 +114,39 @@ limited: Switching sections can be done by adding ?section=limited to the URL. #### Environment Variables -| Variable | Precedence | Defaults | Description | -| ---------------------------- | ---------------------- | -------- | --------------------------------------- | -| `VSPHERE_HOST` | config, env, get_param | n/a | vsphere server to connect to | -| `VSPHERE_USER` | config, env | n/a | User for connecting to vsphere | -| `VSPHERE_PASSWORD` | config, env | n/a | Password for connecting to vsphere | -| `VSPHERE_SPECS_SIZE` | config, env | 5000 | Size of specs list for query stats function | -| `VSPHERE_IGNORE_SSL` | config, env | False | Ignore the ssl cert on the connection to vsphere host | -| `VSPHERE_COLLECT_HOSTS` | config, env | True | Set to false to disable collection of host metrics | -| `VSPHERE_COLLECT_DATASTORES` | config, env | True | Set to false to disable collection of datastore metrics | -| `VSPHERE_COLLECT_VMS` | config, env | True | Set to false to disable collection of virtual machine metrics | -| `VSPHERE_COLLECT_VMGUESTS` | config, env | True | Set to false to disable collection of virtual machine guest metrics | -| `VSPHERE_COLLECT_SNAPSHOTS` | config, env | True | Set to false to disable collection of snapshot metrics | +| Variable | Precedence | Defaults | Description | +| --------------------------------------| 
---------------------- | -------- | --------------------------------------------------------------------------| +| `VSPHERE_HOST` | config, env, get_param | n/a | vsphere server to connect to | +| `VSPHERE_USER` | config, env | n/a | User for connecting to vsphere | +| `VSPHERE_PASSWORD` | config, env | n/a | Password for connecting to vsphere | +| `VSPHERE_SPECS_SIZE` | config, env | 5000 | Size of specs list for query stats function | +| `VSPHERE_IGNORE_SSL` | config, env | False | Ignore the ssl cert on the connection to vsphere host | +| `VSPHERE_FETCH_CUSTOM_ATTRIBUTES` | config, env | False | Set to true to collect objects custom attributes as metric labels | +| `VSPHERE_FETCH_TAGS` | config, env | False | Set to true to collect objects tags as metric labels | +| `VSPHERE_FETCH_ALARMS` | config, env | False | Fetch objects triggered alarms, and in case of hosts hdw alarms as well | +| `VSPHERE_COLLECT_HOSTS` | config, env | True | Set to false to disable collection of host metrics | +| `VSPHERE_COLLECT_DATASTORES` | config, env | True | Set to false to disable collection of datastore metrics | +| `VSPHERE_COLLECT_VMS` | config, env | True | Set to false to disable collection of virtual machine metrics | +| `VSPHERE_COLLECT_VMGUESTS` | config, env | True | Set to false to disable collection of virtual machine guest metrics | +| `VSPHERE_COLLECT_SNAPSHOTS` | config, env | True | Set to false to disable collection of snapshot metrics | You can create new sections as well, with very similiar variables. 
For example, to create a `limited` section you can set: -| Variable | Precedence | Defaults | Description | -| ---------------------------- | ---------------------- | -------- | --------------------------------------- | -| `VSPHERE_LIMITED_HOST` | config, env, get_param | n/a | vsphere server to connect to | -| `VSPHERE_LIMITED_USER` | config, env | n/a | User for connecting to vsphere | -| `VSPHERE_LIMITED_PASSWORD` | config, env | n/a | Password for connecting to vsphere | -| `VSPHERE_LIMITED_SPECS_SIZE` | config, env | 5000 | Size of specs list for query stats function | -| `VSPHERE_LIMITED_IGNORE_SSL` | config, env | False | Ignore the ssl cert on the connection to vsphere host | -| `VSPHERE_LIMITED_COLLECT_HOSTS` | config, env | True | Set to false to disable collection of host metrics | -| `VSPHERE_LIMITED_COLLECT_DATASTORES` | config, env | True | Set to false to disable collection of datastore metrics | -| `VSPHERE_LIMITED_COLLECT_VMS` | config, env | True | Set to false to disable collection of virtual machine metrics | -| `VSPHERE_LIMITED_COLLECT_VMGUESTS` | config, env | True | Set to false to disable collection of virtual machine guest metrics | -| `VSPHERE_LIMITED_COLLECT_SNAPSHOTS` | config, env | True | Set to false to disable collection of snapshot metrics | +| Variable | Precedence | Defaults | Description | +| ----------------------------------------------| ---------------------- | -------- | --------------------------------------------------------------------------| +| `VSPHERE_LIMITED_HOST` | config, env, get_param | n/a | vsphere server to connect to | +| `VSPHERE_LIMITED_USER` | config, env | n/a | User for connecting to vsphere | +| `VSPHERE_LIMITED_PASSWORD` | config, env | n/a | Password for connecting to vsphere | +| `VSPHERE_LIMITED_SPECS_SIZE` | config, env | 5000 | Size of specs list for query stats function | +| `VSPHERE_LIMITED_IGNORE_SSL` | config, env | False | Ignore the ssl cert on the connection to vsphere host | +| 
`VSPHERE_LIMITED_FETCH_CUSTOM_ATTRIBUTES` | config, env | False | Set to true to collect objects custom attributes as metric labels | +| `VSPHERE_LIMITED_FETCH_TAGS` | config, env | False | Set to true to collect objects tags as metric labels | +| `VSPHERE_LIMITED_FETCH_ALARMS` | config, env | False | Fetch objects triggered alarms, and in case of hosts hdw alarms as well | +| `VSPHERE_LIMITED_COLLECT_HOSTS` | config, env | True | Set to false to disable collection of host metrics | +| `VSPHERE_LIMITED_COLLECT_DATASTORES` | config, env | True | Set to false to disable collection of datastore metrics | +| `VSPHERE_LIMITED_COLLECT_VMS` | config, env | True | Set to false to disable collection of virtual machine metrics | +| `VSPHERE_LIMITED_COLLECT_VMGUESTS` | config, env | True | Set to false to disable collection of virtual machine guest metrics | +| `VSPHERE_LIMITED_COLLECT_SNAPSHOTS` | config, env | True | Set to false to disable collection of snapshot metrics | You need to set at least `VSPHERE_SECTIONNAME_USER` for the section to be detected. 
@@ -243,3 +271,5 @@ Daniel Pryor [pryorda](https://github.com/pryorda) ## License See LICENSE file + +[![Known Vulnerabilities](https://snyk.io/test/github/rmontenegroo/vmware_exporter/badge.svg?targetFile=requirements.txt)](https://snyk.io/test/github/rmontenegroo/vmware_exporter?targetFile=requirements.txt) diff --git a/catalog-info.yaml b/catalog-info.yaml new file mode 100644 index 0000000..3aa0124 --- /dev/null +++ b/catalog-info.yaml @@ -0,0 +1,10 @@ +apiVersion: backstage.io/v1alpha1 +kind: Component +metadata: + name: vmware_exporter + annotations: + github.com/project-slug: pryorda/vmware_exporter +spec: + type: service + lifecycle: unknown + owner: production-engineering diff --git a/dashboards/cluster.json b/dashboards/cluster.json index b51a28a..d978f2e 100644 --- a/dashboards/cluster.json +++ b/dashboards/cluster.json @@ -31,7 +31,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "short", "gauge": { "maxValue": 100, @@ -112,7 +112,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "hertz", "gauge": { "maxValue": 100, @@ -193,7 +193,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "decmbytes", "gauge": { "maxValue": 100, @@ -274,7 +274,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "none", "gauge": { "maxValue": 100, @@ -355,7 +355,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "none", "gauge": { "maxValue": 100, @@ -439,7 +439,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "gridPos": { "h": 5, "w": 4, @@ -504,7 +504,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "gridPos": { "h": 5, "w": 4, @@ -569,7 +569,7 @@ }, { 
"cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "gridPos": { "h": 5, "w": 4, @@ -638,7 +638,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -732,7 +732,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "percent", "gauge": { "maxValue": 100, @@ -807,7 +807,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "gridPos": { "h": 5, "w": 4, @@ -872,7 +872,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "gridPos": { "h": 5, "w": 4, @@ -941,7 +941,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1032,7 +1032,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1124,7 +1124,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1216,7 +1216,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1307,7 +1307,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1399,7 +1399,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1490,7 +1490,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "decimals": 1, 
"fill": 1, "fillGradient": 0, @@ -1582,7 +1582,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1674,7 +1674,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1765,7 +1765,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1856,7 +1856,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1944,7 +1944,9 @@ ], "schemaVersion": 20, "style": "dark", - "tags": [], + "tags": [ + "vmware" + ], "templating": { "list": [ { @@ -1972,7 +1974,7 @@ "cluster1" ] }, - "datasource": "Prometheus", + "datasource": "$datasource", "definition": "label_values(cluster_name)", "hide": 0, "includeAll": false, diff --git a/dashboards/esx.json b/dashboards/esx.json index 7493592..505b93f 100644 --- a/dashboards/esx.json +++ b/dashboards/esx.json @@ -1,7 +1,7 @@ { "__inputs": [ { - "name": "DS_PROMETHEUS", + "name": "datasource", "label": "prometheus", "description": "", "type": "datasource", @@ -59,7 +59,7 @@ "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], - "datasource": "${DS_PROMETHEUS}", + "datasource": "${datasource}", "decimals": 1, "description": "System uptime", "format": "s", @@ -139,7 +139,7 @@ "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], - "datasource": "${DS_PROMETHEUS}", + "datasource": "${datasource}", "format": "percent", "gauge": { "maxValue": 100, @@ -218,7 +218,7 @@ "rgba(237, 129, 40, 0.89)", "rgba(245, 54, 54, 0.9)" ], - "datasource": "${DS_PROMETHEUS}", + "datasource": "${datasource}", "format": "percent", "gauge": { "maxValue": 100, @@ -296,7 +296,7 @@ "rgba(237, 129, 40, 0.89)", 
"rgba(50, 172, 45, 0.97)" ], - "datasource": "${DS_PROMETHEUS}", + "datasource": "${datasource}", "format": "none", "gauge": { "maxValue": 100, @@ -385,7 +385,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "${datasource}", "decimals": 1, "fill": 0, "id": 1, @@ -468,7 +468,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", + "datasource": "${datasource}", "decimals": 1, "fill": 1, "id": 2, @@ -557,13 +557,33 @@ ], "schemaVersion": 14, "style": "dark", - "tags": [], + "tags": [ + "vmware", + "esx" + ], "templating": { "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "includeAll": false, + "label": "Datasource", + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, { "allValue": null, "current": {}, - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", "hide": 0, "includeAll": false, "label": "Host:", @@ -612,6 +632,7 @@ ] }, "timezone": "browser", - "title": "ESX Hosts Information", + "title": "VMware ESX Hosts Information", + "uid": "ed9d4bbf8801a8f79194b2ce6ead0ffcb8f9952a", "version": 17 } \ No newline at end of file diff --git a/dashboards/esxi.json b/dashboards/esxi.json index 0baa91f..6c5e527 100644 --- a/dashboards/esxi.json +++ b/dashboards/esxi.json @@ -31,7 +31,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "short", "gauge": { "maxValue": 100, @@ -114,7 +114,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "none", "gauge": { "maxValue": 100, @@ -197,7 +197,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "none", "gauge": { "maxValue": 100, @@ -280,7 +280,7 @@ "rgba(237, 129, 40, 0.89)", 
"#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "none", "gauge": { "maxValue": 100, @@ -363,7 +363,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "none", "gauge": { "maxValue": 100, @@ -446,7 +446,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "s", "gauge": { "maxValue": 100, @@ -528,7 +528,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "short", "gauge": { "maxValue": 100, @@ -610,7 +610,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "decmbytes", "gauge": { "maxValue": 100, @@ -685,7 +685,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "gridPos": { "h": 4, "w": 4, @@ -750,7 +750,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "gridPos": { "h": 4, "w": 4, @@ -813,7 +813,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "gridPos": { "h": 4, "w": 4, @@ -885,7 +885,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "percent", "gauge": { "maxValue": 100, @@ -964,7 +964,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1060,7 +1060,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1163,7 +1163,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1261,7 +1261,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": 
false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1367,7 +1367,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1491,7 +1491,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1600,7 +1600,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1701,7 +1701,6 @@ "schemaVersion": 20, "style": "dark", "tags": [ - "prometheus", "vmware", "esxi" ], @@ -1730,7 +1729,7 @@ "text": "192.168.0.27", "value": "192.168.0.27" }, - "datasource": "Prometheus", + "datasource": "$datasource", "definition": "", "hide": 0, "includeAll": false, diff --git a/dashboards/virtualmachine.json b/dashboards/virtualmachine.json index d3b51c5..94dc1f1 100644 --- a/dashboards/virtualmachine.json +++ b/dashboards/virtualmachine.json @@ -31,7 +31,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "none", "gauge": { "maxValue": 100, @@ -114,7 +114,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "none", "gauge": { "maxValue": 100, @@ -197,7 +197,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "none", "gauge": { "maxValue": 100, @@ -284,7 +284,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "none", "gauge": { "maxValue": 100, @@ -371,7 +371,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "short", "gauge": { "maxValue": 100, @@ -459,7 +459,7 @@ 
"rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "none", "gauge": { "maxValue": 100, @@ -542,7 +542,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "short", "gauge": { "maxValue": 100, @@ -617,7 +617,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "gridPos": { "h": 5, "w": 3, @@ -682,7 +682,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "gridPos": { "h": 5, "w": 3, @@ -747,7 +747,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "gridPos": { "h": 5, "w": 3, @@ -819,7 +819,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "decmbytes", "gauge": { "maxValue": 100, @@ -901,7 +901,7 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "datasource": "Prometheus", + "datasource": "$datasource", "format": "percent", "gauge": { "maxValue": 100, @@ -976,7 +976,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "gridPos": { "h": 5, "w": 3, @@ -1042,7 +1042,7 @@ }, { "cacheTimeout": null, - "datasource": "Prometheus", + "datasource": "$datasource", "gridPos": { "h": 5, "w": 3, @@ -1112,7 +1112,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1208,7 +1208,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1304,7 +1304,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1395,7 +1395,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": 
"Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1491,7 +1491,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1613,7 +1613,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "decimals": 1, "fill": 1, "fillGradient": 0, @@ -1710,7 +1710,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1811,7 +1811,7 @@ "cacheTimeout": null, "dashLength": 10, "dashes": false, - "datasource": "Prometheus", + "datasource": "$datasource", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1899,7 +1899,9 @@ ], "schemaVersion": 20, "style": "dark", - "tags": [], + "tags": [ + "vmware" + ], "templating": { "list": [ { @@ -1925,7 +1927,7 @@ "text": "centos-dhcp", "value": "centos-dhcp" }, - "datasource": "Prometheus", + "datasource": "$datasource", "definition": "label_values(vm_name)", "hide": 0, "includeAll": false, diff --git a/openshift/README.md b/openshift/README.md new file mode 100644 index 0000000..fed5ca0 --- /dev/null +++ b/openshift/README.md @@ -0,0 +1,27 @@ +### Installing vmware_exporter in OpenShift + +Create the secret as described in the kubernetes documentation + +TODO: Use existing secret +``` +read -s VSPHERE_PASSWORD +oc create secret generic -n openshift-vsphere-infra vmware-exporter-password --from-literal=VSPHERE_PASSWORD=$VSPHERE_PASSWORD +``` + +Modify the `configmap.yaml` for your configuration and apply. 
+ +``` +oc apply -f configmap.yaml +``` + +Apply the role, rolebinding, service, deployment and ServiceMonitor + +``` +oc apply -f rolebinding.yaml +oc apply -f service.yaml +oc apply -f deployment.yaml +oc apply -f servicemonitor.yaml +``` + + + diff --git a/openshift/configmap.yaml b/openshift/configmap.yaml new file mode 100644 index 0000000..ec2b137 --- /dev/null +++ b/openshift/configmap.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +data: + VSPHERE_COLLECT_DATASTORES: "True" + VSPHERE_COLLECT_HOSTS: "True" + VSPHERE_COLLECT_SNAPSHOTS: "False" + VSPHERE_COLLECT_VMGUESTS: "True" + VSPHERE_COLLECT_VMS: "True" + VSPHERE_FETCH_ALARMS: "True" + VSPHERE_FETCH_CUSTOM_ATTRIBUTES: "True" + VSPHERE_FETCH_TAGS: "True" + VSPHERE_HOST: vcenter + VSPHERE_IGNORE_SSL: "True" + VSPHERE_USER: user +kind: ConfigMap +metadata: + labels: + app: vmware-exporter + name: vmware-exporter-config + namespace: openshift-vsphere-infra diff --git a/openshift/deployment.yaml b/openshift/deployment.yaml new file mode 100644 index 0000000..8ea7b20 --- /dev/null +++ b/openshift/deployment.yaml @@ -0,0 +1,47 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vmware-exporter + namespace: openshift-vsphere-infra +spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app: vmware-exporter + k8s-app: vmware-exporter + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: vmware-exporter + k8s-app: vmware-exporter + release: vmware-exporter + spec: + containers: + - envFrom: + - configMapRef: + name: vmware-exporter-config + - secretRef: + name: vmware-exporter-password + image: quay.io/jcallen/vmware_exporter:add_metrics + imagePullPolicy: Always + name: vmware-exporter + ports: + - containerPort: 9272 + name: http + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: 
ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 diff --git a/openshift/rolebinding.yaml b/openshift/rolebinding.yaml new file mode 100644 index 0000000..4e47a04 --- /dev/null +++ b/openshift/rolebinding.yaml @@ -0,0 +1,30 @@ +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: prometheus-k8s + namespace: openshift-vsphere-infra +rules: + - verbs: + - get + - list + - watch + apiGroups: + - '' + resources: + - services + - endpoints + - pods +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: prometheus-k8s + namespace: openshift-vsphere-infra +subjects: + - kind: ServiceAccount + name: prometheus-k8s + namespace: openshift-monitoring +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-k8s diff --git a/openshift/service.yaml b/openshift/service.yaml new file mode 100644 index 0000000..23a1de1 --- /dev/null +++ b/openshift/service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: vmware-exporter + name: metrics + namespace: openshift-vsphere-infra +spec: + ports: + - name: metrics + port: 9272 + protocol: TCP + targetPort: 9272 + selector: + k8s-app: vmware-exporter + sessionAffinity: None + type: ClusterIP diff --git a/openshift/servicemonitor.yaml b/openshift/servicemonitor.yaml new file mode 100644 index 0000000..f8cdf20 --- /dev/null +++ b/openshift/servicemonitor.yaml @@ -0,0 +1,19 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + k8s-app: vmware-exporter + name: vmware + namespace: openshift-monitoring +spec: + endpoints: + - interval: 30s + port: metrics + scheme: http + jobLabel: app + namespaceSelector: + matchNames: + - openshift-vsphere-infra + selector: + matchLabels: + k8s-app: vmware-exporter diff --git a/requirements-tests.txt b/requirements-tests.txt index 7b75bfc..3e3cc7a 100644 --- a/requirements-tests.txt +++ 
b/requirements-tests.txt @@ -1,6 +1,7 @@ pytest_docker_tools==0.2.0 -pytest==3.3 -pytest-cov==2.6.0 -pytest-twisted==1.8 -codecov==2.0.15 -flake8==3.6.0 +pytest==5.4.1 +pytest-cov==2.8.1 +pytest-twisted==1.12 +codecov==2.0.17 +flake8>=3.6.0 +pyflakes>=1.5.0 diff --git a/requirements.txt b/requirements.txt index a5b7bd8..d924920 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,5 +2,5 @@ prometheus-client==0.0.19 pytz pyvmomi>=6.5 twisted>=14.0.2 -yamlconfig +pyyaml>=5.1 service-identity diff --git a/setup.cfg b/setup.cfg index 9e6033b..41a4f67 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,3 @@ [semantic_release] version_variable = vmware_exporter/__init__.py:__version__ +branch = main diff --git a/tests/unit/test_helpers.py b/tests/unit/test_helpers.py index 87c4ece..14d98ba 100644 --- a/tests/unit/test_helpers.py +++ b/tests/unit/test_helpers.py @@ -45,6 +45,21 @@ def test_batch_fetch_properties(): # but the real return value has methods with side effects. So we need to use a fake. 
content.viewManager.CreateContainerView.return_value = FakeView() + mockCustomField1 = mock.Mock() + mockCustomField1.key = 1 + mockCustomField1.name = 'customAttribute1' + mockCustomField1.managedObjectType = vim.Datastore + + mockCustomField2 = mock.Mock() + mockCustomField2.key = 2 + mockCustomField2.name = 'customAttribute2' + mockCustomField1.managedObjectType = vim.VirtualMachine + + content.customFieldsManager.field = [ + mockCustomField1, + mockCustomField2, + ] + prop1 = mock.Mock() prop1.name = 'someprop' prop1.val = 1 diff --git a/tests/unit/test_vmware_exporter.py b/tests/unit/test_vmware_exporter.py index d26dca8..9aa54a9 100644 --- a/tests/unit/test_vmware_exporter.py +++ b/tests/unit/test_vmware_exporter.py @@ -72,6 +72,9 @@ def test_collect_vms(): 'password', collect_only, 5000, + False, + True, + False ) collector.content = _succeed(mock.Mock()) @@ -87,6 +90,7 @@ def test_collect_vms(): 'summary.config.memorySizeMB': 1024, 'runtime.maxCpuUsage': 2400, 'summary.config.template': False, + 'summary.config.vmPathName': '[datastore-1] vm-1/vm-1.vmx', 'runtime.bootTime': boot_time, 'snapshot': snapshot, 'guest.disk': [disk], @@ -95,7 +99,7 @@ def test_collect_vms(): 'guest.toolsVersionStatus2': 'guestToolsUnmanaged', } }) - assert collector.vm_labels.result == {'vm-1': ['vm-1']} + assert collector.vm_labels.result == {'vm-1': ['vm-1', 'datastore-1', 'n/a', 'n/a', 'n/a']} # Test template True @@ -105,6 +109,9 @@ def test_collect_vms(): 'password', collect_only, 5000, + False, + True, + False ) collector.content = _succeed(mock.Mock()) @@ -123,6 +130,7 @@ def test_collect_vms(): 'summary.config.numCpu': 1, 'summary.config.memorySizeMB': 1024, 'summary.config.template': True, + 'summary.config.vmPathName': '[datastore-1] vm-1/vm-1.vmx', 'runtime.bootTime': boot_time, 'snapshot': snapshot, 'guest.disk': [disk], @@ -134,8 +142,8 @@ def test_collect_vms(): yield collector._vmware_get_vms(metrics) assert _check_properties(batch_fetch_properties.call_args[0][1]) 
assert collector.vm_labels.result == { - 'vm-1': ['vm-1', 'host-1', 'dc', 'cluster-1'], - } + 'vm-1': ['vm-1', 'datastore-1', 'host-1', 'dc', 'cluster-1'], + } assert metrics['vmware_vm_template'].samples[0][2] == 1.0 @@ -147,6 +155,9 @@ def test_collect_vms(): 'password', collect_only, 5000, + False, + True, + False ) collector.content = _succeed(mock.Mock()) @@ -166,6 +177,7 @@ def test_collect_vms(): 'summary.config.memorySizeMB': 1024, 'runtime.maxCpuUsage': 2400, 'summary.config.template': False, + 'summary.config.vmPathName': '[datastore-1] vm-1/vm-1.vmx', 'runtime.bootTime': boot_time, 'snapshot': snapshot, 'guest.disk': [disk], @@ -180,6 +192,7 @@ def test_collect_vms(): 'summary.config.memorySizeMB': 1024, 'runtime.maxCpuUsage': 2400, 'summary.config.template': False, + 'summary.config.vmPathName': '[datastore-1] vm-2/vm-2.vmx', 'runtime.bootTime': boot_time, 'snapshot': snapshot, 'guest.disk': [disk], @@ -195,6 +208,7 @@ def test_collect_vms(): 'summary.config.memorySizeMB': 1024, 'runtime.maxCpuUsage': 2400, 'summary.config.template': False, + 'summary.config.vmPathName': '[datastore-1] vm-3/vm-3.vmx', 'runtime.bootTime': boot_time, 'snapshot': snapshot, 'guest.disk': [disk], @@ -206,14 +220,15 @@ def test_collect_vms(): yield collector._vmware_get_vms(metrics) assert _check_properties(batch_fetch_properties.call_args[0][1]) assert collector.vm_labels.result == { - 'vm-1': ['vm-1', 'host-1', 'dc', 'cluster-1'], - 'vm-2': ['vm-2'], - 'vm-3': ['vm-3', 'host-1', 'dc', 'cluster-1'], - } + 'vm-1': ['vm-1', 'datastore-1', 'host-1', 'dc', 'cluster-1'], + 'vm-2': ['vm-2', 'datastore-1', 'n/a', 'n/a', 'n/a'], + 'vm-3': ['vm-3', 'datastore-1', 'host-1', 'dc', 'cluster-1'], + } # Assert that vm-3 skipped #69/#70 assert metrics['vmware_vm_power_state'].samples[1][1] == { 'vm_name': 'vm-3', + 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -222,6 +237,7 @@ def test_collect_vms(): # General VM metrics assert 
metrics['vmware_vm_power_state'].samples[0][1] == { 'vm_name': 'vm-1', + 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -230,6 +246,7 @@ def test_collect_vms(): assert metrics['vmware_vm_boot_timestamp_seconds'].samples[0][1] == { 'vm_name': 'vm-1', + 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -239,6 +256,7 @@ def test_collect_vms(): # Disk info (vmguest) assert metrics['vmware_vm_guest_disk_capacity'].samples[0][1] == { 'vm_name': 'vm-1', + 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -249,6 +267,7 @@ def test_collect_vms(): # VM tools info (vmguest) assert metrics['vmware_vm_guest_tools_running_status'].samples[0][1] == { 'vm_name': 'vm-1', + 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -258,6 +277,7 @@ def test_collect_vms(): assert metrics['vmware_vm_guest_tools_version'].samples[0][1] == { 'vm_name': 'vm-1', + 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -267,6 +287,7 @@ def test_collect_vms(): assert metrics['vmware_vm_guest_tools_version_status'].samples[0][1] == { 'vm_name': 'vm-1', + 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -277,6 +298,7 @@ def test_collect_vms(): # Snapshots assert metrics['vmware_vm_snapshots'].samples[0][1] == { 'vm_name': 'vm-1', + 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -285,6 +307,7 @@ def test_collect_vms(): assert metrics['vmware_vm_snapshot_timestamp_seconds'].samples[0][1] == { 'vm_name': 'vm-1', + 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -294,6 +317,7 @@ def test_collect_vms(): assert metrics['vmware_vm_snapshot_timestamp_seconds'].samples[1][1] == { 'vm_name': 'vm-1', + 'ds_name': 'datastore-1', 
'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -304,6 +328,7 @@ def test_collect_vms(): # Max Memory assert metrics['vmware_vm_memory_max'].samples[0][1] == { 'vm_name': 'vm-1', + 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -313,6 +338,7 @@ def test_collect_vms(): # Max Cpu assert metrics['vmware_vm_max_cpu_usage'].samples[0][1] == { 'vm_name': 'vm-1', + 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -344,6 +370,9 @@ def test_metrics_without_hostaccess(): 'password', collect_only, 5000, + False, + True, + False ) metrics = collector._create_metric_containers() collector.content = _succeed(mock.Mock()) @@ -359,6 +388,7 @@ def test_metrics_without_hostaccess(): 'summary.config.memorySizeMB': 1024, 'runtime.maxCpuUsage': 2400, 'summary.config.template': False, + 'summary.config.vmPathName': '[datastore-1] vm-x/vm-x.vmx', 'runtime.bootTime': boot_time, 'guest.disk': [disk], 'guest.toolsStatus': 'toolsOk', @@ -366,13 +396,14 @@ def test_metrics_without_hostaccess(): 'guest.toolsVersionStatus2': 'guestToolsUnmanaged', } }) - assert collector.vm_labels.result == {'vm-1': ['vm-x']} + assert collector.vm_labels.result == {'vm-1': ['vm-x', 'datastore-1', 'n/a', 'n/a', 'n/a']} yield collector._vmware_get_vms(metrics) # 113 AssertionError {'partition': '/boot'} vs {'host_name': '/boot'} assert metrics['vmware_vm_guest_disk_capacity'].samples[0][1] == { 'vm_name': 'vm-x', 'partition': '/boot', + 'ds_name': 'datastore-1', 'host_name': 'n/a', 'cluster_name': 'n/a', 'dc_name': 'n/a', @@ -382,6 +413,7 @@ def test_metrics_without_hostaccess(): # but found ['vm-1'] assert metrics['vmware_vm_power_state'].samples[0][1] == { 'vm_name': 'vm-x', + 'ds_name': 'datastore-1', 'host_name': 'n/a', 'cluster_name': 'n/a', 'dc_name': 'n/a', @@ -541,7 +573,7 @@ def test_collect_vm_perf(): }) collector.__dict__['vm_labels'] = _succeed({ - 'vm:1': ['vm-1', 'host-1', 
'dc', 'cluster-1'], + 'vm:1': ['vm-1', 'datastore-1', 'host-1', 'dc', 'cluster-1'], }) collector.__dict__['vm_inventory'] = _succeed({ @@ -562,6 +594,7 @@ def test_collect_vm_perf(): # General VM metrics assert metrics['vmware_vm_net_transmitted_average'].samples[0][1] == { 'vm_name': 'vm-1', + 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -570,6 +603,7 @@ def test_collect_vm_perf(): assert metrics['vmware_vm_cpu_demand_average'].samples[0][1] == { 'vm_name': 'vm-1', + 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -578,6 +612,7 @@ def test_collect_vm_perf(): assert metrics['vmware_vm_disk_maxTotalLatency_latest'].samples[0][1] == { 'vm_name': 'vm-1', + 'ds_name': 'datastore-1', 'host_name': 'host-1', 'cluster_name': 'cluster-1', 'dc_name': 'dc', @@ -602,12 +637,19 @@ def test_collect_hosts(): 'password', collect_only, 5000, + True, + False, + False, + True ) collector.content = _succeed(mock.Mock()) collector.__dict__['host_labels'] = _succeed({ 'host:1': ['host-1', 'dc', 'cluster'], - 'host:2': ['host-1', 'dc', 'cluster'], + 'host:2': ['host-2', 'dc', 'cluster'], + 'host:3': ['host-3', 'dc', 'cluster'], + 'host:4': ['host-4', 'dc', 'cluster'], + 'host:5': ['host-5', 'dc', 'cluster'], }) metrics = collector._create_metric_containers() @@ -620,6 +662,7 @@ def test_collect_hosts(): 'runtime.powerState': 'poweredOn', 'runtime.bootTime': boot_time, 'runtime.connectionState': 'connected', + 'runtime.standbyMode': 'none', 'runtime.inMaintenanceMode': True, 'summary.quickStats.overallCpuUsage': 100, 'summary.hardware.numCpuCores': 12, @@ -630,12 +673,110 @@ def test_collect_hosts(): 'summary.config.product.build': '6765062', 'summary.hardware.cpuModel': 'cpu_model1', 'summary.hardware.model': 'model1', + 'summary.customValue': { + 'customValue1': 'value1', + 'customValue2': 'value2', + }, + 'triggeredAlarmState': '', + 
'runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo': '', }, 'host:2': { 'id': 'host:2', 'name': 'host-2', 'runtime.powerState': 'poweredOff', - } + 'runtime.standbyMode': 'none', + 'summary.customValue': {}, + 'triggeredAlarmState': '', + 'runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo': '', + }, + 'host:3': { + 'id': 'host:3', + 'name': 'host-3', + 'runtime.powerState': 'poweredOn', + 'runtime.bootTime': boot_time, + 'runtime.connectionState': 'connected', + 'runtime.standbyMode': 'in', + 'runtime.inMaintenanceMode': True, + 'summary.quickStats.overallCpuUsage': 100, + 'summary.hardware.numCpuCores': 8, + 'summary.hardware.cpuMhz': 1000, + 'summary.quickStats.overallMemoryUsage': 1024, + 'summary.hardware.memorySize': 2048 * 1024 * 1024, + 'summary.config.product.version': '6.0.0', + 'summary.config.product.build': '6765063', + 'summary.hardware.cpuModel': 'cpu_model1', + 'summary.hardware.model': 'model1', + 'summary.customValue': {}, + 'triggeredAlarmState': '', + 'runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo': '', + }, + 'host:4': { + 'id': 'host:4', + 'name': 'host-4', + 'runtime.powerState': 'poweredOn', + 'runtime.bootTime': boot_time, + 'runtime.connectionState': 'connected', + 'runtime.standbyMode': 'entering', + 'runtime.inMaintenanceMode': True, + 'summary.quickStats.overallCpuUsage': 100, + 'summary.hardware.numCpuCores': 6, + 'summary.hardware.cpuMhz': 1000, + 'summary.quickStats.overallMemoryUsage': 1024, + 'summary.hardware.memorySize': 2048 * 1024 * 1024, + 'summary.config.product.version': '6.0.0', + 'summary.config.product.build': '6765064', + 'summary.hardware.cpuModel': 'cpu_model1', + 'summary.hardware.model': 'model1', + 'summary.customValue': {}, + 'triggeredAlarmState': '', + 'runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo': '', + }, + 'host:5': { + 'id': 'host:5', + 'name': 'host-5', + 'runtime.powerState': 'poweredOn', + 'runtime.bootTime': boot_time, + 
'runtime.connectionState': 'connected', + 'runtime.standbyMode': 'exiting', + 'runtime.inMaintenanceMode': True, + 'summary.quickStats.overallCpuUsage': 100, + 'summary.hardware.numCpuCores': 4, + 'summary.hardware.cpuMhz': 1000, + 'summary.quickStats.overallMemoryUsage': 1024, + 'summary.hardware.memorySize': 2048 * 1024 * 1024, + 'summary.config.product.version': '6.0.0', + 'summary.config.product.build': '6765065', + 'summary.hardware.cpuModel': 'cpu_model1', + 'summary.hardware.model': 'model1', + 'summary.customValue': {}, + 'triggeredAlarmState': ','.join( + ( + 'triggeredAlarm:HostMemoryUsageAlarm:red', + 'triggeredAlarm:HostCPUUsageAlarm:yellow' + ) + ), + 'runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo': ','.join( + ( + 'numericSensorInfo:name=Fan Device 12 System Fan ' + '6B:type=fan:sensorStatus=yellow:value=821700:unitModifier=-2:unit=rpm', + 'numericSensorInfo:name=Power Supply 2 PS2 ' + 'Temperature:type=temperature:sensorStatus=green:value=2900:unitModifier=-2:unit=degrees c', + 'numericSensorInfo:name=System Board 1 VR Watchdog ' + '0:type=voltage:sensorStatus=red:value=2000:unitModifier=0:unit=volts', + 'numericSensorInfo:name=Power Supply 2 Current ' + '2:type=power:sensorStatus=green:value=20:unitModifier=-2:unit=amps', + 'numericSensorInfo:name=System Board 1 Pwr ' + 'Consumption:type=power:sensorStatus=green:value=7000:unitModifier=-2:unit=watts', + 'numericSensorInfo:name=Cooling Unit 1 Fan Redundancy ' + '0:type=power:sensorStatus=green:value=1:unitModifier=0:unit=redundancy-discrete', + 'numericSensorInfo:name=Management Controller Firmware 2 NM ' + 'Capabilities:type=other:sensorStatus=unknown:value=5:unitModifier=0:unit=unspecified', + 'cpuStatusInfo:name=CPU 1:type=n/a:sensorStatus=green:value=n/a:unitModifier=n/a:unit=n/a', + 'memoryStatusInfo:name=Memory 12:type=n/a:sensorStatus=yellow:value=n/a:unitModifier=n/a' + ':unit=n/a', + ) + ), + }, }) yield collector._vmware_get_hosts(metrics) assert 
_check_properties(batch_fetch_properties.call_args[0][1]) @@ -643,7 +784,9 @@ def test_collect_hosts(): assert metrics['vmware_host_memory_max'].samples[0][1] == { 'host_name': 'host-1', 'dc_name': 'dc', - 'cluster_name': 'cluster' + 'cluster_name': 'cluster', + 'customValue1': 'value1', + 'customValue2': 'value2' } assert metrics['vmware_host_memory_max'].samples[0][2] == 2048 assert metrics['vmware_host_num_cpu'].samples[0][2] == 12 @@ -654,13 +797,15 @@ def test_collect_hosts(): 'cluster_name': 'cluster', 'version': '6.0.0', 'build': '6765062', + 'customValue1': 'value1', + 'customValue2': 'value2', } assert metrics['vmware_host_product_info'].samples[0][2] == 1 # In our test data we hava a host that is powered down - we should have its # power_state metric but not any others. - assert len(metrics['vmware_host_power_state'].samples) == 2 - assert len(metrics['vmware_host_memory_max'].samples) == 1 + assert len(metrics['vmware_host_power_state'].samples) == 5 + assert len(metrics['vmware_host_memory_max'].samples) == 4 assert metrics['vmware_host_hardware_info'].samples[0][1] == { 'host_name': 'host-1', @@ -668,9 +813,175 @@ def test_collect_hosts(): 'cluster_name': 'cluster', 'hardware_model': 'model1', 'hardware_cpu_model': 'cpu_model1', + 'customValue1': 'value1', + 'customValue2': 'value2', } assert metrics['vmware_host_hardware_info'].samples[0][2] == 1 + # Host:1 is not on Standby Mode + assert metrics['vmware_host_standby_mode'].samples[0][2] == 0 + assert metrics['vmware_host_standby_mode'].samples[0][1] == { + 'host_name': 'host-1', + 'dc_name': 'dc', + 'cluster_name': 'cluster', + 'standby_mode_state': 'none', + 'customValue1': 'value1', + 'customValue2': 'value2', + } + + # Host:2 is Powered down and Standby Mode and not set + assert metrics['vmware_host_standby_mode'].samples[1][2] == 0 + assert metrics['vmware_host_standby_mode'].samples[1][1] == { + 'host_name': 'host-2', + 'dc_name': 'dc', + 'cluster_name': 'cluster', + 'standby_mode_state': 
'none', + 'customValue1': 'n/a', + 'customValue2': 'n/a', + } + + # Host:3 is on Standby Mode + assert metrics['vmware_host_standby_mode'].samples[2][2] == 1 + assert metrics['vmware_host_standby_mode'].samples[2][1] == { + 'host_name': 'host-3', + 'dc_name': 'dc', + 'cluster_name': 'cluster', + 'standby_mode_state': 'in', + 'customValue1': 'n/a', + 'customValue2': 'n/a', + } + + # Host:4 is not on Standby Mode + assert metrics['vmware_host_standby_mode'].samples[3][2] == 0 + assert metrics['vmware_host_standby_mode'].samples[3][1] == { + 'host_name': 'host-4', + 'dc_name': 'dc', + 'cluster_name': 'cluster', + 'standby_mode_state': 'entering', + 'customValue1': 'n/a', + 'customValue2': 'n/a', + } + + # Host:4 no alarms found + assert metrics['vmware_host_yellow_alarms'].samples[3][2] == 0 + assert metrics['vmware_host_red_alarms'].samples[3][2] == 0 + + # Host:5 is not on Standby Mode + assert metrics['vmware_host_standby_mode'].samples[4][2] == 0 + assert metrics['vmware_host_standby_mode'].samples[4][1] == { + 'host_name': 'host-5', + 'dc_name': 'dc', + 'cluster_name': 'cluster', + 'standby_mode_state': 'exiting', + 'customValue1': 'n/a', + 'customValue2': 'n/a', + } + + # Host:5 testing alarms + assert metrics['vmware_host_yellow_alarms'].samples[4][2] == 1 + assert metrics['vmware_host_red_alarms'].samples[4][2] == 1 + + assert metrics['vmware_host_yellow_alarms'].samples[4][1] == { + 'cluster_name': 'cluster', + 'customValue1': 'n/a', + 'customValue2': 'n/a', + 'dc_name': 'dc', + 'host_name': 'host-5', + 'alarms': 'triggeredAlarm:HostCPUUsageAlarm' + } + + # Host:5 testing sensors + assert len(metrics['vmware_host_sensor_state'].samples) == 9 + assert metrics['vmware_host_sensor_state'].samples[3][1] == { + 'cluster_name': 'cluster', + 'customValue1': 'n/a', + 'customValue2': 'n/a', + 'dc_name': 'dc', + 'host_name': 'host-5', + 'name': 'Power Supply 2 Current 2', + 'type': 'power' + } + + assert metrics['vmware_host_sensor_fan'].samples[0][2] == 8217 + assert 
metrics['vmware_host_sensor_fan'].samples[0][1] == { + 'cluster_name': 'cluster', + 'customValue1': 'n/a', + 'customValue2': 'n/a', + 'dc_name': 'dc', + 'host_name': 'host-5', + 'name': 'Fan Device 12 System Fan 6B', + } + + assert metrics['vmware_host_sensor_temperature'].samples[0][2] == 29 + assert metrics['vmware_host_sensor_temperature'].samples[0][1] == { + 'cluster_name': 'cluster', + 'customValue1': 'n/a', + 'customValue2': 'n/a', + 'dc_name': 'dc', + 'host_name': 'host-5', + 'name': 'Power Supply 2 PS2 Temperature', + } + + assert metrics['vmware_host_sensor_power_voltage'].samples[0][2] == 2000 + assert metrics['vmware_host_sensor_power_voltage'].samples[0][1] == { + 'cluster_name': 'cluster', + 'customValue1': 'n/a', + 'customValue2': 'n/a', + 'dc_name': 'dc', + 'host_name': 'host-5', + 'name': 'System Board 1 VR Watchdog 0', + } + + assert metrics['vmware_host_sensor_power_current'].samples[0][2] == 0.2 + assert metrics['vmware_host_sensor_power_current'].samples[0][1] == { + 'cluster_name': 'cluster', + 'customValue1': 'n/a', + 'customValue2': 'n/a', + 'dc_name': 'dc', + 'host_name': 'host-5', + 'name': 'Power Supply 2 Current 2', + } + + assert metrics['vmware_host_sensor_power_watt'].samples[0][2] == 70 + assert metrics['vmware_host_sensor_power_watt'].samples[0][1] == { + 'cluster_name': 'cluster', + 'customValue1': 'n/a', + 'customValue2': 'n/a', + 'dc_name': 'dc', + 'host_name': 'host-5', + 'name': 'System Board 1 Pwr Consumption', + } + + assert metrics['vmware_host_sensor_redundancy'].samples[0][2] == 1 + assert metrics['vmware_host_sensor_redundancy'].samples[0][1] == { + 'cluster_name': 'cluster', + 'customValue1': 'n/a', + 'customValue2': 'n/a', + 'dc_name': 'dc', + 'host_name': 'host-5', + 'name': 'Cooling Unit 1 Fan Redundancy 0', + } + + assert metrics['vmware_host_sensor_state'].samples[7][1] == { + 'cluster_name': 'cluster', + 'customValue1': 'n/a', + 'customValue2': 'n/a', + 'dc_name': 'dc', + 'host_name': 'host-5', + 'name': 'CPU 1', + 
'type': 'n/a' + } + + assert metrics['vmware_host_sensor_state'].samples[8][1] == { + 'cluster_name': 'cluster', + 'customValue1': 'n/a', + 'customValue2': 'n/a', + 'dc_name': 'dc', + 'host_name': 'host-5', + 'name': 'Memory 12', + 'type': 'n/a' + } + @pytest_twisted.inlineCallbacks def test_collect_host_perf(): @@ -801,8 +1112,22 @@ def test_collect_datastore(): 'password', collect_only, 5000, + True, + True, + True, + True ) collector.content = _succeed(mock.Mock()) + collector.client = _succeed(mock.Mock()) + collector._tagNames = { + 'datastores': ['ds_name', 'dc_name', 'ds_cluster'], + } + + collector.tags = { + 'datastores': { + 'datastore-1': ['tag1'] + } + } collector.__dict__['datastore_labels'] = _succeed({ 'datastore-1': ['datastore-1', 'dc', 'ds_cluster'], @@ -820,16 +1145,38 @@ def test_collect_datastore(): 'vm': ['vm-1'], 'summary.accessible': True, 'summary.maintenanceMode': 'normal', + 'triggeredAlarmState': 'triggeredAlarm:DatastoreDiskUsageAlarm:yellow,triggeredAlarm:OtherAlarm:red' } }) yield collector._vmware_get_datastores(metrics) assert _check_properties(batch_fetch_properties.call_args[0][1]) + assert metrics['vmware_datastore_yellow_alarms'].samples[0][2] == 1 + + assert metrics['vmware_datastore_yellow_alarms'].samples[0][1] == { + 'ds_name': 'datastore-1', + 'dc_name': 'dc', + 'ds_cluster': 'ds_cluster', + 'tags': 'tag1', + 'alarms': 'triggeredAlarm:DatastoreDiskUsageAlarm' + } + + assert metrics['vmware_datastore_red_alarms'].samples[0][2] == 1 + + assert metrics['vmware_datastore_red_alarms'].samples[0][1] == { + 'ds_name': 'datastore-1', + 'dc_name': 'dc', + 'ds_cluster': 'ds_cluster', + 'tags': 'tag1', + 'alarms': 'triggeredAlarm:OtherAlarm' + } + assert metrics['vmware_datastore_capacity_size'].samples[0][1] == { 'ds_name': 'datastore-1', 'dc_name': 'dc', - 'ds_cluster': 'ds_cluster' + 'ds_cluster': 'ds_cluster', + 'tags': 'tag1' } assert metrics['vmware_datastore_capacity_size'].samples[0][2] == 0.0 @@ -837,14 +1184,16 @@ def 
test_collect_datastore(): 'ds_name': 'datastore-1', 'dc_name': 'dc', 'ds_cluster': 'ds_cluster', - 'mode': 'normal' + 'mode': 'normal', + 'tags': 'tag1' } assert metrics['vmware_datastore_maintenance_mode'].samples[0][2] == 1.0 assert metrics['vmware_datastore_accessible'].samples[0][1] == { 'ds_name': 'datastore-1', 'dc_name': 'dc', - 'ds_cluster': 'ds_cluster' + 'ds_cluster': 'ds_cluster', + 'tags': 'tag1' } assert metrics['vmware_datastore_accessible'].samples[0][2] == 1.0 @@ -1249,6 +1598,9 @@ def test_vmware_resource_async_render_GET_section(): 'vsphere_user': 'username1', 'vsphere_password': 'password1', 'specs_size': 5000, + 'fetch_custom_attributes': True, + 'fetch_tags': True, + 'fetch_alarms': True, 'collect_only': { 'datastores': True, 'hosts': True, @@ -1263,6 +1615,9 @@ def test_vmware_resource_async_render_GET_section(): 'vsphere_user': 'username2', 'vsphere_password': 'password2', 'specs_size': 5000, + 'fetch_custom_attributes': True, + 'fetch_tags': True, + 'fetch_alarms': True, 'collect_only': { 'datastores': True, 'hosts': True, @@ -1283,7 +1638,10 @@ def test_vmware_resource_async_render_GET_section(): 'password2', resource.config['mysection']['collect_only'], 5000, - 'On' + True, + 'On', + True, + True ) request.setResponseCode.assert_called_with(200) @@ -1297,6 +1655,9 @@ def test_config_env_multiple_sections(): 'VSPHERE_USER': 'username1', 'VSPHERE_PASSWORD': 'password1', 'VSPHERE_SPECS_SIZE': 5000, + 'VSPHERE_FETCH_CUSTOM_ATTRIBUTES': True, + 'VSPHERE_FETCH_TAGS': True, + 'VSPHERE_FETCH_ALARMS': True, 'VSPHERE_MYSECTION_HOST': '127.0.0.11', 'VSPHERE_MYSECTION_USER': 'username2', 'VSPHERE_MYSECTION_PASSWORD': 'password2', @@ -1316,6 +1677,9 @@ def test_config_env_multiple_sections(): 'vsphere_user': 'username1', 'vsphere_password': 'password1', 'specs_size': 5000, + 'fetch_custom_attributes': True, + 'fetch_tags': True, + 'fetch_alarms': True, 'collect_only': { 'datastores': True, 'hosts': True, @@ -1330,6 +1694,9 @@ def 
test_config_env_multiple_sections(): 'vsphere_user': 'username2', 'vsphere_password': 'password2', 'specs_size': 5000, + 'fetch_custom_attributes': False, + 'fetch_tags': False, + 'fetch_alarms': False, 'collect_only': { 'datastores': True, 'hosts': True, @@ -1354,3 +1721,11 @@ def test_valid_loglevel_cli_argument(): def test_main(): with pytest.raises(SystemExit): main(['-h', '-l debug']) + + +def test_version(capsys): + with pytest.raises(SystemExit): + main(['-v']) + captured = capsys.readouterr() + assert captured.out.startswith("vmware_exporter") + assert captured.err == "" diff --git a/validate-signature.rb b/validate-signature.rb new file mode 100644 index 0000000..07a420e --- /dev/null +++ b/validate-signature.rb @@ -0,0 +1,19 @@ +#!/bin/ruby +require 'pre-commit-sign' +if ARGV.length >= 1 + puts 'Validating signature' + commit_message = ARGV[0] + message_body = commit_message.split("\n").select { |l| l.start_with?(' ') }.join("\n").gsub(/^ /, '') + pcs = PrecommitSign.from_message(message_body) + pcs.date = DateTime.strptime(/^Date:\s+(.*)$/.match(commit_message).captures.first, '%a %b %d %T %Y %z').to_time + puts "Commit Message: #{message_body}" + if pcs.valid_signature? + puts 'Perfect' + else + puts 'Not valid' + exit 1 + end +else + puts "Need a commit message to validate signature from. Try pre-commit install -f && pre-commit install --install-hooks -t commit-msg -f before commiting your code." 
+ exit 1 +end diff --git a/vmware_exporter/__init__.py b/vmware_exporter/__init__.py index 0fa1a3f..361892d 100644 --- a/vmware_exporter/__init__.py +++ b/vmware_exporter/__init__.py @@ -1,3 +1,3 @@ -__version__ = '0.11.1' +__version__ = '0.18.4' __author__ = "Daniel Pryor" __license__ = "BSD 3-Clause License" diff --git a/vmware_exporter/defer.py b/vmware_exporter/defer.py index 05ecffc..724bde3 100644 --- a/vmware_exporter/defer.py +++ b/vmware_exporter/defer.py @@ -1,6 +1,7 @@ ''' Helpers for writing efficient twisted code, optimized for coroutine scheduling efficiency ''' +# autopep8'd from twisted.internet import defer from twisted.python import failure @@ -56,7 +57,7 @@ def errback(self, err): self.callbacks.pop(0).errback(err) def addCallbacks(self, *args, **kwargs): - if not self.result: + if self.result is None: d = defer.Deferred() d.addCallbacks(*args, **kwargs) self.callbacks.append(d) diff --git a/vmware_exporter/helpers.py b/vmware_exporter/helpers.py index afcf205..cb5fc98 100644 --- a/vmware_exporter/helpers.py +++ b/vmware_exporter/helpers.py @@ -1,5 +1,5 @@ +# autopep8'd import os - from pyVmomi import vmodl @@ -15,6 +15,26 @@ def batch_fetch_properties(content, obj_type, properties): recursive=True ) + """ + Gathering all custom attibutes names are stored as key (integer) in CustomFieldsManager + We do not want those keys, but the names. 
So here the names and keys are gathered to + be translated later + """ + if ('customValue' in properties) or ('summary.customValue' in properties): + + allCustomAttributesNames = {} + + if content.customFieldsManager and content.customFieldsManager.field: + allCustomAttributesNames.update( + dict( + [ + (f.key, f.name) + for f in content.customFieldsManager.field + if f.managedObjectType in (obj_type, None) + ] + ) + ) + try: PropertyCollector = vmodl.query.PropertyCollector @@ -40,6 +60,7 @@ def batch_fetch_properties(content, obj_type, properties): filter_spec.propSet = [property_spec] props = content.propertyCollector.RetrieveContents([filter_spec]) + finally: view_ref.Destroy() @@ -50,7 +71,79 @@ def batch_fetch_properties(content, obj_type, properties): properties['id'] = obj.obj._moId for prop in obj.propSet: - properties[prop.name] = prop.val + + """ + if it's a custom value property for vms (summary.customValue), hosts (summary.customValue) + or datastores (customValue) - we store all attributes together in a python dict and + translate its name key to name + """ + if 'customValue' in prop.name: + + properties[prop.name] = {} + + if allCustomAttributesNames: + + properties[prop.name] = dict( + [ + (allCustomAttributesNames[attribute.key], attribute.value) + for attribute in prop.val + if attribute.key in allCustomAttributesNames + ] + ) + + elif 'triggeredAlarmState' == prop.name: + """ + triggered alarms + """ + try: + alarms = list( + 'triggeredAlarm:{}:{}'.format(item.alarm.info.systemName.split('.')[1], item.overallStatus) + for item in prop.val + ) + except Exception: + alarms = ['triggeredAlarm:AlarmsUnavailable:yellow'] + + properties[prop.name] = ','.join(alarms) + + elif 'runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo' == prop.name: + """ + handle numericSensorInfo + """ + sensors = list( + 'numericSensorInfo:name={}:type={}:sensorStatus={}:value={}:unitModifier={}:unit={}'.format( + item.name, + item.sensorType, + 
item.healthState.key, + item.currentReading, + item.unitModifier, + item.baseUnits.lower() + ) + for item in prop.val + ) + properties[prop.name] = ','.join(sensors) + + elif prop.name in [ + 'runtime.healthSystemRuntime.hardwareStatusInfo.cpuStatusInfo', + 'runtime.healthSystemRuntime.hardwareStatusInfo.memoryStatusInfo', + ]: + """ + handle hardwareStatusInfo + """ + sensors = list( + 'numericSensorInfo:name={}:type={}:sensorStatus={}:value={}:unitModifier={}:unit={}'.format( + item.name, + "n/a", + item.status.key, + "n/a", + "n/a", + "n/a", + ) + for item in prop.val + ) + properties[prop.name] = ','.join(sensors) + + else: + properties[prop.name] = prop.val results[obj.obj._moId] = properties diff --git a/vmware_exporter/vmware_exporter.py b/vmware_exporter/vmware_exporter.py index be2bb77..e497fa2 100755 --- a/vmware_exporter/vmware_exporter.py +++ b/vmware_exporter/vmware_exporter.py @@ -1,23 +1,37 @@ #!/usr/bin/env python # -*- python -*- # -*- coding: utf-8 -*- +# autopep8'd """ Handles collection of metrics for vmware. """ - from __future__ import print_function -import datetime # Generic imports import argparse import os +import re import ssl import sys import traceback import pytz import logging +import datetime +import yaml +import requests + +""" +disable annoying urllib3 warning messages for connecting to servers with non verified certificate Doh! 
+""" +from requests.packages.urllib3.exceptions import InsecureRequestWarning -from yamlconfig import YamlConfig +requests.packages.urllib3.disable_warnings(InsecureRequestWarning) + +""" +For custom attributes +used to plain some list of lists in a single one +""" +from itertools import chain # Twisted from twisted.web.server import Site, NOT_DONE_YET @@ -35,10 +49,24 @@ from .helpers import batch_fetch_properties, get_bool_env from .defer import parallelize, run_once_property +from .__init__ import __version__ + class VmwareCollector(): - def __init__(self, host, username, password, collect_only, specs_size, ignore_ssl=False): + def __init__( + self, + host, + username, + password, + collect_only, + specs_size, + fetch_custom_attributes=False, + ignore_ssl=False, + fetch_tags=False, + fetch_alarms=False + ): + self.host = host self.username = username self.password = password @@ -46,154 +74,325 @@ def __init__(self, host, username, password, collect_only, specs_size, ignore_ss self.collect_only = collect_only self.specs_size = int(specs_size) + self._session = None + + # Custom Attributes + # flag to wheter fetch custom attributes or not + self.fetch_custom_attributes = fetch_custom_attributes + # vms, hosts and datastores custom attributes must be stored by their moid + self._vmsCustomAttributes = {} + self._hostsCustomAttributes = {} + self._datastoresCustomAttributes = {} + + # Tags + # flag to wheter fetch tags or not + self.fetch_tags = fetch_tags + + # Alarms + # flag wheter to fetch alarms or not + self.fetch_alarms = fetch_alarms + + # label names and ammount will be needed later to insert labels from custom attributes + self._labelNames = { + 'vms': ['vm_name', 'ds_name', 'host_name', 'dc_name', 'cluster_name'], + 'vm_perf': ['vm_name', 'ds_name', 'host_name', 'dc_name', 'cluster_name'], + 'vmguests': ['vm_name', 'ds_name', 'host_name', 'dc_name', 'cluster_name'], + 'snapshots': ['vm_name', 'ds_name', 'host_name', 'dc_name', 'cluster_name'], + 
'datastores': ['ds_name', 'dc_name', 'ds_cluster'], + 'hosts': ['host_name', 'dc_name', 'cluster_name'], + 'host_perf': ['host_name', 'dc_name', 'cluster_name'], + } + + # if tags are gonna be fetched 'tags' will be a label too + if self.fetch_tags: + for section in self._labelNames.keys(): + self._labelNames[section] = self._labelNames[section] + ['tags'] + + # as label names, metric are going to be used modified later + # as labels from custom attributes are going to be inserted + self._metricNames = { + 'vms': [], + 'vm_perf': [], + 'hosts': [], + 'host_perf': [], + 'datastores': [], + } + def _create_metric_containers(self): metric_list = {} metric_list['vms'] = { 'vmware_vm_power_state': GaugeMetricFamily( 'vmware_vm_power_state', 'VMWare VM Power state (On / Off)', - labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']), + labels=self._labelNames['vms']), 'vmware_vm_boot_timestamp_seconds': GaugeMetricFamily( 'vmware_vm_boot_timestamp_seconds', 'VMWare VM boot time in seconds', - labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']), + labels=self._labelNames['vms']), 'vmware_vm_num_cpu': GaugeMetricFamily( 'vmware_vm_num_cpu', 'VMWare Number of processors in the virtual machine', - labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']), + labels=self._labelNames['vms']), 'vmware_vm_memory_max': GaugeMetricFamily( 'vmware_vm_memory_max', 'VMWare VM Memory Max availability in Mbytes', - labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']), + labels=self._labelNames['vms']), 'vmware_vm_max_cpu_usage': GaugeMetricFamily( 'vmware_vm_max_cpu_usage', 'VMWare VM Cpu Max availability in hz', - labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']), + labels=self._labelNames['vms']), 'vmware_vm_template': GaugeMetricFamily( 'vmware_vm_template', 'VMWare VM Template (true / false)', - labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']), - } + labels=self._labelNames['vms']), + } metric_list['vmguests'] = { 
'vmware_vm_guest_disk_free': GaugeMetricFamily( 'vmware_vm_guest_disk_free', 'Disk metric per partition', - labels=['vm_name', 'host_name', 'dc_name', 'cluster_name', 'partition', ]), + labels=self._labelNames['vmguests'] + ['partition', ]), 'vmware_vm_guest_disk_capacity': GaugeMetricFamily( 'vmware_vm_guest_disk_capacity', 'Disk capacity metric per partition', - labels=['vm_name', 'host_name', 'dc_name', 'cluster_name', 'partition', ]), + labels=self._labelNames['vmguests'] + ['partition', ]), 'vmware_vm_guest_tools_running_status': GaugeMetricFamily( 'vmware_vm_guest_tools_running_status', 'VM tools running status', - labels=['vm_name', 'host_name', 'dc_name', 'cluster_name', 'tools_status', ]), + labels=self._labelNames['vmguests'] + ['tools_status', ]), 'vmware_vm_guest_tools_version': GaugeMetricFamily( 'vmware_vm_guest_tools_version', 'VM tools version', - labels=['vm_name', 'host_name', 'dc_name', 'cluster_name', 'tools_version', ]), + labels=self._labelNames['vmguests'] + ['tools_version', ]), 'vmware_vm_guest_tools_version_status': GaugeMetricFamily( 'vmware_vm_guest_tools_version_status', 'VM tools version status', - labels=['vm_name', 'host_name', 'dc_name', 'cluster_name', 'tools_version_status', ]), - } + labels=self._labelNames['vmguests'] + ['tools_version_status', ]), + } metric_list['snapshots'] = { 'vmware_vm_snapshots': GaugeMetricFamily( 'vmware_vm_snapshots', 'VMWare current number of existing snapshots', - labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']), + labels=self._labelNames['snapshots']), 'vmware_vm_snapshot_timestamp_seconds': GaugeMetricFamily( 'vmware_vm_snapshot_timestamp_seconds', 'VMWare Snapshot creation time in seconds', - labels=['vm_name', 'host_name', 'dc_name', 'cluster_name', 'vm_snapshot_name']), - } + labels=self._labelNames['snapshots'] + ['vm_snapshot_name']), + } metric_list['datastores'] = { 'vmware_datastore_capacity_size': GaugeMetricFamily( 'vmware_datastore_capacity_size', 'VMWare Datasore capacity in 
bytes', - labels=['ds_name', 'dc_name', 'ds_cluster']), + labels=self._labelNames['datastores']), 'vmware_datastore_freespace_size': GaugeMetricFamily( 'vmware_datastore_freespace_size', 'VMWare Datastore freespace in bytes', - labels=['ds_name', 'dc_name', 'ds_cluster']), + labels=self._labelNames['datastores']), 'vmware_datastore_uncommited_size': GaugeMetricFamily( 'vmware_datastore_uncommited_size', 'VMWare Datastore uncommitted in bytes', - labels=['ds_name', 'dc_name', 'ds_cluster']), + labels=self._labelNames['datastores']), 'vmware_datastore_provisoned_size': GaugeMetricFamily( 'vmware_datastore_provisoned_size', 'VMWare Datastore provisoned in bytes', - labels=['ds_name', 'dc_name', 'ds_cluster']), + labels=self._labelNames['datastores']), 'vmware_datastore_hosts': GaugeMetricFamily( 'vmware_datastore_hosts', 'VMWare Hosts number using this datastore', - labels=['ds_name', 'dc_name', 'ds_cluster']), + labels=self._labelNames['datastores']), 'vmware_datastore_vms': GaugeMetricFamily( 'vmware_datastore_vms', 'VMWare Virtual Machines count per datastore', - labels=['ds_name', 'dc_name', 'ds_cluster']), + labels=self._labelNames['datastores']), 'vmware_datastore_maintenance_mode': GaugeMetricFamily( 'vmware_datastore_maintenance_mode', 'VMWare datastore maintenance mode (normal / inMaintenance / enteringMaintenance)', - labels=['ds_name', 'dc_name', 'ds_cluster', 'mode']), + labels=self._labelNames['datastores'] + ['mode']), 'vmware_datastore_type': GaugeMetricFamily( 'vmware_datastore_type', 'VMWare datastore type (VMFS, NetworkFileSystem, NetworkFileSystem41, CIFS, VFAT, VSAN, VFFS)', - labels=['ds_name', 'dc_name', 'ds_cluster', 'ds_type']), + labels=self._labelNames['datastores'] + ['ds_type']), 'vmware_datastore_accessible': GaugeMetricFamily( 'vmware_datastore_accessible', 'VMWare datastore accessible (true / false)', - labels=['ds_name', 'dc_name', 'ds_cluster']) - } + labels=self._labelNames['datastores']) + } metric_list['hosts'] = { 
'vmware_host_power_state': GaugeMetricFamily( 'vmware_host_power_state', 'VMWare Host Power state (On / Off)', - labels=['host_name', 'dc_name', 'cluster_name']), + labels=self._labelNames['hosts']), + 'vmware_host_standby_mode': GaugeMetricFamily( + 'vmware_host_standby_mode', + 'VMWare Host Standby Mode (entering / exiting / in / none)', + labels=self._labelNames['hosts'] + ['standby_mode_state']), 'vmware_host_connection_state': GaugeMetricFamily( 'vmware_host_connection_state', 'VMWare Host connection state (connected / disconnected / notResponding)', - labels=['host_name', 'dc_name', 'cluster_name', 'state']), + labels=self._labelNames['hosts'] + ['state']), 'vmware_host_maintenance_mode': GaugeMetricFamily( 'vmware_host_maintenance_mode', 'VMWare Host maintenance mode (true / false)', - labels=['host_name', 'dc_name', 'cluster_name']), + labels=self._labelNames['hosts']), 'vmware_host_boot_timestamp_seconds': GaugeMetricFamily( 'vmware_host_boot_timestamp_seconds', 'VMWare Host boot time in seconds', - labels=['host_name', 'dc_name', 'cluster_name']), + labels=self._labelNames['hosts']), 'vmware_host_cpu_usage': GaugeMetricFamily( 'vmware_host_cpu_usage', 'VMWare Host CPU usage in Mhz', - labels=['host_name', 'dc_name', 'cluster_name']), + labels=self._labelNames['hosts']), 'vmware_host_cpu_max': GaugeMetricFamily( 'vmware_host_cpu_max', 'VMWare Host CPU max availability in Mhz', - labels=['host_name', 'dc_name', 'cluster_name']), + labels=self._labelNames['hosts']), 'vmware_host_num_cpu': GaugeMetricFamily( 'vmware_host_num_cpu', 'VMWare Number of processors in the Host', - labels=['host_name', 'dc_name', 'cluster_name']), + labels=self._labelNames['hosts']), 'vmware_host_memory_usage': GaugeMetricFamily( 'vmware_host_memory_usage', 'VMWare Host Memory usage in Mbytes', - labels=['host_name', 'dc_name', 'cluster_name']), + labels=self._labelNames['hosts']), 'vmware_host_memory_max': GaugeMetricFamily( 'vmware_host_memory_max', 'VMWare Host Memory Max 
availability in Mbytes', - labels=['host_name', 'dc_name', 'cluster_name']), + labels=self._labelNames['hosts']), 'vmware_host_product_info': GaugeMetricFamily( 'vmware_host_product_info', 'A metric with a constant "1" value labeled by version and build from os the host.', - labels=['host_name', 'dc_name', 'cluster_name', 'version', 'build']), + labels=self._labelNames['hosts'] + ['version', 'build']), 'vmware_host_hardware_info': GaugeMetricFamily( 'vmware_host_hardware_info', 'A metric with a constant "1" value labeled by model and cpu model from the host.', - labels=['host_name', 'dc_name', 'cluster_name', 'hardware_model', 'hardware_cpu_model']), - } + labels=self._labelNames['hosts'] + ['hardware_model', 'hardware_cpu_model']), + 'vmware_host_sensor_state': GaugeMetricFamily( + 'vmware_host_sensor_state', + 'VMWare sensor state value (0=red / 1=yellow / 2=green / 3=unknown) labeled by sensor name and type ' + 'from the host.', + labels=self._labelNames['hosts'] + ['name', 'type']), + 'vmware_host_sensor_fan': GaugeMetricFamily( + 'vmware_host_sensor_fan', + 'VMWare sensor fan speed value in RPM labeled by sensor name from the host.', + labels=self._labelNames['hosts'] + ['name']), + 'vmware_host_sensor_temperature': GaugeMetricFamily( + 'vmware_host_sensor_temperature', + 'VMWare sensor temperature value in degree C labeled by sensor name from the host.', + labels=self._labelNames['hosts'] + ['name']), + 'vmware_host_sensor_power_voltage': GaugeMetricFamily( + 'vmware_host_sensor_power_voltage', + 'VMWare sensor power voltage value in volt labeled by sensor name from the host.', + labels=self._labelNames['hosts'] + ['name']), + 'vmware_host_sensor_power_current': GaugeMetricFamily( + 'vmware_host_sensor_power_current', + 'VMWare sensor power current value in amp labeled by sensor name from the host.', + labels=self._labelNames['hosts'] + ['name']), + 'vmware_host_sensor_power_watt': GaugeMetricFamily( + 'vmware_host_sensor_power_watt', + 'VMWare sensor power 
watt value in watt labeled by sensor name from the host.', + labels=self._labelNames['hosts'] + ['name']), + 'vmware_host_sensor_redundancy': GaugeMetricFamily( + 'vmware_host_sensor_redundancy', + 'VMWare sensor redundancy value (1=ok / 0=ko) labeled by sensor name from the host.', + labels=self._labelNames['hosts'] + ['name']), + } + + """ + if alarms are being retrieved, metrics have to been created here + """ + if self.fetch_alarms: + """ + for hosts + """ + metric_list['hosts'].update( + { + 'vmware_host_yellow_alarms': GaugeMetricFamily( + 'vmware_host_yellow_alarms', + 'A metric with the amount of host yellow alarms and labeled with the list of alarm names', + labels=self._labelNames['hosts'] + ['alarms'] + ), + 'vmware_host_red_alarms': GaugeMetricFamily( + 'vmware_host_red_alarms', + 'A metric with the amount of host red alarms and labeled with the list of alarm names', + labels=self._labelNames['hosts'] + ['alarms'] + ) + } + ) + + """ + for datastores + """ + metric_list['datastores'].update( + { + 'vmware_datastore_yellow_alarms': GaugeMetricFamily( + 'vmware_datastore_yellow_alarms', + 'A metric with the amount of datastore yellow alarms and labeled with the list of alarm names', + labels=self._labelNames['datastores'] + ['alarms'] + ), + 'vmware_datastore_red_alarms': GaugeMetricFamily( + 'vmware_datastore_red_alarms', + 'A metric with the amount of datastore red alarms and labeled with the list of alarm names', + labels=self._labelNames['datastores'] + ['alarms'] + ) + } + ) + + """ + for vms + """ + metric_list['vms'].update( + { + 'vmware_vm_yellow_alarms': GaugeMetricFamily( + 'vmware_vm_yellow_alarms', + 'A metric with the amount of virtual machine yellow alarms and \ + labeled with the list of alarm names', + labels=self._labelNames['vms'] + ['alarms'] + ), + 'vmware_vm_red_alarms': GaugeMetricFamily( + 'vmware_vm_red_alarms', + 'A metric with the amount of virtual machine red alarms and \ + labeled with the list of alarm names', + 
labels=self._labelNames['vms'] + ['alarms'] + ) + } + ) + metric_list['vmguests'].update( + { + 'vmware_vm_yellow_alarms': GaugeMetricFamily( + 'vmware_vm_yellow_alarms', + 'A metric with the amount of virtual machine yellow alarms and \ + labeled with the list of alarm names', + labels=self._labelNames['vms'] + ['alarms'] + ), + 'vmware_vm_red_alarms': GaugeMetricFamily( + 'vmware_vm_red_alarms', + 'A metric with the amount of virtual machine red alarms and \ + labeled with the list of alarm names', + labels=self._labelNames['vms'] + ['alarms'] + ) + } + ) + metric_list['snapshots'].update( + { + 'vmware_vm_yellow_alarms': GaugeMetricFamily( + 'vmware_vm_yellow_alarms', + 'A metric with the amount of virtual machine yellow alarms and \ + labeled with the list of alarm names', + labels=self._labelNames['vms'] + ['alarms'] + ), + 'vmware_vm_red_alarms': GaugeMetricFamily( + 'vmware_vm_red_alarms', + 'A metric with the amount of virtual machine red alarms and \ + labeled with the list of alarm names', + labels=self._labelNames['vms'] + ['alarms'] + ) + } + ) metrics = {} for key, value in self.collect_only.items(): if value is True: + """ storing metric names to be used later """ + self._metricNames[key] = list(metric_list[key].keys()) metrics.update(metric_list[key]) return metrics @@ -222,7 +421,7 @@ def collect(self): # Collect Datastore metrics if collect_only['datastores'] is True: - tasks.append(self._vmware_get_datastores(metrics,)) + tasks.append(self._vmware_get_datastores(metrics, )) if collect_only['hosts'] is True: tasks.append(self._vmware_get_hosts(metrics)) @@ -234,12 +433,134 @@ def collect(self): logging.info("Finished collecting metrics from {vsphere_host}".format(vsphere_host=vsphere_host)) - return list(metrics.values()) # noqa: F705 + return list(metrics.values()) # noqa: F705 def _to_epoch(self, my_date): """ convert to epoch time """ return (my_date - datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds() + @run_once_property + 
@defer.inlineCallbacks + def session(self): + + if self._session is None: + self._session = requests.Session() + self._session.verify = not self.ignore_ssl + self._session.auth = (self.username, self.password) + + try: + yield threads.deferToThread( + self._session.post, + 'https://{host}/rest/com/vmware/cis/session'.format(host=self.host) + ) + except Exception as e: + logging.error('Error creating vcenter API session ({})'.format(e)) + self._session = None + + return self._session + + @run_once_property + @defer.inlineCallbacks + def _tagIDs(self): + """ + fetch a list of all tags ids + """ + session = yield self.session + response = yield threads.deferToThread( + session.get, + 'https://{host}/rest/com/vmware/cis/tagging/tag'.format(host=self.host) + ) + output = [] + try: + output = response.json().get('value') + except Exception as e: + logging.error('Unable to fetch tag IDs from vcenter {} ({})'.format(self.host, e)) + + return output + + @run_once_property + @defer.inlineCallbacks + def _attachedObjectsOnTags(self): + """ + retrieve a dict with all objects which have a tag attached + """ + session = yield self.session + tagIDs = yield self._tagIDs + jsonBody = { + 'tag_ids': tagIDs + } + response = yield threads.deferToThread( + session.post, + 'https://{host}/rest/com/vmware/cis/tagging/tag-association?~action=list-attached-objects-on-tags' + .format(host=self.host), + json=jsonBody + ) + + output = {} + + try: + output = response.json().get('value', output) + except Exception as e: + logging.error('Unable to fetch list of attached objects on tags on vcenter {} ({})'.format(self.host, e)) + + return output + + @run_once_property + @defer.inlineCallbacks + def _tagNames(self): + """ + tag IDs are useless to enduser, so they have to be translated + to the tag text + """ + session = yield self.session + tagIDs = yield self._tagIDs + tagNames = {} + for tagID in tagIDs: + response = yield threads.deferToThread( + session.get, + 
'https://{host}/rest/com/vmware/cis/tagging/tag/id:{tag_id}'.format(host=self.host, tag_id=tagID) + ) + tagObj = response.json().get('value', {}) + if tagObj: + tagNames[tagObj.get('id')] = tagObj.get('name') + + return tagNames + + @run_once_property + @defer.inlineCallbacks + def tags(self): + """ + tags are finally stored by category: vms, hosts, and datastores + and linked to object moid + """ + logging.info("Fetching tags") + start = datetime.datetime.utcnow() + + attachedObjs = yield self._attachedObjectsOnTags + tagNames = yield self._tagNames + tags = { + 'vms': {}, + 'hosts': {}, + 'datastores': {}, + 'others': {}, + } + + sections = {'VirtualMachine': 'vms', 'Datastore': 'datastores', 'HostSystem': 'hosts'} + + for attachedObj in attachedObjs: + tagName = tagNames.get(attachedObj.get('tag_id')) + for obj in attachedObj.get('object_ids'): + section = sections.get(obj.get('type'), 'others') + if obj.get('id') not in tags[section]: + tags[section][obj.get('id')] = [tagName] + else: + tags[section][obj.get('id')].append(tagName) + + fetch_time = datetime.datetime.utcnow() - start + logging.info("Fetched tags ({fetch_time})".format(fetch_time=fetch_time)) + + return tags + @run_once_property @defer.inlineCallbacks def connection(self): @@ -304,12 +625,41 @@ def datastore_inventory(self): 'vm', ] + """ + are custom attributes going to be retrieved? + """ + if self.fetch_custom_attributes: + """ yep! 
""" + properties.append('customValue') + + """ + triggeredAlarmState must be fetched to get datastore alarms list + """ + if self.fetch_alarms: + properties.append('triggeredAlarmState') + datastores = yield self.batch_fetch_properties( vim.Datastore, properties ) + + """ + once custom attributes are fetched, + store'em linked to their moid + if no customValue found for an object + it get an empty dict + """ + if self.fetch_custom_attributes: + self._datastoresCustomAttributes = dict( + [ + (ds_moId, ds.get('customValue', {})) + for ds_moId, ds in datastores.items() + ] + ) + fetch_time = datetime.datetime.utcnow() - start logging.info("Fetched vim.Datastore inventory ({fetch_time})".format(fetch_time=fetch_time)) + return datastores @run_once_property @@ -326,6 +676,7 @@ def host_system_inventory(self): 'summary.config.product.version', 'summary.config.product.build', 'runtime.powerState', + 'runtime.standbyMode', 'runtime.bootTime', 'runtime.connectionState', 'runtime.inMaintenanceMode', @@ -333,14 +684,48 @@ def host_system_inventory(self): 'summary.quickStats.overallMemoryUsage', 'summary.hardware.cpuModel', 'summary.hardware.model', + 'runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo', + 'runtime.healthSystemRuntime.hardwareStatusInfo.cpuStatusInfo', + 'runtime.healthSystemRuntime.hardwareStatusInfo.memoryStatusInfo', ] + """ + signal to fetch hosts custom attributes + yay! 
+ + if self.fetch_custom_attributes: + properties.append('summary.customValue') + + """ + triggeredAlarmState must be fetched to get host alarms list + in case of hosts, sensors, cpu and memory status alarms + are going to be retrieved as well + """ + if self.fetch_alarms: + properties.append('triggeredAlarmState') + host_systems = yield self.batch_fetch_properties( vim.HostSystem, properties, ) + + """ + once custom attributes are fetched, + store them linked to their moid + if no customValue found for an object + it gets an empty dict + """ + if self.fetch_custom_attributes: + self._hostsCustomAttributes = dict( [ (host_moId, host.get('summary.customValue', {})) for host_moId, host in host_systems.items() ] ) + fetch_time = datetime.datetime.utcnow() - start logging.info("Fetched vim.HostSystem inventory ({fetch_time})".format(fetch_time=fetch_time)) + return host_systems @run_once_property @@ -352,6 +737,7 @@ def vm_inventory(self): 'name', 'runtime.host', 'parent', + 'summary.config.vmPathName', ] if self.collect_only['vms'] is True: @@ -375,14 +761,209 @@ def vm_inventory(self): if self.collect_only['snapshots'] is True: properties.append('snapshot') + """ + papa smurf, are we collecting custom attributes?
+ """ + if self.fetch_custom_attributes: + properties.append('summary.customValue') + + """ + triggeredAlarmState must be fetched to get vm alarms list + """ + if self.fetch_alarms: + properties.append('triggeredAlarmState') + virtual_machines = yield self.batch_fetch_properties( vim.VirtualMachine, properties, ) + + """ + once custom attributes are fetched, + store'em linked to their moid + if no customValue found for an object + it get an empty dict + """ + if self.fetch_custom_attributes: + self._vmsCustomAttributes = dict( + [ + (vm_moId, vm.get('summary.customValue', {})) + for vm_moId, vm in virtual_machines.items() + ] + ) + fetch_time = datetime.datetime.utcnow() - start logging.info("Fetched vim.VirtualMachine inventory ({fetch_time})".format(fetch_time=fetch_time)) + return virtual_machines + @defer.inlineCallbacks + def customAttributesLabelNames(self, metric_type): + """ + vm perf, vms, vmguestes and snapshots metrics share the same custom attributes + as they re related to virtual machine objects + + host perf and hosts metrics share the same custom attributes + as they re related to host system objects + """ + + labelNames = [] + + if metric_type in ('datastores',): + labelNames = yield self.datastoresCustomAttributesLabelNames + + if metric_type in ('vms', 'vm_perf', 'snapshots', 'vmguests'): + labelNames = yield self.vmsCustomAttributesLabelNames + + if metric_type in ('hosts', 'host_perf'): + labelNames = yield self.hostsCustomAttributesLabelNames + + return labelNames + + @run_once_property + @defer.inlineCallbacks + def datastoresCustomAttributesLabelNames(self): + """ + normalizes custom attributes to all objects of the same type + it means + all objects of type datastore will share the same set of custom attributes + but these custom attributes can be filled or not, depending on + what has been gathered (of course) + """ + customAttributesLabelNames = [] + + if self.fetch_custom_attributes: + customAttributes = yield 
self._datastoresCustomAttributes + customAttributesLabelNames = list( + set( + chain( + *[ + attributes.keys() + for attributes in customAttributes.values() + ] + ) + ) + ) + + return customAttributesLabelNames + + @run_once_property + @defer.inlineCallbacks + def hostsCustomAttributesLabelNames(self): + """ + normalizes custom attributes to all objects of the same type + it means + all objects of type host system will share the same set of custom attributes + but these custom attributes can be filled or not, depending on + what has been gathered (of course) + """ + customAttributesLabelNames = [] + + if self.fetch_custom_attributes: + customAttributes = yield self._hostsCustomAttributes + customAttributesLabelNames = list( + set( + chain( + *[ + attributes.keys() + for attributes in customAttributes.values() + ] + ) + ) + ) + + return customAttributesLabelNames + + @run_once_property + @defer.inlineCallbacks + def vmsCustomAttributesLabelNames(self): + """ + normalizes custom attributes to all objects of the same type + it means + all objects of type virtual machine will share the same set of custom attributes + but these custom attributes can be filled or not, depending on + what has been gathered (of course) + """ + customAttributesLabelNames = [] + + if self.fetch_custom_attributes: + customAttributes = yield self._vmsCustomAttributes + customAttributesLabelNames = list( + set( + chain( + *[ + attributes.keys() + for attributes in customAttributes.values() + ] + ) + ) + ) + + return customAttributesLabelNames + + @run_once_property + @defer.inlineCallbacks + def datastoresCustomAttributes(self): + """ + creates a list of the custom attributes values, + in order their labels re gonna be inserted + when no value was found for that custom attribute + 'n/a' is inserted + """ + customAttributes = {} + + if self.fetch_custom_attributes: + customAttributes = yield self._datastoresCustomAttributes + datastoresCustomAttributesLabelNames = yield 
self.datastoresCustomAttributesLabelNames + for labelName in datastoresCustomAttributesLabelNames: + for ds in customAttributes.keys(): + if labelName not in customAttributes[ds].keys(): + customAttributes[ds][labelName] = 'n/a' + + return customAttributes + + @run_once_property + @defer.inlineCallbacks + def hostsCustomAttributes(self): + """ + creates a list of the custom attributes values, + in order their labels re gonna be inserted + when no value was found for that custom attribute + 'n/a' is inserted + """ + customAttributes = {} + + if self.fetch_custom_attributes: + customAttributes = yield self._hostsCustomAttributes + hostsCustomAttributesLabelNames = yield self.hostsCustomAttributesLabelNames + for labelName in hostsCustomAttributesLabelNames: + for host in customAttributes.keys(): + if labelName not in customAttributes[host].keys(): + customAttributes[host][labelName] = 'n/a' + + return customAttributes + + @run_once_property + @defer.inlineCallbacks + def vmsCustomAttributes(self): + """ + creates a list of the custom attributes values, + in order their labels re gonna be inserted + when no value was found for that custom attribute + 'n/a' is inserted + """ + customAttributes = {} + + if self.fetch_custom_attributes: + customAttributes = yield self._vmsCustomAttributes + vmsCustomAttributesLabelNames = yield self.customAttributesLabelNames('vms') + for labelName in vmsCustomAttributesLabelNames: + for vm in customAttributes.keys(): + if labelName not in customAttributes[vm].keys(): + customAttributes[vm][labelName] = 'n/a' + + return customAttributes + @run_once_property @defer.inlineCallbacks def datacenter_inventory(self): @@ -422,6 +1003,7 @@ def _collect(node, level=1, dc=None, storagePod=""): for dc in dcs: result = yield threads.deferToThread(lambda: _collect(dc)) labels.update(result) + return labels @run_once_property @@ -447,7 +1029,7 @@ def _collect(node, level=1, dc=None, folder=None): node.summary.config.name.rstrip('.'), dc, folder.name 
if isinstance(folder, vim.ClusterComputeResource) else '' - ] + ] else: logging.debug("[? ] {level} {node}".format(level=('-' * level).ljust(7), node=node)) return inventory @@ -459,9 +1041,46 @@ def _collect(node, level=1, dc=None, folder=None): labels.update(result) return labels + @run_once_property + @defer.inlineCallbacks + def vm_tags(self): + """ + return a dict that links vms moid to its tags + """ + tags = {} + if self.fetch_tags: + tags = yield self.tags + tags = tags['vms'] + return tags + + @run_once_property + @defer.inlineCallbacks + def host_tags(self): + """ + return a dict that links hosts moid to its tags + """ + tags = {} + if self.fetch_tags: + tags = yield self.tags + tags = tags['hosts'] + return tags + + @run_once_property + @defer.inlineCallbacks + def datastore_tags(self): + """ + return a dict that links datastore moid to its tags + """ + tags = {} + if self.fetch_tags: + tags = yield self.tags + tags = tags['datastores'] + return tags + @run_once_property @defer.inlineCallbacks def vm_labels(self): + virtual_machines, host_labels = yield parallelize(self.vm_inventory, self.host_labels) labels = {} @@ -473,9 +1092,39 @@ def vm_labels(self): labels[moid] = [row['name']] + if 'summary.config.vmPathName' in row: + p = row['summary.config.vmPathName'] + if p.startswith('['): + p = p[1:p.find("]")] + else: + p = 'n/a' + + labels[moid] = labels[moid] + [p] + if host_moid in host_labels: labels[moid] = labels[moid] + host_labels[host_moid] + """ + this code was in vm_inventory before + but I have the feeling it is best placed here where + vms label values are handled + """ + labels_cnt = len(labels[moid]) + if self.fetch_tags: + labels_cnt += 1 + + if labels_cnt < len(self._labelNames['vms']): + logging.info( + "Only ${cnt}/{expected} labels (vm, host, dc, cluster) found, filling n/a" + .format( + cnt=labels_cnt, + expected=len(self._labelNames['vms']) + ) + ) + + for i in range(labels_cnt, len(self._labelNames['vms'])): + 
labels[moid].append('n/a') + return labels @run_once_property @@ -519,18 +1168,89 @@ def _vmware_full_snapshots_list(self, snapshots): snapshot.childSnapshotList) return snapshot_data + @defer.inlineCallbacks + def updateMetricsLabelNames(self, metrics, metric_types): + """ + by the time metrics are created, we have no clue what are gonna be the custom attributes + or even if they re gonna be fetched. + so after custom attributes are finally retrieved from the datacenter, + their labels need to be inserted inside the already defined metric labels. + to be possible, we previously had to store metric names and map'em by object type, vms, + hosts and datastores, and so its metrics, so as to gather everything here + """ + # Insert custom attributes names as metric labels + if self.fetch_custom_attributes: + + for metric_type in metric_types: + + customAttributesLabelNames = yield self.customAttributesLabelNames(metric_type) + + for metric_name in self._metricNames.get(metric_type, []): + metric = metrics.get(metric_name) + labelnames = metric._labelnames + metric._labelnames = labelnames[0:len(self._labelNames[metric_type])] + metric._labelnames += customAttributesLabelNames + metric._labelnames += labelnames[len(self._labelNames[metric_type]):] + metric._labelnames = list(map(lambda x: re.sub('[^a-zA-Z0-9_]', '_', x), metric._labelnames)) + @defer.inlineCallbacks def _vmware_get_datastores(self, ds_metrics): """ Get Datastore information """ - results, datastore_labels = yield parallelize(self.datastore_inventory, self.datastore_labels) + if self.fetch_tags: + """ + if we need the tags, we fetch'em here + """ + results, datastore_labels, datastore_tags = yield parallelize( + self.datastore_inventory, + self.datastore_labels, + self.datastore_tags + ) + else: + results, datastore_labels = yield parallelize(self.datastore_inventory, self.datastore_labels) + + """ + fetch custom attributes + """ + customAttributes = {} + customAttributesLabelNames = {} + if 
self.fetch_custom_attributes: + customAttributes = yield self.datastoresCustomAttributes + customAttributesLabelNames = yield self.datastoresCustomAttributesLabelNames + + """ + updates the datastore metric label names with custom attributes names + """ + self.updateMetricsLabelNames(ds_metrics, ['datastores']) for datastore_id, datastore in results.items(): try: name = datastore['name'] labels = datastore_labels[name] + + """ + insert the tags values if needed + if tags are empty they receive a 'n/a' + """ + if self.fetch_tags: + tags = datastore_tags.get(datastore_id, []) + tags = ','.join(tags) + if not tags: + tags = 'n/a' + + labels += [tags] + + """ + time to insert the custom attributes values in order + """ + customLabels = [] + for labelName in customAttributesLabelNames: + customLabels.append(customAttributes[datastore_id].get(labelName)) + + labels += customLabels + except KeyError as e: logging.info( "Key error, unable to register datastore {error}, datastores are {datastore_labels}".format( @@ -539,6 +1259,29 @@ def _vmware_get_datastores(self, ds_metrics): ) continue + """ + filter red and yellow alarms + """ + if self.fetch_alarms: + alarms = datastore.get('triggeredAlarmState').split(',') + alarms = [a for a in alarms if ':' in a] + + # Red alarms + red_alarms = [':'.join(a.split(':')[:-1]) for a in alarms if a.split(':')[-1] == 'red'] + red_alarms_label = ','.join(red_alarms) if red_alarms else 'n/a' + ds_metrics['vmware_datastore_red_alarms'].add_metric( + labels + [red_alarms_label], + len(red_alarms) + ) + + # Yellow alarms + yellow_alarms = [':'.join(a.split(':')[:-1]) for a in alarms if a.split(':')[-1] == 'yellow'] + yellow_alarms_label = ','.join(yellow_alarms) if yellow_alarms else 'n/a' + ds_metrics['vmware_datastore_yellow_alarms'].add_metric( + labels + [yellow_alarms_label], + len(yellow_alarms) + ) + ds_capacity = float(datastore.get('summary.capacity', 0)) ds_freespace = float(datastore.get('summary.freeSpace', 0)) ds_uncommitted = 
float(datastore.get('summary.uncommitted', 0)) @@ -610,7 +1353,11 @@ def _vmware_get_vm_perf_manager_metrics(self, vm_metrics): vm_metrics[p_metric] = GaugeMetricFamily( p_metric, p_metric, - labels=['vm_name', 'host_name', 'dc_name', 'cluster_name']) + labels=self._labelNames['vm_perf']) + """ + store perf metric name for later ;) + """ + self._metricNames['vm_perf'].append(p_metric) metrics = [] metric_names = {} @@ -623,6 +1370,11 @@ def _vmware_get_vm_perf_manager_metrics(self, vm_metrics): )) metric_names[counter_key] = perf_metric_name + """ + updates vm perf metrics label names with vms custom attributes names + """ + self.updateMetricsLabelNames(vm_metrics, ['vm_perf']) + specs = [] for vm in virtual_machines.values(): if vm.get('runtime.powerState') != 'poweredOn': @@ -637,7 +1389,7 @@ def _vmware_get_vm_perf_manager_metrics(self, vm_metrics): content = yield self.content if len(specs) > 0: - chunks = [specs[x:x+self.specs_size] for x in range(0, len(specs), self.specs_size)] + chunks = [specs[x:x + self.specs_size] for x in range(0, len(specs), self.specs_size)] for list_specs in chunks: results, labels = yield parallelize( threads.deferToThread(content.perfManager.QueryStats, querySpec=list_specs), @@ -693,7 +1445,8 @@ def _vmware_get_host_perf_manager_metrics(self, host_metrics): host_metrics[p_metric] = GaugeMetricFamily( p_metric, p_metric, - labels=['host_name', 'dc_name', 'cluster_name']) + labels=self._labelNames['host_perf']) + self._metricNames['host_perf'].append(p_metric) metrics = [] metric_names = {} @@ -706,6 +1459,9 @@ def _vmware_get_host_perf_manager_metrics(self, host_metrics): )) metric_names[counter_key] = perf_metric_name + # Insert custom attributes names as metric labels + self.updateMetricsLabelNames(host_metrics, ['host_perf']) + specs = [] for host in host_systems.values(): if host.get('runtime.powerState') != 'poweredOn': @@ -730,7 +1486,7 @@ def _vmware_get_host_perf_manager_metrics(self, host_metrics): 
host_metrics[metric_names[metric.id.counterId]].add_metric( labels[ent.entity._moId], float(sum(metric.value)), - ) + ) logging.info('FIN: _vmware_get_host_perf_manager_metrics') @@ -741,7 +1497,24 @@ def _vmware_get_vms(self, metrics): """ logging.info("Starting vm metrics collection") - virtual_machines, vm_labels = yield parallelize(self.vm_inventory, self.vm_labels) + if self.fetch_tags: + virtual_machines, vm_labels, vm_tags = yield parallelize( + self.vm_inventory, + self.vm_labels, + self.vm_tags + ) + else: + virtual_machines, vm_labels = yield parallelize(self.vm_inventory, self.vm_labels) + + # fetch Custom Attributes Labels ("values") + customAttributes = {} + customAttributesLabelNames = {} + if self.fetch_custom_attributes: + customAttributes = yield self.vmsCustomAttributes + customAttributesLabelNames = yield self.customAttributesLabelNames('vms') + + # Insert custom attributes names as metric labels + self.updateMetricsLabelNames(metrics, ['vms', 'vmguests', 'snapshots']) for moid, row in virtual_machines.items(): # Ignore vm if field "runtime.host" does not exist @@ -750,13 +1523,44 @@ def _vmware_get_vms(self, metrics): continue labels = vm_labels[moid] - labels_cnt = len(labels) - if labels_cnt < 4: - logging.info("Only ${cnt}/4 labels (vm, host, dc, cluster) found, filling n/a".format(cnt=labels_cnt)) + customLabels = [] + for labelName in customAttributesLabelNames: + customLabels.append(customAttributes[moid].get(labelName)) + + if self.fetch_tags: + tags = vm_tags.get(moid, []) + tags = ','.join(tags) + if not tags: + tags = 'n/a' + + vm_labels[moid] += [tags] + customLabels + + else: + vm_labels[moid] += customLabels + + """ + filter red and yellow alarms + """ + if self.fetch_alarms and ('triggeredAlarmState' in row): + alarms = row.get('triggeredAlarmState').split(',') + alarms = [a for a in alarms if ':' in a] + + # Red alarms + red_alarms = [':'.join(a.split(':')[:-1]) for a in alarms if a.split(':')[-1] == 'red'] + red_alarms_label = 
','.join(red_alarms) if red_alarms else 'n/a' + metrics['vmware_vm_red_alarms'].add_metric( + labels + [red_alarms_label], + len(red_alarms) + ) - for i in range(labels_cnt, 4): - labels.append('n/a') + # Yellow alarms + yellow_alarms = [':'.join(a.split(':')[:-1]) for a in alarms if a.split(':')[-1] == 'yellow'] + yellow_alarms_label = ','.join(yellow_alarms) if yellow_alarms else 'n/a' + metrics['vmware_vm_yellow_alarms'].add_metric( + labels + [yellow_alarms_label], + len(yellow_alarms) + ) if 'runtime.powerState' in row: power_state = 1 if row['runtime.powerState'] == 'poweredOn' else 0 @@ -827,11 +1631,44 @@ def _vmware_get_hosts(self, host_metrics): """ logging.info("Starting host metrics collection") - results, host_labels = yield parallelize(self.host_system_inventory, self.host_labels) + if self.fetch_tags: + results, host_labels, host_tags = yield parallelize( + self.host_system_inventory, + self.host_labels, + self.host_tags + ) + + else: + results, host_labels = yield parallelize(self.host_system_inventory, self.host_labels) + + # fetch Custom Attributes Labels ("values") + customAttributes = {} + customAttributesLabelNames = {} + if self.fetch_custom_attributes: + customAttributes = yield self.hostsCustomAttributes + customAttributesLabelNames = yield self.hostsCustomAttributesLabelNames + + # Insert custom attributes names as metric labels + self.updateMetricsLabelNames(host_metrics, ['hosts']) for host_id, host in results.items(): try: labels = host_labels[host_id] + + if self.fetch_tags: + tags = host_tags.get(host_id, []) + tags = ','.join(tags) + if not tags: + tags = 'n/a' + + labels += [tags] + + customLabels = [] + for labelName in customAttributesLabelNames: + customLabels.append(customAttributes[host_id].get(labelName)) + + labels += customLabels + except KeyError as e: logging.info( "Key error, unable to register host {error}, host labels are {host_labels}".format( @@ -840,6 +1677,103 @@ def _vmware_get_hosts(self, host_metrics): ) continue 
+ """ + filter red and yellow alarms + """ + if self.fetch_alarms: + alarms = [a for a in host.get('triggeredAlarmState', '').split(',') if ':' in a] + + # Red alarms + red_alarms = [':'.join(a.split(':')[:-1]) for a in alarms if a.split(':')[-1] == 'red'] + red_alarms_label = ','.join(red_alarms) if red_alarms else 'n/a' + host_metrics['vmware_host_red_alarms'].add_metric( + labels + [red_alarms_label], + len(red_alarms) + ) + + # Yellow alarms + yellow_alarms = [':'.join(a.split(':')[:-1]) for a in alarms if a.split(':')[-1] == 'yellow'] + yellow_alarms_label = ','.join(yellow_alarms) if yellow_alarms else 'n/a' + host_metrics['vmware_host_yellow_alarms'].add_metric( + labels + [yellow_alarms_label], + len(yellow_alarms) + ) + + # Numeric Sensor Info + sensors = host.get('runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo', '').split(',') + \ + host.get('runtime.healthSystemRuntime.hardwareStatusInfo.cpuStatusInfo', '').split(',') + \ + host.get('runtime.healthSystemRuntime.hardwareStatusInfo.memoryStatusInfo', '').split(',') + + sensors = [s for s in sensors if ':' in s] + + for s in sensors: + sensor = dict(item.split("=") for item in re.split(r':(?=\w+=)', s)[1:]) + + if not all(key in sensor for key in ['sensorStatus', 'name', 'type', 'unit', 'value']): + continue + + sensor_status = { + 'red': 0, + 'yellow': 1, + 'green': 2, + 'unknown': 3, + }[sensor['sensorStatus'].lower()] + + host_metrics['vmware_host_sensor_state'].add_metric( + labels + [sensor['name'], sensor['type']], + sensor_status + ) + + # FAN speed + if sensor["unit"] == 'rpm': + host_metrics['vmware_host_sensor_fan'].add_metric( + labels + [sensor['name']], + int(sensor['value']) * (10 ** (int(sensor['unitModifier']))) + ) + + # Temperature + if sensor["unit"] == 'degrees c': + host_metrics['vmware_host_sensor_temperature'].add_metric( + labels + [sensor['name']], + int(sensor['value']) * (10 ** (int(sensor['unitModifier']))) + ) + + # Power Voltage + if sensor["unit"] == 'volts': + 
host_metrics['vmware_host_sensor_power_voltage'].add_metric( + labels + [sensor['name']], + int(sensor['value']) * (10 ** (int(sensor['unitModifier']))) + ) + + # Power Current + if sensor["unit"] == 'amps': + host_metrics['vmware_host_sensor_power_current'].add_metric( + labels + [sensor['name']], + int(sensor['value']) * (10 ** (int(sensor['unitModifier']))) + ) + + # Power Watt + if sensor["unit"] == 'watts': + host_metrics['vmware_host_sensor_power_watt'].add_metric( + labels + [sensor['name']], + int(sensor['value']) * (10 ** (int(sensor['unitModifier']))) + ) + + # Redundancy + if sensor["unit"] == 'redundancy-discrete': + host_metrics['vmware_host_sensor_redundancy'].add_metric( + labels + [sensor['name']], + int(sensor['value']) + ) + + # Standby Mode + standby_mode = 1 if host.get('runtime.standbyMode') == 'in' else 0 + standby_mode_state = host.get('runtime.standbyMode', 'unknown') + host_metrics['vmware_host_standby_mode'].add_metric( + labels + [standby_mode_state], + standby_mode + ) + # Power state power_state = 1 if host['runtime.powerState'] == 'poweredOn' else 0 host_metrics['vmware_host_power_state'].add_metric(labels, power_state) @@ -851,24 +1785,23 @@ def _vmware_get_hosts(self, host_metrics): 1 ) + # Host in maintenance mode? + if 'runtime.inMaintenanceMode' in host: + host_metrics['vmware_host_maintenance_mode'].add_metric( + labels, + host['runtime.inMaintenanceMode'] * 1, + ) + if not power_state: continue if host.get('runtime.bootTime'): - # Host uptime host_metrics['vmware_host_boot_timestamp_seconds'].add_metric( labels, self._to_epoch(host['runtime.bootTime']) ) - # Host in maintenance mode? 
- if 'runtime.inMaintenanceMode' in host: - host_metrics['vmware_host_maintenance_mode'].add_metric( - labels, - host['runtime.inMaintenanceMode'] * 1, - ) - # CPU Usage (in Mhz) if 'summary.quickStats.overallCpuUsage' in host: host_metrics['vmware_host_cpu_usage'].add_metric( @@ -925,7 +1858,6 @@ def collect(self): class VMWareMetricsResource(Resource): - isLeaf = True def __init__(self, args): @@ -938,7 +1870,9 @@ def __init__(self, args): def configure(self, args): if args.config_file: try: - self.config = YamlConfig(args.config_file) + with open(args.config_file) as cf: + self.config = yaml.load(cf, Loader=yaml.FullLoader) + if 'default' not in self.config.keys(): logging.error("Error, you must have a default section in config file (for now)") exit(1) @@ -953,6 +1887,9 @@ def configure(self, args): 'vsphere_password': os.environ.get('VSPHERE_PASSWORD'), 'ignore_ssl': get_bool_env('VSPHERE_IGNORE_SSL', False), 'specs_size': os.environ.get('VSPHERE_SPECS_SIZE', 5000), + 'fetch_custom_attributes': get_bool_env('VSPHERE_FETCH_CUSTOM_ATTRIBUTES', False), + 'fetch_tags': get_bool_env('VSPHERE_FETCH_TAGS', False), + 'fetch_alarms': get_bool_env('VSPHERE_FETCH_ALARMS', False), 'collect_only': { 'vms': get_bool_env('VSPHERE_COLLECT_VMS', True), 'vmguests': get_bool_env('VSPHERE_COLLECT_VMGUESTS', True), @@ -977,6 +1914,9 @@ def configure(self, args): 'vsphere_password': os.environ.get('VSPHERE_{}_PASSWORD'.format(section)), 'ignore_ssl': get_bool_env('VSPHERE_{}_IGNORE_SSL'.format(section), False), 'specs_size': os.environ.get('VSPHERE_{}_SPECS_SIZE'.format(section), 5000), + 'fetch_custom_attributes': get_bool_env('VSPHERE_{}_FETCH_CUSTOM_ATTRIBUTES'.format(section), False), + 'fetch_tags': get_bool_env('VSPHERE_{}_FETCH_TAGS'.format(section), False), + 'fetch_alarms': get_bool_env('VSPHERE_{}_FETCH_ALARMS'.format(section), False), 'collect_only': { 'vms': get_bool_env('VSPHERE_{}_COLLECT_VMS'.format(section), True), 'vmguests': 
get_bool_env('VSPHERE_{}_COLLECT_VMGUESTS'.format(section), True), @@ -1032,7 +1972,10 @@ def generate_latest_metrics(self, request): self.config[section]['vsphere_password'], self.config[section]['collect_only'], self.config[section]['specs_size'], + self.config[section]['fetch_custom_attributes'], self.config[section]['ignore_ssl'], + self.config[section]['fetch_tags'], + self.config[section]['fetch_alarms'], ) metrics = yield collector.collect() @@ -1047,7 +1990,6 @@ def generate_latest_metrics(self, request): class HealthzResource(Resource): - isLeaf = True def render_GET(self, request): @@ -1091,10 +2033,15 @@ def main(argv=None): parser = argparse.ArgumentParser(description='VMWare metrics exporter for Prometheus') parser.add_argument('-c', '--config', dest='config_file', default=None, help="configuration file") + parser.add_argument('-a', '--address', dest='address', type=str, + default='', help="HTTP address to expose metrics") parser.add_argument('-p', '--port', dest='port', type=int, default=9272, help="HTTP port to expose metrics") parser.add_argument('-l', '--loglevel', dest='loglevel', default="INFO", help="Set application loglevel INFO, DEBUG") + parser.add_argument('-v', '--version', action="version", + version='vmware_exporter {version}'.format(version=__version__), + help='Print version and exit') args = parser.parse_args(argv or sys.argv[1:]) @@ -1106,8 +2053,8 @@ def main(argv=None): reactor.suggestThreadPoolSize(25) factory = Site(registerEndpoints(args)) - logging.info("Starting web server on port {port}".format(port=args.port)) - endpoint = endpoints.TCP4ServerEndpoint(reactor, args.port) + logging.info("Starting web server on port {address}:{port}".format(address=args.address, port=args.port)) + endpoint = endpoints.TCP4ServerEndpoint(reactor, args.port, interface=args.address) endpoint.listen(factory) reactor.run()