diff --git a/.github/workflows/container-image.yml b/.github/workflows/container-image.yml
new file mode 100644
index 0000000..f0b0aa6
--- /dev/null
+++ b/.github/workflows/container-image.yml
@@ -0,0 +1,96 @@
+name: Build Container Image
+
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+
+on:
+  push:
+    branches: [ "main" ]
+    # Publish semver tags as releases.
+    tags: [ '*.*.*' ]
+  pull_request:
+    branches: [ "main" ]
+
+env:
+  # Use docker.io for Docker Hub if empty
+  REGISTRY: ghcr.io
+  # github.repository as <account>/<repo>
+  IMAGE_NAME: ${{ github.repository }}
+
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+      packages: write
+      # This is used to complete the identity challenge
+      # with sigstore/fulcio when running outside of PRs.
+      id-token: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      # Install the cosign tool except on PR
+      # https://github.com/sigstore/cosign-installer
+      - name: Install cosign
+        if: github.event_name != 'pull_request'
+        uses: sigstore/cosign-installer@v3.4.0
+        with:
+          cosign-release: 'v2.2.3'
+
+      # Set up QEMU to be able to build for multiple architectures
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+
+      # Workaround: https://github.com/docker/build-push-action/issues/461
+      - name: Setup Docker buildx
+        uses: docker/setup-buildx-action@v3.0.0
+
+      # Login against a Docker registry except on PR
+      # https://github.com/docker/login-action
+      - name: Log into registry ${{ env.REGISTRY }}
+        if: github.event_name != 'pull_request'
+        uses: docker/login-action@v3.0.0
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      # Extract metadata (tags, labels) for Docker
+      # https://github.com/docker/metadata-action
+      - name: Extract Docker metadata
+        id: meta
+        uses: docker/metadata-action@v5.5.1
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+
+      # Build and push Docker image with Buildx (don't push on PR)
+      # https://github.com/docker/build-push-action
+      - name: Build and push Docker image
+        id: build-and-push
+        uses: docker/build-push-action@v5.1.0
+        with:
+          context: .
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          platforms: linux/amd64,linux/arm64
+
+
+      # Sign the resulting Docker image digest except on PRs.
+      # This will only write to the public Rekor transparency log when the Docker
+      # repository is public to avoid leaking data. If you would like to publish
+      # transparency data even for private images, pass --force to cosign below.
+      # https://github.com/sigstore/cosign
+      - name: Sign the published Docker image
+        if: ${{ github.event_name != 'pull_request' }}
+        env:
+          COSIGN_EXPERIMENTAL: "true"
+        # This step uses the identity token to provision an ephemeral certificate
+        # against the sigstore community Fulcio instance.
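+        # The metadata action emits tags one per line, so xargs signs each tag
+        # separately; signing <tag>@<digest> pins the signature to the exact
+        # image digest that was pushed rather than to a mutable tag.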
+        run: echo "${{ steps.meta.outputs.tags }}" | xargs -I {} cosign sign --yes {}@${{ steps.build-and-push.outputs.digest }}
\ No newline at end of file
diff --git a/.github/workflows/docker-push.yml b/.github/workflows/docker-push.yml
deleted file mode 100644
index 3d0bf4c..0000000
--- a/.github/workflows/docker-push.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-name: Build and Push Image
-on:
-  release:
-    types: [released]
-
-jobs:
-  build:
-    name: Build and push image
-    runs-on: ubuntu-20.04
-
-    steps:
-      - uses: actions/checkout@v2
-
-      - name: Build Image
-        id: build-image
-        uses: redhat-actions/buildah-build@v2
-        with:
-          image: pryorda/vmware_exporter
-          tags: latest ${{ github.sha }} ${{ github.event.release.tag_name }}
-          dockerfiles: |
-            ./Dockerfile
-
-      # Podman Login action (https://github.com/redhat-actions/podman-login) also be used to log in,
-      # in which case 'username' and 'password' can be omitted.
-      - name: Push To dhub
-        id: push-to-dhub
-        uses: redhat-actions/push-to-registry@v2
-        with:
-          image: ${{ steps.build-image.outputs.image }}
-          tags: ${{ steps.build-image.outputs.tags }}
-          registry: docker.io
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_PASSWORD }}
-
-      - name: Print image url
-        run: echo "Image pushed to ${{ steps.push-to-dhub.outputs.registry-paths }}"
diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 5c5a328..9c76f50 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -16,15 +16,15 @@ jobs:
     strategy:
       matrix:
-        python-version: [3.7]
+        python-version: [3.13]
 
     steps:
-    - uses: actions/checkout@v2
+    - uses: actions/checkout@v4.2.2
       with:
         fetch-depth: 0
         ref: ${{ github.event.pull_request.head.sha }}
     - name: Set up Python ${{ matrix.python-version }}
-      uses: actions/setup-python@v2
+      uses: actions/setup-python@v5.4.0
       with:
         python-version: ${{ matrix.python-version }}
     - name: Install dependencies
@@ -41,12 +41,11 @@ jobs:
     - name: Test with pytest
       run: |
        pytest --cov=. -v tests/unit
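+    # Coverage upload removed together with the codecov pin dropped from
+    # requirements-tests.txt below.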
-    - uses: codecov/codecov-action@v1
     - name: integration tests
       run: |
        pytest tests/integration
     - name: Setup ruby env
-      uses: actions/setup-ruby@v1
+      uses: ruby/setup-ruby@v1.227.0
       with:
         ruby-version: 2.7
     - run: |
@@ -54,31 +53,33 @@
        export COMMIT_MESSAGE=$(git log -1)
        ruby validate-signature.rb "${COMMIT_MESSAGE}"
 
-  deploy:
-    strategy:
-      matrix:
-        python-version: [3.7]
-
-    runs-on: ubuntu-latest
-    if: ${{ github.ref == 'refs/heads/main' && github.event_name == 'push' }}
-
-    steps:
-    - uses: actions/checkout@v2
-      with:
-        token: ${{ secrets.GH_TOKEN }}
-        fetch-depth: 0
-    - name: Set up Python ${{ matrix.python-version }}
-      uses: actions/setup-python@v2
-      with:
-        python-version: ${{ matrix.python-version }}
-    - name: install semantic-release
-      run: |
-        pip install python-semantic-release
-    - name: deploy pip
-      run: |
-        git config --global user.name "semantic-release (via github actions)"
-        git config --global user.email "semantic-release@github-actions"
-        semantic-release publish
-      env: # Or as an environment variable
-        GH_TOKEN: ${{ secrets.GH_TOKEN }}
-        PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
+# Deactivated PyPI publishing until the adoption process is done
+#
+# deploy:
+#   strategy:
+#     matrix:
+#       python-version: [3.7]
+#
+#   runs-on: ubuntu-latest
+#   if: ${{ github.ref == 'refs/heads/main' && github.event_name == 'push' }}
+#
+#   steps:
+#   - uses: actions/checkout@v2
+#     with:
+#       token: ${{ secrets.GH_TOKEN }}
+#       fetch-depth: 0
+#   - name: Set up Python ${{ matrix.python-version }}
+#     uses: actions/setup-python@v2
+#     with:
+#       python-version: ${{ matrix.python-version }}
+#   - name: install semantic-release
+#     run: |
+#       pip install python-semantic-release
+#   - name: deploy pip
+#     run: |
+#       git config --global user.name "semantic-release (via github actions)"
+#       git config --global user.email "semantic-release@github-actions"
+#       semantic-release publish
+#     env: # Or as an environment variable
+#       GH_TOKEN: ${{ secrets.GH_TOKEN }}
+#       PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }}
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 37e87b2..01c9a68 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,53 +1,57 @@
+default_install_hook_types: [pre-commit, commit-msg]
 repos:
-- repo: https://github.com/pre-commit/mirrors-autopep8
-  rev: v1.5.6 # Use the sha / tag you want to point at
+- repo: https://github.com/hhatto/autopep8
+  rev: v2.3.2
   hooks:
   - id: autopep8
 - repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v1.3.0
+  rev: v5.0.0
   hooks:
   # Git state
   - id: check-merge-conflict
-    stages: [commit]
+    stages: [pre-commit]
   - id: check-added-large-files
-    stages: [commit]
+    stages: [pre-commit]
   # Sensitive information
   - id: detect-private-key
-    stages: [commit]
+    stages: [pre-commit]
   - id: detect-aws-credentials
-    stages: [commit]
+    stages: [pre-commit]
     args:
     - --allow-missing-credentials
   # Generic file state
   - id: trailing-whitespace
-    stages: [commit]
+    stages: [pre-commit]
   - id: mixed-line-ending
-    stages: [commit]
+    stages: [pre-commit]
   - id: end-of-file-fixer
-    stages: [commit]
+    stages: [pre-commit]
     exclude: .*\.tfvars$ # terraform fmt separates everything with blank lines leaving a trailing line at the end
   - id: check-executables-have-shebangs
-    stages: [commit]
+    stages: [pre-commit]
   # Language syntax/formatting
   - id: check-yaml
-    stages: [commit]
+    stages: [pre-commit]
   - id: check-json
-    stages: [commit]
+    stages: [pre-commit]
   - id: pretty-format-json
-    stages: [commit]
+    stages: [pre-commit]
     args:
     - --autofix
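+# flake8 is no longer bundled with pre-commit-hooks, so its hook is now
+# pinned from the project's own repository.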
+- repo: https://github.com/pycqa/flake8
+  rev: 7.1.2
+  hooks:
   - id: flake8
-    stages: [commit]
+    stages: [pre-commit]
     args:
     - --ignore=F705,E123,E402
 - repo: https://github.com/pryorda/dockerfilelint-precommit-hooks
   rev: v0.1.0
   hooks:
   - id: dockerfilelint
-    stages: [commit]
+    stages: [pre-commit]
 - repo: https://github.com/mattlqx/pre-commit-sign
-  rev: v1.1.3
+  rev: v1.2.0
   hooks:
   - id: sign-commit
     stages: [commit-msg]
diff --git a/Dockerfile b/Dockerfile
index 4ade0e9..3803c3b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM python:3.7-alpine
+FROM python:3.13-alpine
 LABEL MAINTAINER="Daniel Pryor "
 LABEL NAME=vmware_exporter
diff --git a/requirements-tests.txt b/requirements-tests.txt
index 3e3cc7a..ac05ce1 100644
--- a/requirements-tests.txt
+++ b/requirements-tests.txt
@@ -1,7 +1,6 @@
 pytest_docker_tools==0.2.0
-pytest==5.4.1
+pytest==6.2.5
 pytest-cov==2.8.1
 pytest-twisted==1.12
-codecov==2.0.17
 flake8>=3.6.0
 pyflakes>=1.5.0
diff --git a/requirements.txt b/requirements.txt
index d924920..6e2ffe3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,7 @@
 prometheus-client==0.0.19
 pytz
 pyvmomi>=6.5
-twisted>=14.0.2
+twisted==24.7.0
 pyyaml>=5.1
 service-identity
+requests
diff --git a/tests/unit/test_vmware_exporter.py b/tests/unit/test_vmware_exporter.py
index 9aa54a9..c14463e 100644
--- a/tests/unit/test_vmware_exporter.py
+++ b/tests/unit/test_vmware_exporter.py
@@ -62,6 +62,7 @@ def test_collect_vms():
         'datastores': True,
         'hosts': True,
         'snapshots': True,
+        'volumes': False,
     }
 
     # Test runtime.host not found
@@ -362,6 +363,7 @@ def test_metrics_without_hostaccess():
         'datastores': False,
         'hosts': False,
         'snapshots': False,
+        'volumes': False,
     }
 
     collector = VmwareCollector(
@@ -510,6 +512,7 @@ def test_collect_vm_perf():
         'datastores': True,
         'hosts': True,
         'snapshots': True,
+        'volumes': False,
     }
     collector = VmwareCollector(
         '127.0.0.1',
@@ -630,6 +633,7 @@ def test_collect_hosts():
         'datastores': True,
         'hosts': True,
         'snapshots': True,
+        'volumes': False,
     }
     collector = VmwareCollector(
         '127.0.0.1',
@@ -991,6 +995,7 @@ def test_collect_host_perf():
         'datastores': False,
         'hosts': True,
         'snapshots': False,
+        'volumes': False,
     }
     collector = VmwareCollector(
         '127.0.0.1',
@@ -1105,6 +1110,7 @@ def test_collect_datastore():
         'datastores': True,
         'hosts': True,
         'snapshots': True,
+        'volumes': False,
     }
     collector = VmwareCollector(
         '127.0.0.1',
@@ -1206,6 +1212,7 @@ def test_collect():
         'datastores': True,
         'hosts': True,
         'snapshots': True,
+        'volumes': False,
     }
     collector = VmwareCollector(
         '127.0.0.1',
@@ -1242,6 +1249,7 @@ def test_collect_deferred_error_works():
         'datastores': True,
         'hosts': True,
         'snapshots': True,
+        'volumes': False,
     }
     collector = VmwareCollector(
         '127.0.0.1',
@@ -1344,6 +1352,7 @@ def test_vmware_get_inventory():
         'datastores': True,
         'hosts': True,
         'snapshots': True,
+        'volumes': False,
     }
     collector = VmwareCollector(
         '127.0.0.1',
@@ -1388,6 +1397,7 @@ def test_vmware_connect():
         'datastores': True,
         'hosts': True,
         'snapshots': True,
+        'volumes': False,
     }
     collector = VmwareCollector(
         '127.0.0.1',
@@ -1416,6 +1426,7 @@ def test_vmware_disconnect():
         'datastores': True,
         'hosts': True,
         'snapshots': True,
+        'volumes': False,
     }
     collector = VmwareCollector(
         '127.0.0.1',
@@ -1451,6 +1462,7 @@ def test_counter_ids():
         'datastores': True,
         'hosts': True,
         'snapshots': True,
+        'volumes': False,
     }
     collector = VmwareCollector(
         '127.0.0.1',
@@ -1607,6 +1619,7 @@ def test_vmware_resource_async_render_GET_section():
                 'snapshots': True,
                 'vmguests': True,
                 'vms': True,
+                'volumes': False,
             }
         },
         'mysection': {
@@ -1624,6 +1637,7 @@
                 'snapshots': True,
                 'vmguests': True,
                 'vms': True,
+                'volumes': False,
             }
         }
     }
@@ -1686,6 +1700,7 @@ def test_config_env_multiple_sections():
                 'snapshots': True,
                 'vmguests': True,
                 'vms': True,
+                'volumes': False,
             }
         },
         'mysection': {
@@ -1703,6 +1718,7 @@
                 'snapshots': True,
                 'vmguests': True,
                 'vms': True,
+                'volumes': False,
             }
         }
     }
diff --git a/vmware_exporter/helpers.py b/vmware_exporter/helpers.py
index cb5fc98..dd8ebd7 100644
--- a/vmware_exporter/helpers.py
+++ b/vmware_exporter/helpers.py
@@ -5,7 +5,7 @@
 
 def get_bool_env(key: str, default: bool):
     value = os.environ.get(key, default)
-    return value if type(value) == bool else value.lower() == 'true'
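+    # An identity check (`is`) satisfies flake8's E721; the value is only a
+    # bool when the default was returned, otherwise it is an env string.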
+    return value if type(value) is bool else value.lower() == 'true'
 
 
 def batch_fetch_properties(content, obj_type, properties):
diff --git a/vmware_exporter/vmware_exporter.py b/vmware_exporter/vmware_exporter.py
index e497fa2..0409d2f 100755
--- a/vmware_exporter/vmware_exporter.py
+++ b/vmware_exporter/vmware_exporter.py
@@ -101,6 +101,7 @@ def __init__(
             'datastores': ['ds_name', 'dc_name', 'ds_cluster'],
             'hosts': ['host_name', 'dc_name', 'cluster_name'],
             'host_perf': ['host_name', 'dc_name', 'cluster_name'],
+            'volumes': ['datastore', 'volume', 'backing_file_path'],
         }
 
         # if tags are gonna be fetched 'tags' will be a label too
@@ -295,6 +296,23 @@ def _create_metric_containers(self):
                 'VMWare sensor redundancy value (1=ok / 0=ko) labeled by sensor name from the host.',
                 labels=self._labelNames['hosts'] + ['name']),
         }
+        metric_list['volumes'] = {
+            'vmware_volume_snapshots': GaugeMetricFamily(
+                'vmware_volume_snapshots',
+                'Number of snapshots for the volume',
+                labels=self._labelNames['volumes'],
+            ),
+            'vmware_volume_capacity_bytes': GaugeMetricFamily(
+                'vmware_volume_capacity_bytes',
+                'The configured capacity of the volume',
+                labels=self._labelNames['volumes'],
+            ),
+            'vmware_volume_snapshot_createtime': GaugeMetricFamily(
+                'vmware_volume_snapshot_createtime',
+                'Create time of the snapshot as a unix timestamp',
+                labels=self._labelNames['volumes'] + ['snapshot_id'],
+            )
+        }
 
         """
         if alarms are being retrieved, metrics have to been created here
@@ -427,6 +445,9 @@ def collect(self):
             tasks.append(self._vmware_get_hosts(metrics))
             tasks.append(self._vmware_get_host_perf_manager_metrics(metrics))
 
+        if collect_only['volumes'] is True:
+            tasks.append(self._vmware_get_volumes(metrics))
+
         yield parallelize(*tasks)
 
         yield self._vmware_disconnect()
@@ -534,7 +555,7 @@ def tags(self):
         and linked to object moid
         """
         logging.info("Fetching tags")
-        start = datetime.datetime.utcnow()
+        start = datetime.datetime.now(datetime.UTC)
 
         attachedObjs = yield self._attachedObjectsOnTags
         tagNames = yield self._tagNames
@@ -556,7 +577,7 @@ def tags(self):
             else:
                 tags[section][obj.get('id')].append(tagName)
 
-        fetch_time = datetime.datetime.utcnow() - start
+        fetch_time = datetime.datetime.now(datetime.UTC) - start
         logging.info("Fetched tags ({fetch_time})".format(fetch_time=fetch_time))
 
         return tags
@@ -612,7 +633,7 @@ def batch_fetch_properties(self, objtype, properties):
     @defer.inlineCallbacks
     def datastore_inventory(self):
         logging.info("Fetching vim.Datastore inventory")
-        start = datetime.datetime.utcnow()
+        start = datetime.datetime.now(datetime.UTC)
         properties = [
             'name',
             'summary.capacity',
@@ -657,7 +678,7 @@ def datastore_inventory(self):
             ]
         )
 
-        fetch_time = datetime.datetime.utcnow() - start
+        fetch_time = datetime.datetime.now(datetime.UTC) - start
         logging.info("Fetched vim.Datastore inventory ({fetch_time})".format(fetch_time=fetch_time))
 
         return datastores
@@ -666,7 +687,7 @@
     @defer.inlineCallbacks
     def host_system_inventory(self):
         logging.info("Fetching vim.HostSystem inventory")
-        start = datetime.datetime.utcnow()
+        start = datetime.datetime.now(datetime.UTC)
         properties = [
             'name',
             'parent',
@@ -723,7 +744,7 @@ def host_system_inventory(self):
             ]
         )
 
-        fetch_time = datetime.datetime.utcnow() - start
+        fetch_time = datetime.datetime.now(datetime.UTC) - start
         logging.info("Fetched vim.HostSystem inventory ({fetch_time})".format(fetch_time=fetch_time))
 
         return host_systems
@@ -732,7 +753,7 @@
     @defer.inlineCallbacks
     def vm_inventory(self):
         logging.info("Fetching vim.VirtualMachine inventory")
-        start = datetime.datetime.utcnow()
+        start = datetime.datetime.now(datetime.UTC)
         properties = [
             'name',
             'runtime.host',
@@ -792,7 +813,7 @@ def vm_inventory(self):
             ]
         )
 
-        fetch_time = datetime.datetime.utcnow() - start
+        fetch_time = datetime.datetime.now(datetime.UTC) - start
         logging.info("Fetched vim.VirtualMachine inventory ({fetch_time})".format(fetch_time=fetch_time))
 
         return virtual_machines
@@ -1847,6 +1868,51 @@ def _vmware_get_hosts(self, host_metrics):
         logging.info("Finished host metrics collection")
         return results
 
+    @defer.inlineCallbacks
+    def _vmware_get_volumes(self, vol_metrics):
+        """
+        Get Volume information
+        """
+
+        datastores = yield parallelize(self.datastore_inventory)
+        content = yield self.content
+
+        for datastore_id, datastore in datastores[0].items():
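+            # The vStorageObjectManager tracks First Class Disks (FCDs);
+            # ListVStorageObject returns only IDs, so each object and its
+            # snapshot info must be retrieved individually below.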
+            volumes = content.vStorageObjectManager.ListVStorageObject(datastore['obj'])
+            for volume_ref in volumes:
+                try:
+                    volume = content.vStorageObjectManager.RetrieveVStorageObject(volume_ref, datastore['obj'])
+                except vim.fault.NotFound:
+                    logging.error("Volume %s was listed in the datastore, but the storage object could not be found",
+                                  volume_ref.id)
+                    continue
+                except Exception as error:
+                    logging.error("Error fetching volume information for volume %s: %s", volume_ref.id, error)
+                    continue
+                try:
+                    snapshot_info = content.vStorageObjectManager.RetrieveSnapshotInfo(volume_ref, datastore['obj'])
+                    vol_metrics['vmware_volume_snapshots'].add_metric([
+                        datastore['name'],
+                        volume.config.name,
+                        volume.config.backing.filePath
+                    ], len(snapshot_info.snapshots))
+
+                    vol_metrics['vmware_volume_capacity_bytes'].add_metric([
+                        datastore['name'],
+                        volume.config.name,
+                        volume.config.backing.filePath
+                    ], volume.config.capacityInMB * 1024 * 1024)
+
+                    for snapshot in snapshot_info.snapshots:
+                        vol_metrics['vmware_volume_snapshot_createtime'].add_metric([
+                            datastore['name'],
+                            volume.config.name,
+                            volume.config.backing.filePath,
+                            snapshot.id.id,
+                        ], int(snapshot.createTime.timestamp()))
+                except vim.fault.NotFound:
+                    logging.error("Snapshot info for volume %s not found", volume_ref.id)
+                except Exception as error:
+                    logging.error("Error fetching snapshot information for volume %s: %s", volume_ref.id, error)
+
 
 class ListCollector(object):
@@ -1896,6 +1962,7 @@ def configure(self, args):
                     'datastores': get_bool_env('VSPHERE_COLLECT_DATASTORES', True),
                     'hosts': get_bool_env('VSPHERE_COLLECT_HOSTS', True),
                     'snapshots': get_bool_env('VSPHERE_COLLECT_SNAPSHOTS', True),
+                    'volumes': get_bool_env('VSPHERE_COLLECT_VOLUMES', False),
                 }
             }
         }
@@ -1923,6 +1990,7 @@ def configure(self, args):
                         'datastores': get_bool_env('VSPHERE_{}_COLLECT_DATASTORES'.format(section), True),
                         'hosts': get_bool_env('VSPHERE_{}_COLLECT_HOSTS'.format(section), True),
                         'snapshots': get_bool_env('VSPHERE_{}_COLLECT_SNAPSHOTS'.format(section), True),
+                        'volumes': get_bool_env('VSPHERE_{}_COLLECT_VOLUMES'.format(section), False),
                     }
                 }
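Reviewer's note: a minimal standalone sketch of how the new volume gauges are expected to render once VSPHERE_COLLECT_VOLUMES is enabled, assuming a reasonably recent prometheus-client; the FakeVolumeCollector class and all sample label values are invented for illustration and are not part of this change.

# Sketch only: mirrors the label scheme added in _labelNames['volumes'].
from prometheus_client import generate_latest
from prometheus_client.core import CollectorRegistry, GaugeMetricFamily


class FakeVolumeCollector(object):
    """Hypothetical stand-in for VmwareCollector, emitting one volume gauge."""

    def collect(self):
        gauge = GaugeMetricFamily(
            'vmware_volume_capacity_bytes',
            'The configured capacity of the volume',
            labels=['datastore', 'volume', 'backing_file_path'])
        # 10 GiB, mirroring the capacityInMB * 1024 * 1024 conversion above
        gauge.add_metric(['ds1', 'vol1', '[ds1] fcd/vol1.vmdk'], 10 * 1024 ** 3)
        yield gauge


registry = CollectorRegistry()
registry.register(FakeVolumeCollector())
print(generate_latest(registry).decode())

Running this prints standard exposition text with the three volume labels, which is what a scrape of /metrics should show per First Class Disk.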