1 change: 1 addition & 0 deletions prowler/CHANGELOG.md
@@ -16,6 +16,7 @@ All notable changes to the **Prowler SDK** are documented in this file.
- Update Azure Container Registry service metadata to new format [(#9615)](https://github.com/prowler-cloud/prowler/pull/9615)
- Update Azure Cosmos DB service metadata to new format [(#9616)](https://github.com/prowler-cloud/prowler/pull/9616)
- Update Azure Databricks service metadata to new format [(#9617)](https://github.com/prowler-cloud/prowler/pull/9617)
- Parallelize Azure Key Vault vaults and vault contents retrieval to improve performance [(#9876)](https://github.com/prowler-cloud/prowler/pull/9876)

---

23 changes: 23 additions & 0 deletions prowler/providers/azure/lib/service/service.py
@@ -1,6 +1,10 @@
from concurrent.futures import ThreadPoolExecutor, as_completed

from prowler.lib.logger import logger
from prowler.providers.azure.azure_provider import AzureProvider

MAX_WORKERS = 10


class AzureService:
    def __init__(
@@ -20,6 +24,25 @@
        self.audit_config = provider.audit_config
        self.fixer_config = provider.fixer_config

        self.thread_pool = ThreadPoolExecutor(max_workers=MAX_WORKERS)

    def __threading_call__(self, call, iterator):
        """Execute a function across multiple items using threading."""
        items = list(iterator) if not isinstance(iterator, list) else iterator

        futures = {self.thread_pool.submit(call, item): item for item in items}
        results = []

        for future in as_completed(futures):
            try:
                result = future.result()
                if result is not None:
                    results.append(result)
            except Exception:
                pass

Check notice from Code scanning / CodeQL: Empty except (Note)

'except' clause does nothing but pass and there is no explanatory comment.

Copilot Autofix (AI, 1 day ago)

In general, empty except blocks should be replaced with handling that at minimum logs the exception, and optionally re-raises it or aggregates it, depending on how critical the failure is. In this case, we want to preserve the current behavior of continuing to process other futures while improving observability.

The minimal-impact fix is to log any exception raised by future.result() using the existing logger, similar to how __set_clients__ logs errors. We should not re-raise, because that would change the function’s behavior from “best effort, skip failures” to “fail the entire call on the first error”. Instead, we log the exception (optionally including the associated item and the traceback line number) and keep skipping that result, preserving the public behavior while making failures visible.

Concretely, in prowler/providers/azure/lib/service/service.py:

  • Inside AzureService.__threading_call__, replace the except Exception: pass block with except Exception as error: and a logger.error(...) call. We can mirror the formatting used in __set_clients__, and additionally include the item associated with the future (available via futures[future]).
  • No new imports are needed; logger is already imported at the top of the file.
Suggested changeset 1: prowler/providers/azure/lib/service/service.py

Run the following command in your local git repository to apply this patch:

cat << 'EOF' | git apply
diff --git a/prowler/providers/azure/lib/service/service.py b/prowler/providers/azure/lib/service/service.py
--- a/prowler/providers/azure/lib/service/service.py
+++ b/prowler/providers/azure/lib/service/service.py
@@ -38,8 +38,11 @@
                 result = future.result()
                 if result is not None:
                     results.append(result)
-            except Exception:
-                pass
+            except Exception as error:
+                logger.error(
+                    f"{error.__class__.__name__}[{error.__traceback__.tb_lineno}] "
+                    f"while processing item {futures.get(future)!r}: {error}"
+                )
 
         return results
 
EOF

        return results

    def __set_clients__(self, identity, session, service, region_config):
        clients = {}
        try:
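As an aside (not part of this pull request), the fan-out/collect pattern that __threading_call__ implements can be sketched standalone. The snippet below is a simplified, self-contained illustration under those assumptions, not the Prowler implementation:

from concurrent.futures import ThreadPoolExecutor, as_completed


def threading_call(call, items, max_workers=10):
    # Submit one task per item and collect non-None results as they complete.
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = {pool.submit(call, item): item for item in items}
        results = []
        for future in as_completed(futures):
            try:
                result = future.result()
                if result is not None:
                    results.append(result)
            except Exception as error:
                # Best effort: log and skip the failing item instead of aborting the call.
                print(f"item {futures[future]!r} failed: {error}")
        return results


# Example: square even numbers in parallel, dropping the odd ones.
print(threading_call(lambda n: n * n if n % 2 == 0 else None, range(6)))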
prowler/providers/azure/services/keyvault/keyvault_rbac_secret_expiration_set/keyvault_rbac_secret_expiration_set.py
@@ -5,22 +5,21 @@
class keyvault_rbac_secret_expiration_set(Check):
def execute(self) -> Check_Report_Azure:
findings = []

for subscription, key_vaults in keyvault_client.key_vaults.items():
for keyvault in key_vaults:
if keyvault.properties.enable_rbac_authorization and keyvault.secrets:
report = Check_Report_Azure(
metadata=self.metadata(), resource=keyvault
)
report.subscription = subscription
report.status = "PASS"
report.status_extended = f"Keyvault {keyvault.name} from subscription {subscription} has all the secrets with expiration date set."
has_secret_without_expiration = False
for secret in keyvault.secrets:
report = Check_Report_Azure(
metadata=self.metadata(), resource=secret
)
report.subscription = subscription
if not secret.attributes.expires and secret.enabled:
report.status = "FAIL"
report.status_extended = f"Keyvault {keyvault.name} from subscription {subscription} has the secret {secret.name} without expiration date set."
has_secret_without_expiration = True
findings.append(report)
if not has_secret_without_expiration:
report.status_extended = f"Secret '{secret.name}' in KeyVault '{keyvault.name}' does not have expiration date set."
else:
report.status = "PASS"
report.status_extended = f"Secret '{secret.name}' in KeyVault '{keyvault.name}' has expiration date set."
findings.append(report)

return findings
253 changes: 160 additions & 93 deletions prowler/providers/azure/services/keyvault/keyvault_service.py
@@ -1,3 +1,4 @@
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from datetime import datetime
from typing import List, Optional, Union
@@ -16,103 +17,149 @@
class KeyVault(AzureService):
def __init__(self, provider: AzureProvider):
super().__init__(KeyVaultManagementClient, provider)
# TODO: review this credentials assignment
self.key_vaults = self._get_key_vaults(provider)
self._provider = provider
self.key_vaults = self._get_key_vaults()

def _get_key_vaults(self, provider):
def _get_key_vaults(self):
"""
Get all KeyVaults with parallel processing.

Optimizations:
1. Uses list_by_subscription() for full Vault objects
2. Processes vaults in parallel using __threading_call__
3. Each vault's keys/secrets/monitor fetched in parallel
"""
logger.info("KeyVault - Getting key_vaults...")
key_vaults = {}

for subscription, client in self.clients.items():
try:
key_vaults.update({subscription: []})
key_vaults_list = client.vaults.list()
for keyvault in key_vaults_list:
resource_group = keyvault.id.split("/")[4]
keyvault_name = keyvault.name
keyvault_properties = client.vaults.get(
resource_group, keyvault_name
).properties
keys = self._get_keys(
subscription, resource_group, keyvault_name, provider
)
secrets = self._get_secrets(
subscription, resource_group, keyvault_name
)
key_vaults[subscription].append(
KeyVaultInfo(
id=getattr(keyvault, "id", ""),
name=getattr(keyvault, "name", ""),
location=getattr(keyvault, "location", ""),
resource_group=resource_group,
properties=VaultProperties(
tenant_id=getattr(keyvault_properties, "tenant_id", ""),
enable_rbac_authorization=getattr(
keyvault_properties,
"enable_rbac_authorization",
False,
),
private_endpoint_connections=[
PrivateEndpointConnection(id=conn.id)
for conn in (
getattr(
keyvault_properties,
"private_endpoint_connections",
[],
)
or []
)
],
enable_soft_delete=getattr(
keyvault_properties, "enable_soft_delete", False
),
enable_purge_protection=getattr(
keyvault_properties,
"enable_purge_protection",
False,
),
public_network_access_disabled=(
getattr(
keyvault_properties,
"public_network_access",
"Enabled",
)
== "Disabled"
),
),
keys=keys,
secrets=secrets,
monitor_diagnostic_settings=self._get_vault_monitor_settings(
keyvault_name, resource_group, subscription
),
)
)
key_vaults[subscription] = []
vaults_list = list(client.vaults.list_by_subscription())

if not vaults_list:
continue

# Prepare items for parallel processing
items = [
{"subscription": subscription, "keyvault": vault}
for vault in vaults_list
]

# Process all KeyVaults in parallel
results = self.__threading_call__(self._process_single_keyvault, items)
key_vaults[subscription] = results

except Exception as error:
logger.error(
f"Subscription name: {subscription} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)

return key_vaults

def _get_keys(self, subscription, resource_group, keyvault_name, provider):
logger.info(f"KeyVault - Getting keys for {keyvault_name}...")
def _process_single_keyvault(self, item: dict) -> Optional["KeyVaultInfo"]:
"""Process a single KeyVault in parallel."""
subscription = item["subscription"]
keyvault = item["keyvault"]

try:
resource_group = keyvault.id.split("/")[4]
keyvault_name = keyvault.name
keyvault_properties = keyvault.properties

# Fetch keys, secrets, and monitor in parallel
with ThreadPoolExecutor(max_workers=3) as executor:
keys_future = executor.submit(
self._get_keys, subscription, resource_group, keyvault_name
)
secrets_future = executor.submit(
self._get_secrets, subscription, resource_group, keyvault_name
)
monitor_future = executor.submit(
self._get_vault_monitor_settings,
keyvault_name,
resource_group,
subscription,
)

keys = keys_future.result()
secrets = secrets_future.result()
monitor_settings = monitor_future.result()

return KeyVaultInfo(
id=getattr(keyvault, "id", ""),
name=getattr(keyvault, "name", ""),
location=getattr(keyvault, "location", ""),
resource_group=resource_group,
properties=VaultProperties(
tenant_id=getattr(keyvault_properties, "tenant_id", ""),
enable_rbac_authorization=getattr(
keyvault_properties,
"enable_rbac_authorization",
False,
),
private_endpoint_connections=[
PrivateEndpointConnection(id=conn.id)
for conn in (
getattr(
keyvault_properties,
"private_endpoint_connections",
[],
)
or []
)
],
enable_soft_delete=getattr(
keyvault_properties, "enable_soft_delete", False
),
enable_purge_protection=getattr(
keyvault_properties,
"enable_purge_protection",
False,
),
public_network_access_disabled=(
getattr(
keyvault_properties,
"public_network_access",
"Enabled",
)
== "Disabled"
),
),
keys=keys,
secrets=secrets,
monitor_diagnostic_settings=monitor_settings,
)

except Exception as error:
logger.error(
f"KeyVault {keyvault.name} in {subscription} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)
return None

def _get_keys(self, subscription, resource_group, keyvault_name):
keys = []
keys_dict = {}

try:
client = self.clients[subscription]
keys_list = client.keys.list(resource_group, keyvault_name)
for key in keys_list:
keys.append(
Key(
id=getattr(key, "id", ""),
name=getattr(key, "name", ""),
key_obj = Key(
id=getattr(key, "id", ""),
name=getattr(key, "name", ""),
enabled=getattr(key.attributes, "enabled", False),
location=getattr(key, "location", ""),
attributes=KeyAttributes(
enabled=getattr(key.attributes, "enabled", False),
location=getattr(key, "location", ""),
attributes=KeyAttributes(
enabled=getattr(key.attributes, "enabled", False),
created=getattr(key.attributes, "created", 0),
updated=getattr(key.attributes, "updated", 0),
expires=getattr(key.attributes, "expires", 0),
),
)
created=getattr(key.attributes, "created", 0),
updated=getattr(key.attributes, "updated", 0),
expires=getattr(key.attributes, "expires", 0),
),
)
keys.append(key_obj)
keys_dict[key_obj.name] = key_obj

except Exception as error:
logger.error(
f"Subscription name: {subscription} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
@@ -121,31 +168,52 @@ def _get_keys(self, subscription, resource_group, keyvault_name, provider):
try:
key_client = KeyClient(
vault_url=f"https://{keyvault_name}.vault.azure.net/",
# TODO: review the following line
credential=provider.session,
credential=self._provider.session,
)
properties = key_client.list_properties_of_keys()
for prop in properties:
policy = key_client.get_key_rotation_policy(prop.name)
for key in keys:
if key.name == prop.name:
key.rotation_policy = KeyRotationPolicy(
properties = list(key_client.list_properties_of_keys())

if properties:
items = [
{"key_client": key_client, "prop": prop} for prop in properties
]
rotation_results = self.__threading_call__(
self._get_single_rotation_policy, items
)

for name, policy in rotation_results:
if policy and name in keys_dict:
keys_dict[name].rotation_policy = KeyRotationPolicy(
id=getattr(policy, "id", ""),
lifetime_actions=[
KeyRotationLifetimeAction(action=action.action)
for action in getattr(policy, "lifetime_actions", [])
],
)

# TODO: handle different errors here since we are catching all HTTP Errors here
except HttpResponseError:
logger.warning(
f"Subscription name: {subscription} -- has no access policy configured for keyvault {keyvault_name}"
)

return keys

def _get_single_rotation_policy(self, item: dict) -> tuple:
"""Thread-safe rotation policy retrieval."""
key_client = item["key_client"]
prop = item["prop"]

try:
policy = key_client.get_key_rotation_policy(prop.name)
return (prop.name, policy)
except HttpResponseError:
return (prop.name, None)
except Exception as error:
logger.warning(
f"KeyVault - Failed to get rotation policy for key {prop.name}: {error}"
)
return (prop.name, None)

def _get_secrets(self, subscription, resource_group, keyvault_name):
logger.info(f"KeyVault - Getting secrets for {keyvault_name}...")
secrets = []
try:
client = self.clients[subscription]
@@ -177,12 +245,10 @@ def _get_secrets(self, subscription, resource_group, keyvault_name):
logger.error(
f"Subscription name: {subscription} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)

return secrets

def _get_vault_monitor_settings(self, keyvault_name, resource_group, subscription):
logger.info(
f"KeyVault - Getting monitor diagnostics settings for {keyvault_name}..."
)
monitor_diagnostics_settings = []
try:
monitor_diagnostics_settings = monitor_client.diagnostic_settings_with_uri(
@@ -192,8 +258,9 @@ def _get_vault_monitor_settings(self, keyvault_name, resource_group, subscription):
)
except Exception as error:
logger.error(
f"Subscription name: {self.subscription} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
f"Subscription name: {subscription} -- {error.__class__.__name__}[{error.__traceback__.tb_lineno}]: {error}"
)

return monitor_diagnostics_settings


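For context (also not part of this pull request), the two-level parallelism described in the new _get_key_vaults docstring (vaults processed concurrently, and each vault's keys, secrets, and diagnostic settings fetched concurrently) can be sketched in isolation. The fetch_* helpers below are placeholders standing in for the Azure SDK calls, not real Prowler or azure-mgmt APIs:

from concurrent.futures import ThreadPoolExecutor, as_completed


def fetch_keys(vault):
    # Placeholder standing in for client.keys.list(resource_group, vault_name).
    return [f"{vault}-key"]


def fetch_secrets(vault):
    # Placeholder standing in for client.secrets.list(resource_group, vault_name).
    return [f"{vault}-secret"]


def fetch_monitor(vault):
    # Placeholder standing in for diagnostic settings retrieval.
    return {}


def process_vault(vault):
    # Inner level: the three per-vault lookups run concurrently.
    with ThreadPoolExecutor(max_workers=3) as pool:
        keys_future = pool.submit(fetch_keys, vault)
        secrets_future = pool.submit(fetch_secrets, vault)
        monitor_future = pool.submit(fetch_monitor, vault)
        return {
            "vault": vault,
            "keys": keys_future.result(),
            "secrets": secrets_future.result(),
            "monitor": monitor_future.result(),
        }


def get_key_vaults(vaults, max_workers=10):
    # Outer level: every vault is processed in parallel; failed vaults are skipped.
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = [pool.submit(process_vault, vault) for vault in vaults]
        return [f.result() for f in as_completed(futures) if f.exception() is None]


print(get_key_vaults(["vault-a", "vault-b"]))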