Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 30 additions & 0 deletions app/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,34 @@
"How many concurrent requests are currently being served",
)


class ConcurrentRequestCounter:
    """Counter for tracking concurrent requests in an Eventlet worker.

    Kept separate from the Prometheus gauge (CONCURRENT_REQUESTS) so that
    observability concerns stay separate from application logic (load
    shedding reads this counter to decide whether to reject requests).

    Note on locking: no lock is needed here because Eventlet uses
    cooperative concurrency (green threads) which only yield at I/O
    boundaries, so ``increment``/``decrement`` run to completion without
    interleaving within a worker process. Do NOT rely on this class being
    safe under preemptive OS threads: ``self._count += 1`` compiles to a
    read-modify-write sequence of bytecodes, and the GIL does not make
    that sequence atomic — concurrent OS threads can lose updates.
    """

    def __init__(self) -> None:
        # Number of requests currently in flight in this worker.
        self._count: int = 0

    def increment(self) -> None:
        self._count += 1

    def decrement(self) -> None:
        # Floor at zero so a stray extra decrement (e.g. an after_request
        # hook firing without a matching before_request) cannot drive the
        # count negative and permanently skew load-shedding decisions.
        self._count = max(0, self._count - 1)

    def get(self) -> int:
        return self._count

    def __repr__(self) -> str:
        return f"{type(self).__name__}(count={self._count})"


# Module-level singleton: shared by the before_request/after_request hooks
# registered in init_app, which increment/decrement it around each request.
concurrent_request_counter = ConcurrentRequestCounter()

# Callables invoked to clear memoized state; populated elsewhere in the file.
memo_resetters: list[Callable] = []

#
Expand Down Expand Up @@ -410,13 +438,15 @@ def register_v2_blueprints(application):
def init_app(app):
@app.before_request
def record_request_details():
concurrent_request_counter.increment()
CONCURRENT_REQUESTS.inc()

g.start = monotonic()
g.endpoint = request.endpoint

@app.after_request
def after_request(response):
concurrent_request_counter.decrement()
CONCURRENT_REQUESTS.dec()

response.headers.add("Access-Control-Allow-Origin", "*")
Expand Down
13 changes: 13 additions & 0 deletions app/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,19 @@ class Config:

NOTIFY_EVENTLET_STATS = os.getenv("NOTIFY_EVENTLET_STATS", "0") == "1"

# Load Shedding Configuration
# NOTE: only the exact lowercase-insensitive string "true" enables the feature;
# any other value (including "1") leaves it disabled.
LOAD_SHEDDING_ENABLED = os.getenv("LOAD_SHEDDING_ENABLED", "false").lower() == "true"
# High water mark: 75% of capacity (6 out of 8 concurrent requests per worker).
# Shedding kicks in when the per-worker concurrent request count reaches this.
HIGH_WATER_MARK = int(os.getenv("HIGH_WATER_MARK", "6"))
# Throttle services contributing this % or more of total request volume
THROTTLE_CONTRIBUTION_PCT = int(os.getenv("THROTTLE_CONTRIBUTION_PCT", "20"))
# Only apply contribution-based throttling when enough services are active
THROTTLE_CONTRIBUTION_MIN_SERVICES = int(os.getenv("THROTTLE_CONTRIBUTION_MIN_SERVICES", "5"))
# Only apply contribution-based throttling when total volume is above this threshold
THROTTLE_CONTRIBUTION_MIN_VOLUME = int(os.getenv("THROTTLE_CONTRIBUTION_MIN_VOLUME", "50"))
# Throttle services with volume this many times above median
THROTTLE_VOLUME_MEDIAN_MULTIPLE = int(os.getenv("THROTTLE_VOLUME_MEDIAN_MULTIPLE", "10"))

###########################
# Default config values ###
###########################
Expand Down
9 changes: 9 additions & 0 deletions app/errors.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@

from app.authentication.auth import AuthError
from app.exceptions import ArchiveValidationError
from app.load_shedding import ServiceUnavailableError


class VirusScanError(Exception):
Expand Down Expand Up @@ -53,6 +54,14 @@ def invalid_format(error: InvalidRecipientError) -> ResponseReturnValue:
def authentication_error(error: AuthError) -> ResponseReturnValue:
return jsonify(result="error", message=error.message), error.code

@blueprint.errorhandler(ServiceUnavailableError)
def service_unavailable_error(error: ServiceUnavailableError) -> ResponseReturnValue:
    """Turn a load-shedding rejection into a 429 response with a Retry-After hint."""
    current_app.logger.info(error)
    payload = jsonify(result="error", message=error.message)
    payload.status_code = 429
    payload.headers["Retry-After"] = str(error.retry_after)
    return payload

@blueprint.errorhandler(ValidationError)
def marshmallow_validation_error(error: ValidationError) -> ResponseReturnValue:
current_app.logger.info(error)
Expand Down
Loading