diff --git a/.github/workflows/get-envs.py b/.github/workflows/get-envs.py index f5ef936936..14ea822da9 100755 --- a/.github/workflows/get-envs.py +++ b/.github/workflows/get-envs.py @@ -26,7 +26,7 @@ def main(f): joined_envs = ",".join(filtered_envs) assert joined_envs, ( # noqa: S101 - f"No environments found.\nenvironments = {str(environments)}\nGROUP_NUMBER = {GROUP_NUMBER + 1}\nTOTAL_GROUPS = {TOTAL_GROUPS}" + f"No environments found.\nenvironments = {environments!s}\nGROUP_NUMBER = {GROUP_NUMBER + 1}\nTOTAL_GROUPS = {TOTAL_GROUPS}" ) print(joined_envs) diff --git a/newrelic/api/application.py b/newrelic/api/application.py index 1219381518..46f38967e7 100644 --- a/newrelic/api/application.py +++ b/newrelic/api/application.py @@ -22,9 +22,9 @@ class Application: _lock = threading.Lock() - _instances = {} + _instances = {} # noqa: RUF012 - _delayed_callables = {} + _delayed_callables = {} # noqa: RUF012 @staticmethod def _instance(name, activate=True): diff --git a/newrelic/api/html_insertion.py b/newrelic/api/html_insertion.py index 6d306a8b62..ed1923088a 100644 --- a/newrelic/api/html_insertion.py +++ b/newrelic/api/html_insertion.py @@ -78,7 +78,7 @@ def insert_at_index(index): xua_meta = _xua_meta_re.search(data) charset_meta = _charset_meta_re.search(data) - index = max(xua_meta and xua_meta.end() or 0, charset_meta and charset_meta.end() or 0) + index = max((xua_meta and xua_meta.end()) or 0, (charset_meta and charset_meta.end()) or 0) if index: return insert_at_index(index) diff --git a/newrelic/api/log.py b/newrelic/api/log.py index 07896e6cb3..eacb270de3 100644 --- a/newrelic/api/log.py +++ b/newrelic/api/log.py @@ -131,7 +131,7 @@ def format(self, record): class NewRelicLogForwardingHandler(logging.Handler): - IGNORED_LOG_RECORD_KEYS = {"message", "msg"} + IGNORED_LOG_RECORD_KEYS = frozenset(("message", "msg")) def emit(self, record): try: diff --git a/newrelic/api/time_trace.py b/newrelic/api/time_trace.py index 8add4d421b..bef6f04561 100644 --- 
a/newrelic/api/time_trace.py +++ b/newrelic/api/time_trace.py @@ -436,7 +436,7 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, _, error_group_name = process_user_attribute("error.group.name", error_group_name_raw) if error_group_name is None or not isinstance(error_group_name, str): raise ValueError( - f"Invalid attribute value for error.group.name. Expected string, got: {repr(error_group_name_raw)}" + f"Invalid attribute value for error.group.name. Expected string, got: {error_group_name_raw!r}" ) except Exception: _logger.error( diff --git a/newrelic/common/agent_http.py b/newrelic/common/agent_http.py index 48a3d2e76f..5534142547 100644 --- a/newrelic/common/agent_http.py +++ b/newrelic/common/agent_http.py @@ -283,7 +283,7 @@ def __init__( else: self._host = proxy.host self._port = proxy.port or 443 - self._prefix = f"{self.PREFIX_SCHEME + host}:{str(port)}" + self._prefix = f"{self.PREFIX_SCHEME + host}:{port!s}" urlopen_kwargs["assert_same_host"] = False if proxy_headers: self._headers.update(proxy_headers) @@ -501,7 +501,7 @@ class ApplicationModeClient(SupportabilityMixin, HttpClient): class DeveloperModeClient(SupportabilityMixin, BaseClient): - RESPONSES = { + RESPONSES = { # noqa: RUF012 "preconnect": {"redirect_host": "fake-collector.newrelic.com"}, "agent_settings": [], "connect": { diff --git a/newrelic/common/encoding_utils.py b/newrelic/common/encoding_utils.py index 3620c7913d..5508f4a7a9 100644 --- a/newrelic/common/encoding_utils.py +++ b/newrelic/common/encoding_utils.py @@ -486,7 +486,7 @@ def text(self): if pr: pr = f"{pr:.6f}".rstrip("0").rstrip(".") - payload = f"0-0-{self['ac']}-{self['ap']}-{self.get('id', '')}-{self.get('tx', '')}-{'1' if self.get('sa') else '0'}-{pr}-{str(self['ti'])}" + payload = f"0-0-{self['ac']}-{self['ap']}-{self.get('id', '')}-{self.get('tx', '')}-{'1' if self.get('sa') else '0'}-{pr}-{self['ti']!s}" return f"{self.get('tk', self['ac'])}@nr={payload}" @classmethod diff --git 
a/newrelic/common/utilization.py b/newrelic/common/utilization.py index 2d5fd865db..22b158e3ec 100644 --- a/newrelic/common/utilization.py +++ b/newrelic/common/utilization.py @@ -161,7 +161,7 @@ class AWSUtilization(CommonUtilization): METADATA_HOST = "169.254.169.254" METADATA_PATH = "/latest/dynamic/instance-identity/document" METADATA_TOKEN_PATH = "/latest/api/token" # noqa: S105 - HEADERS = {"X-aws-ec2-metadata-token-ttl-seconds": "21600"} + HEADERS = {"X-aws-ec2-metadata-token-ttl-seconds": "21600"} # noqa: RUF012 VENDOR_NAME = "aws" _utilization_data = None @@ -219,18 +219,18 @@ def fetch(cls): class AzureUtilization(CommonUtilization): METADATA_HOST = "169.254.169.254" METADATA_PATH = "/metadata/instance/compute" - METADATA_QUERY = {"api-version": "2017-03-01"} + METADATA_QUERY = {"api-version": "2017-03-01"} # noqa: RUF012 EXPECTED_KEYS = ("location", "name", "vmId", "vmSize") - HEADERS = {"Metadata": "true"} + HEADERS = {"Metadata": "true"} # noqa: RUF012 VENDOR_NAME = "azure" class AzureFunctionUtilization(CommonUtilization): METADATA_HOST = "169.254.169.254" METADATA_PATH = "/metadata/instance/compute" - METADATA_QUERY = {"api-version": "2017-03-01"} + METADATA_QUERY = {"api-version": "2017-03-01"} # noqa: RUF012 EXPECTED_KEYS = ("faas.app_name", "cloud.region") - HEADERS = {"Metadata": "true"} + HEADERS = {"Metadata": "true"} # noqa: RUF012 VENDOR_NAME = "azurefunction" @staticmethod @@ -264,10 +264,10 @@ def get_values(cls, response): class GCPUtilization(CommonUtilization): EXPECTED_KEYS = ("id", "machineType", "name", "zone") - HEADERS = {"Metadata-Flavor": "Google"} + HEADERS = {"Metadata-Flavor": "Google"} # noqa: RUF012 METADATA_HOST = "metadata.google.internal" METADATA_PATH = "/computeMetadata/v1/instance/" - METADATA_QUERY = {"recursive": "true"} + METADATA_QUERY = {"recursive": "true"} # noqa: RUF012 VENDOR_NAME = "gcp" @classmethod diff --git a/newrelic/config.py b/newrelic/config.py index 879a4790f6..9a634bb8c3 100644 --- 
a/newrelic/config.py +++ b/newrelic/config.py @@ -50,7 +50,7 @@ ) from newrelic.core.config import Settings, apply_config_setting, default_host, fetch_config_setting -__all__ = ["initialize", "filter_app_factory"] +__all__ = ["filter_app_factory", "initialize"] _logger = logging.getLogger(__name__) diff --git a/newrelic/core/agent.py b/newrelic/core/agent.py index b712b129ed..ab5cdc19bd 100644 --- a/newrelic/core/agent.py +++ b/newrelic/core/agent.py @@ -121,8 +121,8 @@ class Agent: _instance_lock = threading.Lock() _instance = None - _startup_callables = [] - _registration_callables = {} + _startup_callables = [] # noqa: RUF012 + _registration_callables = {} # noqa: RUF012 @staticmethod def run_on_startup(callable): # noqa: A002 @@ -781,7 +781,7 @@ def shutdown_agent(timeout=None): def register_data_source(source, application=None, name=None, settings=None, **properties): agent = agent_instance() - agent.register_data_source(source, application and application.name or None, name, settings, **properties) + agent.register_data_source(source, (application and application.name) or None, name, settings, **properties) def _remove_thread_utilization(): diff --git a/newrelic/core/agent_control_health.py b/newrelic/core/agent_control_health.py index 33e49a8a16..533d79f5ae 100644 --- a/newrelic/core/agent_control_health.py +++ b/newrelic/core/agent_control_health.py @@ -172,11 +172,8 @@ def update_to_healthy_status(self, protocol_error=False, collector_error=False): # session. 
This function allows us to update to a healthy status if so based on the error type # Since this function is only called when we are in scenario where the agent functioned as expected, we check to # see if the previous status was unhealthy so we know to update it - if ( - protocol_error - and self.status_code in PROTOCOL_ERROR_CODES - or collector_error - and self.status_code == HealthStatus.FAILED_NR_CONNECTION.value + if (protocol_error and self.status_code in PROTOCOL_ERROR_CODES) or ( + collector_error and self.status_code == HealthStatus.FAILED_NR_CONNECTION.value ): self.status_code = HealthStatus.HEALTHY.value self.status_message = HEALTHY_STATUS_MESSAGE diff --git a/newrelic/core/agent_protocol.py b/newrelic/core/agent_protocol.py index 38a59e928d..de0cab678b 100644 --- a/newrelic/core/agent_protocol.py +++ b/newrelic/core/agent_protocol.py @@ -49,7 +49,7 @@ class AgentProtocol: VERSION = 17 - STATUS_CODE_RESPONSE = { + STATUS_CODE_RESPONSE = { # noqa: RUF012 400: DiscardDataForRequest, 401: ForceAgentRestart, 403: DiscardDataForRequest, @@ -69,7 +69,7 @@ class AgentProtocol: 500: RetryDataForRequest, 503: RetryDataForRequest, } - LOG_MESSAGES = { + LOG_MESSAGES = { # noqa: RUF012 401: ( logging.ERROR, ( @@ -146,7 +146,7 @@ class AgentProtocol: "ai_monitoring.enabled", ) - LOGGER_FUNC_MAPPING = { + LOGGER_FUNC_MAPPING = { # noqa: RUF012 "ERROR": _logger.error, "WARN": _logger.warning, "INFO": _logger.info, @@ -284,7 +284,7 @@ def _to_http(self, method, payload=()): @staticmethod def _connect_payload(app_name, linked_applications, environment, settings): settings = global_settings_dump(settings) - app_names = [app_name] + linked_applications + app_names = [app_name, *linked_applications] hostname = system_info.gethostname( settings["heroku.use_dyno_names"], settings["heroku.dyno_name_prefixes_to_shorten"] diff --git a/newrelic/core/agent_streaming.py b/newrelic/core/agent_streaming.py index 1b6c17f818..a0993a7e23 100644 --- a/newrelic/core/agent_streaming.py 
+++ b/newrelic/core/agent_streaming.py @@ -34,7 +34,7 @@ class StreamingRpc: """ RETRY_POLICY = ((15, False), (15, False), (30, False), (60, False), (120, False), (300, True)) - OPTIONS = [("grpc.enable_retries", 0)] + OPTIONS = (("grpc.enable_retries", 0),) def __init__(self, endpoint, stream_buffer, metadata, record_metric, ssl=True, compression=None): self._endpoint = endpoint diff --git a/newrelic/core/database_utils.py b/newrelic/core/database_utils.py index edc83c90a5..c37b419a39 100644 --- a/newrelic/core/database_utils.py +++ b/newrelic/core/database_utils.py @@ -272,7 +272,7 @@ def _uncomment_sql(sql): def _parse_default(sql, regex): match = regex.search(sql) - return match and _extract_identifier(match.group(1)) or "" + return (match and _extract_identifier(match.group(1))) or "" _parse_identifier_1_p = r'"((?:[^"]|"")+)"(?:\."((?:[^"]|"")+)")?' @@ -290,7 +290,7 @@ def _parse_default(sql, regex): def _join_identifier(m): - return m and ".".join([s for s in m.groups()[1:] if s]).lower() or "" + return (m and ".".join([s for s in m.groups()[1:] if s]).lower()) or "" def _parse_select(sql): @@ -415,14 +415,14 @@ def _parse_alter(sql): def _parse_operation(sql): match = _parse_operation_re.search(sql) - operation = match and match.group(1).lower() or "" + operation = (match and match.group(1).lower()) or "" return operation if operation in _operation_table else "" def _parse_target(sql, operation): sql = sql.rstrip(";") parse = _operation_table.get(operation, None) - return parse and parse(sql) or "" + return (parse and parse(sql)) or "" # For explain plan obfuscation, the regular expression for matching the diff --git a/newrelic/core/environment.py b/newrelic/core/environment.py index be6e59bf67..7d3a04f1b6 100644 --- a/newrelic/core/environment.py +++ b/newrelic/core/environment.py @@ -209,7 +209,7 @@ def plugins(): for name, module in sys.modules.copy().items(): # Exclude lib.sub_paths as independent modules except for newrelic.hooks. 
nr_hook = name.startswith("newrelic.hooks.") - if "." in name and not nr_hook or name.startswith("_"): + if ("." in name and not nr_hook) or name.startswith("_"): continue # If the module isn't actually loaded (such as failed relative imports diff --git a/newrelic/core/external_node.py b/newrelic/core/external_node.py index 547503a4dc..9165d2081f 100644 --- a/newrelic/core/external_node.py +++ b/newrelic/core/external_node.py @@ -89,7 +89,7 @@ def netloc(self): if (scheme, port) in (("http", 80), ("https", 443)): port = None - netloc = port and (f"{hostname}:{port}") or hostname + netloc = (port and (f"{hostname}:{port}")) or hostname return netloc def time_metrics(self, stats, root, parent): diff --git a/newrelic/core/infinite_tracing_pb2.py b/newrelic/core/infinite_tracing_pb2.py index 9720d501a7..45831c8eab 100644 --- a/newrelic/core/infinite_tracing_pb2.py +++ b/newrelic/core/infinite_tracing_pb2.py @@ -21,11 +21,11 @@ # Import appropriate generated pb2 file for protobuf version if PROTOBUF_VERSION >= (6,): - from newrelic.core.infinite_tracing_v6_pb2 import AttributeValue, RecordStatus, Span, SpanBatch # noqa: F401 + from newrelic.core.infinite_tracing_v6_pb2 import AttributeValue, RecordStatus, Span, SpanBatch elif PROTOBUF_VERSION >= (5,): - from newrelic.core.infinite_tracing_v5_pb2 import AttributeValue, RecordStatus, Span, SpanBatch # noqa: F401 + from newrelic.core.infinite_tracing_v5_pb2 import AttributeValue, RecordStatus, Span, SpanBatch elif PROTOBUF_VERSION >= (4,): - from newrelic.core.infinite_tracing_v4_pb2 import AttributeValue, RecordStatus, Span, SpanBatch # noqa: F401 + from newrelic.core.infinite_tracing_v4_pb2 import AttributeValue, RecordStatus, Span, SpanBatch else: from newrelic.core.infinite_tracing_v3_pb2 import AttributeValue, RecordStatus, Span, SpanBatch # noqa: F401 diff --git a/newrelic/core/node_mixin.py b/newrelic/core/node_mixin.py index 114b6f23e0..8eedd191d4 100644 --- a/newrelic/core/node_mixin.py +++ 
b/newrelic/core/node_mixin.py @@ -50,7 +50,7 @@ def get_trace_segment_params(self, settings, params=None): return _params def span_event(self, settings, base_attrs=None, parent_guid=None, attr_class=dict): - i_attrs = base_attrs and base_attrs.copy() or attr_class() + i_attrs = (base_attrs and base_attrs.copy()) or attr_class() i_attrs["type"] = "Span" i_attrs["name"] = self.name i_attrs["guid"] = self.guid diff --git a/newrelic/core/stats_engine.py b/newrelic/core/stats_engine.py index bcb1fb35d0..cc1bb42bee 100644 --- a/newrelic/core/stats_engine.py +++ b/newrelic/core/stats_engine.py @@ -92,7 +92,7 @@ def merge_stats(self, other): self[1] += other[1] self[2] += other[2] - self[3] = (self[0] or self[1] or self[2]) and min(self[3], other[3]) or other[3] + self[3] = ((self[0] or self[1] or self[2]) and min(self[3], other[3])) or other[3] self[4] = max(self[4], other[3]) def merge_apdex_metric(self, metric): @@ -102,7 +102,7 @@ def merge_apdex_metric(self, metric): self[1] += metric.tolerating self[2] += metric.frustrating - self[3] = (self[0] or self[1] or self[2]) and min(self[3], metric.apdex_t) or metric.apdex_t + self[3] = ((self[0] or self[1] or self[2]) and min(self[3], metric.apdex_t)) or metric.apdex_t self[4] = max(self[4], metric.apdex_t) @@ -140,7 +140,7 @@ def merge_stats(self, other): self[1] += other[1] self[2] += other[2] - self[3] = self[0] and min(self[3], other[3]) or other[3] + self[3] = (self[0] and min(self[3], other[3])) or other[3] self[4] = max(self[4], other[4]) self[5] += other[5] @@ -157,7 +157,7 @@ def merge_raw_time_metric(self, duration, exclusive=None): self[1] += duration self[2] += exclusive - self[3] = self[0] and min(self[3], duration) or duration + self[3] = (self[0] and min(self[3], duration)) or duration self[4] = max(self[4], duration) self[5] += duration**2 @@ -321,7 +321,7 @@ def __str__(self): return str(self.__stats_table) def __repr__(self): - return f"{__class__.__name__}({repr(self.__stats_table)})" + return 
f"{__class__.__name__}({self.__stats_table!r})" def items(self): return self.metrics() @@ -341,7 +341,7 @@ def merge_stats(self, other): """Merge data from another instance of this object.""" self[1] += other[1] - self[2] = self[0] and min(self[2], other[2]) or other[2] + self[2] = (self[0] and min(self[2], other[2])) or other[2] self[3] = max(self[3], other[3]) if self[3] == other[3]: @@ -358,7 +358,7 @@ def merge_slow_sql_node(self, node): duration = node.duration self[1] += duration - self[2] = self[0] and min(self[2], duration) or duration + self[2] = (self[0] and min(self[2], duration)) or duration self[3] = max(self[3], duration) if self[3] == duration: @@ -853,7 +853,7 @@ def notice_error(self, error=None, attributes=None, expected=None, ignore=None, _, error_group_name = process_user_attribute("error.group.name", error_group_name_raw) if error_group_name is None or not isinstance(error_group_name, str): raise ValueError( - f"Invalid attribute value for error.group.name. Expected string, got: {repr(error_group_name_raw)}" + f"Invalid attribute value for error.group.name. 
Expected string, got: {error_group_name_raw!r}" ) else: agent_attributes["error.group.name"] = error_group_name diff --git a/newrelic/core/thread_utilization.py b/newrelic/core/thread_utilization.py index 0c98654b17..bb1c0e3d8a 100644 --- a/newrelic/core/thread_utilization.py +++ b/newrelic/core/thread_utilization.py @@ -113,7 +113,7 @@ def __call__(self): yield ("Instance/Available", total_threads) yield ("Instance/Used", utilization) - busy = total_threads and utilization / total_threads or 0.0 + busy = (total_threads and utilization / total_threads) or 0.0 yield ("Instance/Busy", busy) diff --git a/newrelic/core/trace_cache.py b/newrelic/core/trace_cache.py index fb4a0db443..d4ed220a30 100644 --- a/newrelic/core/trace_cache.py +++ b/newrelic/core/trace_cache.py @@ -99,7 +99,7 @@ def __init__(self): self._cache = weakref.WeakValueDictionary() def __repr__(self): - return f"<{self.__class__.__name__} object at 0x{id(self):x} {str(dict(self.items()))}>" + return f"<{self.__class__.__name__} object at 0x{id(self):x} {dict(self.items())!s}>" def current_thread_id(self): """Returns the thread ID for the caller. 
diff --git a/newrelic/hooks/adapter_cheroot.py b/newrelic/hooks/adapter_cheroot.py index c9f7739d46..44497d8f52 100644 --- a/newrelic/hooks/adapter_cheroot.py +++ b/newrelic/hooks/adapter_cheroot.py @@ -19,7 +19,7 @@ def instrument_cheroot_wsgiserver(module): def wrap_wsgi_application_entry_point(server, bind_addr, wsgi_app, *args, **kwargs): application = newrelic.api.wsgi_application.WSGIApplicationWrapper(wsgi_app) - args = [server, bind_addr, application] + list(args) + args = [server, bind_addr, application, *args] return (args, kwargs) newrelic.api.in_function.wrap_in_function(module, "Server.__init__", wrap_wsgi_application_entry_point) diff --git a/newrelic/hooks/adapter_cherrypy.py b/newrelic/hooks/adapter_cherrypy.py index 8441f9112d..d30795e8f9 100644 --- a/newrelic/hooks/adapter_cherrypy.py +++ b/newrelic/hooks/adapter_cherrypy.py @@ -19,7 +19,7 @@ def instrument_cherrypy_wsgiserver(module): def wrap_wsgi_application_entry_point(server, bind_addr, wsgi_app, *args, **kwargs): application = newrelic.api.wsgi_application.WSGIApplicationWrapper(wsgi_app) - args = [server, bind_addr, application] + list(args) + args = [server, bind_addr, application, *args] return (args, kwargs) newrelic.api.in_function.wrap_in_function(module, "CherryPyWSGIServer.__init__", wrap_wsgi_application_entry_point) diff --git a/newrelic/hooks/adapter_flup.py b/newrelic/hooks/adapter_flup.py index a36980bb55..93f30bf72b 100644 --- a/newrelic/hooks/adapter_flup.py +++ b/newrelic/hooks/adapter_flup.py @@ -18,7 +18,7 @@ def wrap_wsgi_application_entry_point(server, application, *args, **kwargs): application = newrelic.api.wsgi_application.WSGIApplicationWrapper(application) - args = [server, application] + list(args) + args = [server, application, *args] return (args, kwargs) diff --git a/newrelic/hooks/adapter_gevent.py b/newrelic/hooks/adapter_gevent.py index b61af906e4..33751debb7 100644 --- a/newrelic/hooks/adapter_gevent.py +++ b/newrelic/hooks/adapter_gevent.py @@ -25,7 +25,7 @@ 
def _bind_params(self, listener, application, *args, **kwargs): application = WSGIApplicationWrapper(application) - _args = (self, listener, application) + _args + _args = (self, listener, application, *_args) return _args, _kwargs @@ -41,7 +41,7 @@ def _bind_params(self, listener, application, *args, **kwargs): application = WSGIApplicationWrapper(application) - _args = (self, listener, application) + _args + _args = (self, listener, application, *_args) return _args, _kwargs diff --git a/newrelic/hooks/adapter_meinheld.py b/newrelic/hooks/adapter_meinheld.py index 1dd0958193..8e69a668e0 100644 --- a/newrelic/hooks/adapter_meinheld.py +++ b/newrelic/hooks/adapter_meinheld.py @@ -19,7 +19,7 @@ def instrument_meinheld_server(module): def wrap_wsgi_application_entry_point(application, *args, **kwargs): application = newrelic.api.wsgi_application.WSGIApplicationWrapper(application) - args = [application] + list(args) + args = [application, *args] return (args, kwargs) newrelic.api.in_function.wrap_in_function(module, "run", wrap_wsgi_application_entry_point) diff --git a/newrelic/hooks/adapter_paste.py b/newrelic/hooks/adapter_paste.py index 878685f282..0f1d38ca39 100644 --- a/newrelic/hooks/adapter_paste.py +++ b/newrelic/hooks/adapter_paste.py @@ -19,7 +19,7 @@ def instrument_paste_httpserver(module): def wrap_wsgi_application_entry_point(server, application, *args, **kwargs): application = newrelic.api.wsgi_application.WSGIApplicationWrapper(application) - args = [server, application] + list(args) + args = [server, application, *args] return (args, kwargs) newrelic.api.in_function.wrap_in_function(module, "WSGIServerBase.__init__", wrap_wsgi_application_entry_point) diff --git a/newrelic/hooks/adapter_waitress.py b/newrelic/hooks/adapter_waitress.py index 2353510e3f..e1a5e485f9 100644 --- a/newrelic/hooks/adapter_waitress.py +++ b/newrelic/hooks/adapter_waitress.py @@ -21,7 +21,7 @@ def instrument_waitress_server(module): def 
wrap_wsgi_application_entry_point(server, application, *args, **kwargs): dispatcher_details = ("Waitress", get_package_version("waitress")) application = WSGIApplicationWrapper(application, dispatcher=dispatcher_details) - args = [server, application] + list(args) + args = [server, application, *args] return (args, kwargs) wrap_in_function(module, "WSGIServer.__init__", wrap_wsgi_application_entry_point) diff --git a/newrelic/hooks/adapter_wsgiref.py b/newrelic/hooks/adapter_wsgiref.py index f7b618e7de..c368194adf 100644 --- a/newrelic/hooks/adapter_wsgiref.py +++ b/newrelic/hooks/adapter_wsgiref.py @@ -19,7 +19,7 @@ def instrument_wsgiref_simple_server(module): def wrap_wsgi_application_entry_point(server, application, *args, **kwargs): application = newrelic.api.wsgi_application.WSGIApplicationWrapper(application) - args = [server, application] + list(args) + args = [server, application, *args] return (args, kwargs) newrelic.api.in_function.wrap_in_function(module, "WSGIServer.set_app", wrap_wsgi_application_entry_point) diff --git a/newrelic/hooks/application_gearman.py b/newrelic/hooks/application_gearman.py index 004f5aca1a..10df6c48db 100644 --- a/newrelic/hooks/application_gearman.py +++ b/newrelic/hooks/application_gearman.py @@ -66,7 +66,7 @@ def _bind_params(submitted_connections, *args, **kwargs): if not submitted_connections: return wrapped(*args, **kwargs) - first_connection = list(submitted_connections)[0] + first_connection = next(iter(submitted_connections)) url = f"gearman://{first_connection.gearman_host}:{first_connection.gearman_port}" diff --git a/newrelic/hooks/component_piston.py b/newrelic/hooks/component_piston.py index ff65314f2e..f2abb2c760 100644 --- a/newrelic/hooks/component_piston.py +++ b/newrelic/hooks/component_piston.py @@ -80,6 +80,6 @@ def instrument_piston_doc(module): def in_HandlerMethod_init(self, method, *args, **kwargs): if isinstance(method, MethodWrapper): method = method._nr_wrapped - return ((self, method) + args, 
kwargs) + return ((self, method, *args), kwargs) newrelic.api.in_function.wrap_in_function(module, "HandlerMethod.__init__", in_HandlerMethod_init) diff --git a/newrelic/hooks/external_aiobotocore.py b/newrelic/hooks/external_aiobotocore.py index c398a5b681..4cbaef3374 100644 --- a/newrelic/hooks/external_aiobotocore.py +++ b/newrelic/hooks/external_aiobotocore.py @@ -117,7 +117,7 @@ async def wrap_client__make_api_call(wrapped, instance, args, kwargs): ) raise - if not response or response_streaming and not settings.ai_monitoring.streaming.enabled: + if not response or (response_streaming and not settings.ai_monitoring.streaming.enabled): if ft: ft.__exit__(None, None, None) return response diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index a49c5e1f8a..a512a605ba 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -673,7 +673,7 @@ def _wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): ) raise - if not response or response_streaming and not settings.ai_monitoring.streaming.enabled: + if not response or (response_streaming and not settings.ai_monitoring.streaming.enabled): ft.__exit__(None, None, None) return response diff --git a/newrelic/hooks/framework_django.py b/newrelic/hooks/framework_django.py index d0a5f77475..1b04b27363 100644 --- a/newrelic/hooks/framework_django.py +++ b/newrelic/hooks/framework_django.py @@ -180,7 +180,7 @@ def newrelic_browser_timing_header(): from django.utils.safestring import mark_safe transaction = current_transaction() - return transaction and mark_safe(transaction.browser_timing_header()) or "" # noqa: S308 + return (transaction and mark_safe(transaction.browser_timing_header())) or "" # noqa: S308 def newrelic_browser_timing_footer(): diff --git a/newrelic/hooks/framework_graphql.py b/newrelic/hooks/framework_graphql.py index d64784605f..cc7ac1c096 100644 --- a/newrelic/hooks/framework_graphql.py +++ 
b/newrelic/hooks/framework_graphql.py @@ -196,7 +196,7 @@ def traverse_deepest_unique_path(fields, fragments): # list(fragments.values())[0] 's index is OK because the previous line # ensures that there is only one field in the list - full_fragment_selection_set = list(fragments.values())[0].selection_set.selections + full_fragment_selection_set = next(iter(fragments.values())).selection_set.selections fragment_selection_set = filter_ignored_fields(full_fragment_selection_set) if len(fragment_selection_set) != 1: diff --git a/newrelic/hooks/framework_grpc.py b/newrelic/hooks/framework_grpc.py index 70f296132c..462e51362c 100644 --- a/newrelic/hooks/framework_grpc.py +++ b/newrelic/hooks/framework_grpc.py @@ -31,10 +31,10 @@ def _get_uri_method(instance, *args, **kwargs): def _prepare_request(transaction, guid, request, timeout=None, metadata=None, *args, **kwargs): - metadata = metadata and list(metadata) or [] + metadata = (metadata and list(metadata)) or [] dt_metadata = transaction._create_distributed_trace_data_with_guid(guid) metadata.extend(transaction._generate_distributed_trace_headers(dt_metadata)) - args = (request, timeout, metadata) + args + args = (request, timeout, metadata, *args) return args, kwargs diff --git a/newrelic/hooks/messagebroker_kafkapython.py b/newrelic/hooks/messagebroker_kafkapython.py index 431af6cfdd..ed0acf60ef 100644 --- a/newrelic/hooks/messagebroker_kafkapython.py +++ b/newrelic/hooks/messagebroker_kafkapython.py @@ -243,7 +243,7 @@ def _metric_wrapper(wrapped, instance, args, kwargs): application = application_instance(activate=False) if application: - if not check_result or check_result and result: + if not check_result or (check_result and result): # If the result does not need validated, send metric. # If the result does need validated, ensure it is True. 
application.record_custom_metric(metric_name, 1) diff --git a/newrelic/hooks/messagebroker_pika.py b/newrelic/hooks/messagebroker_pika.py index 58cab675e7..821d05a318 100644 --- a/newrelic/hooks/messagebroker_pika.py +++ b/newrelic/hooks/messagebroker_pika.py @@ -54,7 +54,7 @@ def _add_consume_rabbitmq_trace(transaction, method, properties, nr_start_time, if hasattr(method, "routing_key"): routing_key = method.routing_key - properties = properties and properties.__dict__ or {} + properties = (properties and properties.__dict__) or {} correlation_id = properties.get("correlation_id") reply_to = properties.get("reply_to") @@ -130,7 +130,7 @@ def _nr_wrapper_basic_publish(wrapped, instance, args, kwargs): user_headers.pop("traceparent", None) user_headers.pop("tracestate", None) - args = (exchange, routing_key, body, properties) + args + args = (exchange, routing_key, body, properties, *args) params = {} if routing_key is not None: @@ -218,28 +218,28 @@ def _nr_wrap_BlockingChannel___init__(wrapped, instance, args, kwargs): def _wrap_basic_consume_BlockingChannel_old(wrapper, consumer_callback, queue, *args, **kwargs): - args = (wrapper(consumer_callback), queue) + args + args = (wrapper(consumer_callback), queue, *args) return queue, args, kwargs def _wrap_basic_consume_Channel_old(wrapper, consumer_callback, queue="", *args, **kwargs): - return queue, (wrapper(consumer_callback), queue) + args, kwargs + return queue, (wrapper(consumer_callback), queue, *args), kwargs def _wrap_basic_consume_Channel(wrapper, queue, on_message_callback, *args, **kwargs): - args = (queue, wrapper(on_message_callback)) + args + args = (queue, wrapper(on_message_callback), *args) return queue, args, kwargs def _wrap_basic_get_Channel(wrapper, queue, callback, *args, **kwargs): - args = (queue, wrapper(callback)) + args + args = (queue, wrapper(callback), *args) return queue, args, kwargs def _wrap_basic_get_Channel_old(wrapper, callback=None, queue="", *args, **kwargs): # pragma: no 
cover if callback is not None: callback = wrapper(callback) - args = (callback, queue) + args + args = (callback, queue, *args) return queue, args, kwargs diff --git a/newrelic/hooks/mlmodel_sklearn.py b/newrelic/hooks/mlmodel_sklearn.py index ed76448ffc..8e0207a2db 100644 --- a/newrelic/hooks/mlmodel_sklearn.py +++ b/newrelic/hooks/mlmodel_sklearn.py @@ -302,13 +302,13 @@ def create_prediction_event(transaction, class_, instance, args, kwargs, return_ if settings and settings.machine_learning and settings.machine_learning.inference_events_value.enabled: event.update( { - f"feature.{str(final_feature_names[feature_col_index])}": value + f"feature.{final_feature_names[feature_col_index]!s}": value for feature_col_index, value in enumerate(prediction) } ) event.update( { - f"label.{str(label_names_list[index])}": str(value) + f"label.{label_names_list[index]!s}": str(value) for index, value in enumerate(labels[prediction_index]) } ) diff --git a/pyproject.toml b/pyproject.toml index b701d9c928..2c449f8651 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -60,6 +60,7 @@ select = [ "PYI", # flake8-pyi "Q", # flake8-quotes "RSE", # flake8-raise + "RUF", # Ruff-specific rules "S", # flake8-bandit "SLOT", # flake8-slots "T10", # flake8-debugger @@ -73,7 +74,6 @@ select = [ # "FBT", # flake8-boolean-trap # "N", # pep8-naming # "RET", # flake8-return - # "RUF", # Ruff-specific rules # "SIM", # flake8-simplify # "TRY", # tryceratops ] @@ -94,6 +94,7 @@ select = [ # "NPY", # NumPy-specific rules # "FAST", # FastAPI # "AIR", # Airflow +# "SLF", # flake8-self (only scans for private member access, which is intentional) # Preview linters (disabled) # "CPY", # flake8-copyright @@ -141,7 +142,9 @@ ignore = [ "F401", # unused-import "F811", # redefined-while-unused (pytest fixtures trigger this) "F841", # unused-variable (intentional in tests to document what an unused output is) + "RUF012", # mutable-class-default + "RUF015", # unnecessary-iterable-allocation-for-first-element
(more readable in tests) "E731", # lambda-assignment (acceptable in tests) "PLR2004", # magic-value-comparison (comparing to constant values) "ASYNC251", # blocking-sleep-in-async-function (acceptable in tests) "B904", # raise-without-from-inside-except (not necessary in tests) diff --git a/tests/agent_features/_test_code_level_metrics.py b/tests/agent_features/_test_code_level_metrics.py index 2873022e1a..73d93b4140 100644 --- a/tests/agent_features/_test_code_level_metrics.py +++ b/tests/agent_features/_test_code_level_metrics.py @@ -72,5 +72,5 @@ def __call__(self): TYPE_CONSTRUCTOR_CLASS_INSTANCE = ExerciseTypeConstructor() TYPE_CONSTRUCTOR_CALLABLE_CLASS_INSTANCE = ExerciseTypeConstructorCallable() -exercise_lambda = lambda: None # noqa: E731 +exercise_lambda = lambda: None exercise_partial = functools.partial(exercise_function) diff --git a/tests/agent_features/test_attributes_in_action.py b/tests/agent_features/test_attributes_in_action.py index a3386a30cf..0b24a3100a 100644 --- a/tests/agent_features/test_attributes_in_action.py +++ b/tests/agent_features/test_attributes_in_action.py @@ -360,13 +360,13 @@ def test_browser_include_request_params(normal_application): } _expected_attributes = { - "agent": TRACE_ERROR_AGENT_KEYS + [f"request.parameters.{URL_PARAM}"], + "agent": [*TRACE_ERROR_AGENT_KEYS, f"request.parameters.{URL_PARAM}"], "user": ERROR_USER_ATTRS, "intrinsic": ["trip_id"], } _expected_attributes_event = { - "agent": TRACE_ERROR_AGENT_KEYS + [f"request.parameters.{URL_PARAM}"], + "agent": [*TRACE_ERROR_AGENT_KEYS, f"request.parameters.{URL_PARAM}"], "user": ERROR_USER_ATTRS, "intrinsic": ERROR_EVENT_INTRINSICS, } @@ -387,7 +387,7 @@ def test_error_in_transaction_include_exclude(normal_application): } _expected_attributes = { - "agent": TRACE_ERROR_AGENT_KEYS + [f"request.parameters.{URL_PARAM}"], + "agent":
[*TRACE_ERROR_AGENT_KEYS, f"request.parameters.{URL_PARAM}"], "user": USER_ATTRS, "intrinsic": ["trip_id"], } @@ -405,7 +405,7 @@ def test_transaction_trace_include_exclude(normal_application): } _expected_attributes = { - "agent": TRANS_EVENT_AGENT_KEYS + [f"request.parameters.{URL_PARAM}"], + "agent": [*TRANS_EVENT_AGENT_KEYS, f"request.parameters.{URL_PARAM}"], "user": USER_ATTRS, "intrinsic": TRANS_EVENT_INTRINSICS, } @@ -431,7 +431,7 @@ def test_transaction_event_include_exclude(normal_application): "intrinsic": BROWSER_INTRINSIC_KEYS, } -_expected_absent_attributes = {"agent": ABSENT_BROWSER_KEYS + [f"request.parameters.{URL_PARAM2}"], "user": []} +_expected_absent_attributes = {"agent": [*ABSENT_BROWSER_KEYS, f"request.parameters.{URL_PARAM2}"], "user": []} @validate_browser_attributes(_expected_attributes, _expected_absent_attributes) @@ -528,7 +528,7 @@ def test_browser_exclude_user_attribute(normal_application): "intrinsic": ERROR_EVENT_INTRINSICS, } -_expected_absent_attributes = {"agent": ["request.method", "request.uri"] + REQ_PARAMS, "user": [], "intrinsic": []} +_expected_absent_attributes = {"agent": ["request.method", "request.uri", *REQ_PARAMS], "user": [], "intrinsic": []} @validate_error_event_attributes(_expected_attributes_event, _expected_absent_attributes) @@ -557,7 +557,7 @@ def test_transaction_trace_exclude_agent_attribute(normal_application): "intrinsic": TRANS_EVENT_INTRINSICS, } -_expected_absent_attributes = {"agent": ["request.method", "request.uri"] + REQ_PARAMS, "user": [], "intrinsic": []} +_expected_absent_attributes = {"agent": ["request.method", "request.uri", *REQ_PARAMS], "user": [], "intrinsic": []} @validate_transaction_event_attributes(_expected_attributes, _expected_absent_attributes) @@ -570,7 +570,7 @@ def test_transaction_event_exclude_agent_attribute(normal_application): _expected_agent_attributes = ["response.status", "request.headers.contentType", "request.headers.contentLength"] -_expected_absent_agent_attributes = 
["request.method", "request.uri"] + REQ_PARAMS +_expected_absent_agent_attributes = ["request.method", "request.uri", *REQ_PARAMS] @override_application_settings(_override_settings) @@ -612,7 +612,7 @@ def test_transaction_trace_deprecated_capture_params_true(normal_application): _expected_attributes = {"agent": TRANS_EVENT_AGENT_KEYS, "user": USER_ATTRS, "intrinsic": TRANS_EVENT_INTRINSICS} -_expected_absent_attributes = {"agent": ["wsgi.output.seconds"] + REQ_PARAMS, "user": [], "intrinsic": []} +_expected_absent_attributes = {"agent": ["wsgi.output.seconds", *REQ_PARAMS], "user": [], "intrinsic": []} @validate_transaction_event_attributes(_expected_attributes, _expected_absent_attributes) @@ -669,7 +669,7 @@ def test_transaction_trace_deprecated_capture_params_false(normal_application): _expected_attributes = {"agent": TRANS_EVENT_AGENT_KEYS, "user": USER_ATTRS, "intrinsic": TRANS_EVENT_INTRINSICS} -_expected_absent_attributes = {"agent": ["wsgi.output.seconds"] + REQ_PARAMS, "user": [], "intrinsic": []} +_expected_absent_attributes = {"agent": ["wsgi.output.seconds", *REQ_PARAMS], "user": [], "intrinsic": []} @validate_transaction_event_attributes(_expected_attributes, _expected_absent_attributes) @@ -728,7 +728,7 @@ def test_transaction_trace_exclude_intrinsic(normal_application): _expected_attributes = {"agent": TRANS_EVENT_AGENT_KEYS, "user": USER_ATTRS, "intrinsic": TRANS_EVENT_INTRINSICS} -_expected_absent_attributes = {"agent": ["wsgi.output.seconds"] + REQ_PARAMS, "user": [], "intrinsic": []} +_expected_absent_attributes = {"agent": ["wsgi.output.seconds", *REQ_PARAMS], "user": [], "intrinsic": []} @validate_transaction_event_attributes(_expected_attributes, _expected_absent_attributes) diff --git a/tests/agent_features/test_distributed_tracing.py b/tests/agent_features/test_distributed_tracing.py index 73009cc62e..502b3828b0 100644 --- a/tests/agent_features/test_distributed_tracing.py +++ b/tests/agent_features/test_distributed_tracing.py @@ -141,7 
+141,8 @@ def test_distributed_trace_attributes(span_events, accept_payload): _forgone_error_attributes = {"intrinsic": _forgone_error_intrinsics, "agent": [], "user": []} else: _required_intrinsics = distributed_trace_intrinsics - _forgone_txn_intrinsics = _forgone_error_intrinsics = inbound_payload_intrinsics + [ + _forgone_txn_intrinsics = _forgone_error_intrinsics = [ + *inbound_payload_intrinsics, "grandparentId", "parentId", "parentSpanId", @@ -193,7 +194,7 @@ def _test(): _test() -_forgone_attributes = {"agent": [], "user": [], "intrinsic": (inbound_payload_intrinsics + ["grandparentId"])} +_forgone_attributes = {"agent": [], "user": [], "intrinsic": ([*inbound_payload_intrinsics, "grandparentId"])} @override_application_settings(_override_settings) diff --git a/tests/agent_features/test_error_group_callback.py b/tests/agent_features/test_error_group_callback.py index 2b03c1ed49..7d44b6313f 100644 --- a/tests/agent_features/test_error_group_callback.py +++ b/tests/agent_features/test_error_group_callback.py @@ -234,7 +234,7 @@ def _test(): notice_error(application=app, attributes={"notice_error_attribute": 1}) assert not callback_errors, ( - f"Callback inputs failed to validate.\nerror: {traceback.format_exception(*callback_errors[0])}\ndata: {str(_data[0])}" + f"Callback inputs failed to validate.\nerror: {traceback.format_exception(*callback_errors[0])}\ndata: {_data[0]!s}" ) if transaction_decorator is not None: diff --git a/tests/agent_features/test_log_events.py b/tests/agent_features/test_log_events.py index b70b340eb7..557aac48f2 100644 --- a/tests/agent_features/test_log_events.py +++ b/tests/agent_features/test_log_events.py @@ -56,6 +56,11 @@ def set_trace_ids(): trace.guid = "abcdefgh" +def active_session(): + txn = current_transaction() + return list(txn.application._agent._applications.values())[0]._active_session + + def exercise_record_log_event(): set_trace_ids() @@ -407,9 +412,7 @@ def test(): @override_application_settings({"labels": 
TEST_LABELS, "application_logging.forwarding.labels.enabled": True}) @background_task() def test_label_forwarding_enabled(): - txn = current_transaction() - session = list(txn.application._agent._applications.values())[0]._active_session - + session = active_session() common = session.get_log_events_common_block() # Excluded label should not appear, and other labels should be prefixed with 'tag.' assert common == {"tags.testlabel1": "A", "tags.testlabel2": "B", "tags.testlabelexclude": "C"} @@ -424,9 +427,7 @@ def test_label_forwarding_enabled(): ) @background_task() def test_label_forwarding_enabled_exclude(): - txn = current_transaction() - session = list(txn.application._agent._applications.values())[0]._active_session - + session = active_session() common = session.get_log_events_common_block() # Excluded label should not appear, and other labels should be prefixed with 'tags.' assert common == {"tags.testlabel1": "A", "tags.testlabel2": "B"} @@ -435,9 +436,7 @@ def test_label_forwarding_enabled_exclude(): @override_application_settings({"labels": TEST_LABELS, "application_logging.forwarding.labels.enabled": False}) @background_task() def test_label_forwarding_disabled(): - txn = current_transaction() - session = list(txn.application._agent._applications.values())[0]._active_session - + session = active_session() common = session.get_log_events_common_block() # No labels should appear assert common == {} @@ -453,9 +452,7 @@ def test_label_forwarding_disabled(): ) @background_task() def test_global_custom_attribute_forwarding_enabled(): - txn = current_transaction() - session = list(txn.application._agent._applications.values())[0]._active_session - + session = active_session() common = session.get_log_events_common_block() # Both attrs should appear assert common == {"custom_attr_1": "value 1", "custom_attr_2": "value 2"} @@ -464,9 +461,7 @@ def test_global_custom_attribute_forwarding_enabled(): 
@override_application_settings({"application_logging.forwarding.custom_attributes": [("custom_attr_1", "a" * 256)]}) @background_task() def test_global_custom_attribute_forwarding_truncation(): - txn = current_transaction() - session = list(txn.application._agent._applications.values())[0]._active_session - + session = active_session() common = session.get_log_events_common_block() # Attribute value should be truncated to the max user attribute length assert common == {"custom_attr_1": "a" * 255} @@ -477,9 +472,7 @@ def test_global_custom_attribute_forwarding_truncation(): ) @background_task() def test_global_custom_attribute_forwarding_max_num_attrs(): - txn = current_transaction() - session = list(txn.application._agent._applications.values())[0]._active_session - + session = active_session() common = session.get_log_events_common_block() # Should be truncated to the max number of user attributes assert common == {f"custom_attr_{i + 1}": "value" for i in range(128)} diff --git a/tests/agent_features/test_notice_error.py b/tests/agent_features/test_notice_error.py index dffa16ce39..1240dea1e8 100644 --- a/tests/agent_features/test_notice_error.py +++ b/tests/agent_features/test_notice_error.py @@ -364,7 +364,7 @@ def test_notice_error_strip_message_not_in_allowlist_outside_transaction(): def _raise_errors(num_errors, application=None): for i in range(num_errors): try: - raise RuntimeError(f"error{str(i)}") + raise RuntimeError(f"error{i!s}") except RuntimeError: notice_error(application=application) diff --git a/tests/agent_features/test_span_events.py b/tests/agent_features/test_span_events.py index e9d5ba5438..2d49ae01c6 100644 --- a/tests/agent_features/test_span_events.py +++ b/tests/agent_features/test_span_events.py @@ -630,7 +630,7 @@ def test_span_event_error_attributes_observed(trace_type, args): exact_agents = {"error.class": callable_name(error), "error.message": "whoops"} # Verify errors are not recorded since notice_error is not called - rollups = 
[("Errors/all", None)] + _span_event_metrics + rollups = [("Errors/all", None), *_span_event_metrics] @dt_enabled @validate_transaction_metrics( diff --git a/tests/agent_unittests/test_agent_protocol.py b/tests/agent_unittests/test_agent_protocol.py index 4bcf7dc579..f87f48f7f6 100644 --- a/tests/agent_unittests/test_agent_protocol.py +++ b/tests/agent_unittests/test_agent_protocol.py @@ -51,7 +51,7 @@ LABELS = "labels" LINKED_APPS = ["linked_app_1", "linked_app_2"] MEMORY = 12000.0 -PAYLOAD_APP_NAME = [APP_NAME] + LINKED_APPS +PAYLOAD_APP_NAME = [APP_NAME, *LINKED_APPS] PAYLOAD_ID = ",".join(PAYLOAD_APP_NAME) PID = 123 PROCESSOR_COUNT = 4 diff --git a/tests/component_graphqlserver/test_graphql.py b/tests/component_graphqlserver/test_graphql.py index 671762c088..3e8391ed2a 100644 --- a/tests/component_graphqlserver/test_graphql.py +++ b/tests/component_graphqlserver/test_graphql.py @@ -107,7 +107,8 @@ def test_query_and_mutation(target_application): ("GraphQL/GraphQLServer/all", 1), ("GraphQL/allWeb", 1), ("GraphQL/GraphQLServer/allWeb", 1), - ] + _test_query_scoped_metrics + *_test_query_scoped_metrics, + ] _test_mutation_scoped_metrics = [ ("GraphQL/resolve/GraphQLServer/storage_add", 1), @@ -119,7 +120,8 @@ def test_query_and_mutation(target_application): ("GraphQL/GraphQLServer/all", 1), ("GraphQL/allWeb", 1), ("GraphQL/GraphQLServer/allWeb", 1), - ] + _test_mutation_scoped_metrics + *_test_mutation_scoped_metrics, + ] _expected_mutation_operation_attributes = { "graphql.operation.type": "mutation", @@ -218,7 +220,8 @@ def test_exception_in_middleware(target_application): ("Errors/all", 1), ("Errors/allWeb", 1), ("Errors/WebTransaction/GraphQL/component_graphqlserver.test_graphql:error_middleware", 1), - ] + _test_exception_scoped_metrics + *_test_exception_scoped_metrics, + ] # Attributes _expected_exception_resolver_attributes = { @@ -265,7 +268,8 @@ def test_exception_in_resolver(target_application, field): ("Errors/all", 1), ("Errors/allWeb", 1), 
(f"Errors/WebTransaction/GraphQL/{txn_name}", 1), - ] + _test_exception_scoped_metrics + *_test_exception_scoped_metrics, + ] # Attributes _expected_exception_resolver_attributes = { @@ -324,7 +328,8 @@ def test_exception_in_validation(target_application, is_graphql_2, query, exc_cl ("Errors/all", 1), ("Errors/allWeb", 1), (f"Errors/WebTransaction/GraphQL/{txn_name}", 1), - ] + _test_exception_scoped_metrics + *_test_exception_scoped_metrics, + ] # Attributes _expected_exception_operation_attributes = { diff --git a/tests/cross_agent/test_w3c_trace_context.py b/tests/cross_agent/test_w3c_trace_context.py index 72745d3b5b..2be63c2c0a 100644 --- a/tests/cross_agent/test_w3c_trace_context.py +++ b/tests/cross_agent/test_w3c_trace_context.py @@ -215,10 +215,10 @@ def test_trace_context( ".web_transaction": web_transaction, ".raises_exception": raises_exception, ".transport_type": transport_type, - ".outbound_calls": outbound_payloads and len(outbound_payloads) or 0, + ".outbound_calls": (outbound_payloads and len(outbound_payloads)) or 0, } - inbound_headers = inbound_headers and inbound_headers[0] or None + inbound_headers = (inbound_headers and inbound_headers[0]) or None if transport_type != "HTTP": extra_environ[".inbound_headers"] = inbound_headers inbound_headers = None diff --git a/tests/datastore_aiomysql/test_database.py b/tests/datastore_aiomysql/test_database.py index 0ff0c36bf6..20d1a48586 100644 --- a/tests/datastore_aiomysql/test_database.py +++ b/tests/datastore_aiomysql/test_database.py @@ -93,8 +93,8 @@ async def execute_db_calls_with_cursor(cursor): @validate_transaction_metrics( "test_database:test_execute_via_connection", - scoped_metrics=list(SCOPED_METRICS) + [("Function/aiomysql.connection:connect", 1)], - rollup_metrics=list(ROLLUP_METRICS) + [("Function/aiomysql.connection:connect", 1)], + scoped_metrics=[*list(SCOPED_METRICS), ("Function/aiomysql.connection:connect", 1)], + rollup_metrics=[*list(ROLLUP_METRICS), 
("Function/aiomysql.connection:connect", 1)], background_task=True, ) @validate_database_trace_inputs(sql_parameters_type=tuple) @@ -122,8 +122,8 @@ async def _test(): @validate_transaction_metrics( "test_database:test_execute_via_pool", - scoped_metrics=list(SCOPED_METRICS) + [("Function/aiomysql.pool:Pool._acquire", 1)], - rollup_metrics=list(ROLLUP_METRICS) + [("Function/aiomysql.pool:Pool._acquire", 1)], + scoped_metrics=[*list(SCOPED_METRICS), ("Function/aiomysql.pool:Pool._acquire", 1)], + rollup_metrics=[*list(ROLLUP_METRICS), ("Function/aiomysql.pool:Pool._acquire", 1)], background_task=True, ) @validate_database_trace_inputs(sql_parameters_type=tuple) diff --git a/tests/datastore_aioredis/conftest.py b/tests/datastore_aioredis/conftest.py index 0c68f5c72c..8ab0ad605e 100644 --- a/tests/datastore_aioredis/conftest.py +++ b/tests/datastore_aioredis/conftest.py @@ -75,4 +75,4 @@ def client(request, loop): @pytest.fixture(scope="session") def key(): - return f"AIOREDIS-TEST-{str(os.getpid())}" + return f"AIOREDIS-TEST-{os.getpid()!s}" diff --git a/tests/datastore_aredis/test_trace_node.py b/tests/datastore_aredis/test_trace_node.py index 6290933943..aadbe80c5a 100644 --- a/tests/datastore_aredis/test_trace_node.py +++ b/tests/datastore_aredis/test_trace_node.py @@ -80,26 +80,26 @@ async def _exercise_db(): @override_application_settings(_enable_instance_settings) @validate_tt_collector_json(datastore_params=_enabled_required, datastore_forgone_params=_enabled_forgone) @background_task() -def test_trace_node_datastore_params_enable_instance(loop): # noqa: F811 +def test_trace_node_datastore_params_enable_instance(loop): loop.run_until_complete(_exercise_db()) @override_application_settings(_disable_instance_settings) @validate_tt_collector_json(datastore_params=_disabled_required, datastore_forgone_params=_disabled_forgone) @background_task() -def test_trace_node_datastore_params_disable_instance(loop): # noqa: F811 +def 
test_trace_node_datastore_params_disable_instance(loop): loop.run_until_complete(_exercise_db()) @override_application_settings(_instance_only_settings) @validate_tt_collector_json(datastore_params=_instance_only_required, datastore_forgone_params=_instance_only_forgone) @background_task() -def test_trace_node_datastore_params_instance_only(loop): # noqa: F811 +def test_trace_node_datastore_params_instance_only(loop): loop.run_until_complete(_exercise_db()) @override_application_settings(_database_only_settings) @validate_tt_collector_json(datastore_params=_database_only_required, datastore_forgone_params=_database_only_forgone) @background_task() -def test_trace_node_datastore_params_database_only(loop): # noqa: F811 +def test_trace_node_datastore_params_database_only(loop): loop.run_until_complete(_exercise_db()) diff --git a/tests/datastore_asyncpg/test_multiple_dbs.py b/tests/datastore_asyncpg/test_multiple_dbs.py index 791ed2bd19..90d209ad1b 100644 --- a/tests/datastore_asyncpg/test_multiple_dbs.py +++ b/tests/datastore_asyncpg/test_multiple_dbs.py @@ -52,7 +52,7 @@ # Metrics -_base_scoped_metrics = CONNECT_METRICS + [("Datastore/statement/Postgres/pg_settings/select", 2)] +_base_scoped_metrics = [*CONNECT_METRICS, ("Datastore/statement/Postgres/pg_settings/select", 2)] _base_rollup_metrics = [ ("Datastore/all", TOTAL_COUNT), diff --git a/tests/datastore_elasticsearch/test_async_mget.py b/tests/datastore_elasticsearch/test_async_mget.py index 4181ab647f..46f70c302b 100644 --- a/tests/datastore_elasticsearch/test_async_mget.py +++ b/tests/datastore_elasticsearch/test_async_mget.py @@ -15,7 +15,7 @@ import pytest from conftest import ES_MULTIPLE_SETTINGS, ES_VERSION from elasticsearch import AsyncElasticsearch -from testing_support.fixture.event_loop import event_loop as loop # noqa: F401 +from testing_support.fixture.event_loop import event_loop as loop from testing_support.fixtures import override_application_settings from testing_support.util import 
instance_hostname from testing_support.validators.validate_transaction_metrics import validate_transaction_metrics diff --git a/tests/datastore_elasticsearch/test_async_trace_node.py b/tests/datastore_elasticsearch/test_async_trace_node.py index c503c7c065..cae4d404cf 100644 --- a/tests/datastore_elasticsearch/test_async_trace_node.py +++ b/tests/datastore_elasticsearch/test_async_trace_node.py @@ -13,7 +13,7 @@ # limitations under the License. from conftest import ES_SETTINGS, ES_VERSION -from testing_support.fixture.event_loop import event_loop as loop # noqa: F401 +from testing_support.fixture.event_loop import event_loop as loop from testing_support.fixtures import override_application_settings, validate_tt_parenting from testing_support.util import instance_hostname from testing_support.validators.validate_tt_collector_json import validate_tt_collector_json diff --git a/tests/datastore_firestore/conftest.py b/tests/datastore_firestore/conftest.py index 41d13c476c..a9b24e2b75 100644 --- a/tests/datastore_firestore/conftest.py +++ b/tests/datastore_firestore/conftest.py @@ -66,7 +66,7 @@ def client(): @pytest.fixture def collection(client): - collection_ = client.collection(f"firestore_collection_{str(uuid.uuid4())}") + collection_ = client.collection(f"firestore_collection_{uuid.uuid4()!s}") yield collection_ client.recursive_delete(collection_) diff --git a/tests/datastore_psycopg/test_register.py b/tests/datastore_psycopg/test_register.py index 473bad61d4..46ea9dfcb4 100644 --- a/tests/datastore_psycopg/test_register.py +++ b/tests/datastore_psycopg/test_register.py @@ -49,7 +49,7 @@ async def coro(): @background_task() def test_register_range(loop, connection): async def test(): - type_name = f"floatrange_{str(os.getpid())}" + type_name = f"floatrange_{os.getpid()!s}" create_sql = f"CREATE TYPE {type_name} AS RANGE (subtype = float8,subtype_diff = float8mi)" diff --git a/tests/datastore_psycopg2/test_register.py b/tests/datastore_psycopg2/test_register.py 
index 96b28aa94b..5e0081b3de 100644 --- a/tests/datastore_psycopg2/test_register.py +++ b/tests/datastore_psycopg2/test_register.py @@ -51,7 +51,7 @@ def test_register_range(): host=DB_SETTINGS["host"], port=DB_SETTINGS["port"], ) as connection: - type_name = f"floatrange_{str(os.getpid())}" + type_name = f"floatrange_{os.getpid()!s}" create_sql = f"CREATE TYPE {type_name} AS RANGE (subtype = float8,subtype_diff = float8mi)" diff --git a/tests/datastore_redis/test_multiple_dbs.py b/tests/datastore_redis/test_multiple_dbs.py index 1cd815e752..fb68c2c8a9 100644 --- a/tests/datastore_redis/test_multiple_dbs.py +++ b/tests/datastore_redis/test_multiple_dbs.py @@ -72,7 +72,7 @@ instance_metric_name_1 = f"Datastore/instance/Redis/{host_1}/{port_1}" instance_metric_name_2 = f"Datastore/instance/Redis/{host_2}/{port_2}" - instance_metric_name_1_count = 2 if REDIS_PY_VERSION >= (5, 0) else 2 + instance_metric_name_1_count = 2 instance_metric_name_2_count = 3 if REDIS_PY_VERSION >= (5, 0) else 1 _enable_rollup_metrics = _base_rollup_metrics.extend( diff --git a/tests/external_aiobotocore/conftest.py b/tests/external_aiobotocore/conftest.py index b7fddfc4de..3c35ffbf0b 100644 --- a/tests/external_aiobotocore/conftest.py +++ b/tests/external_aiobotocore/conftest.py @@ -69,7 +69,7 @@ class MotoService: _services = {} # {name: instance} - def __init__(self, service_name: str, port: int = None, ssl: bool = False): + def __init__(self, service_name, port=None, ssl=False): self._service_name = service_name if port: diff --git a/tests/external_aiobotocore/test_bedrock_chat_completion.py b/tests/external_aiobotocore/test_bedrock_chat_completion.py index 442bae83da..65cb276c77 100644 --- a/tests/external_aiobotocore/test_bedrock_chat_completion.py +++ b/tests/external_aiobotocore/test_bedrock_chat_completion.py @@ -33,7 +33,7 @@ chat_completion_streaming_expected_events, ) from testing_support.fixtures import override_llm_token_callback_settings, reset_core_stats_engine, 
validate_attributes -from testing_support.ml_testing_utils import ( # noqa: F401 +from testing_support.ml_testing_utils import ( add_token_count_to_events, disabled_ai_monitoring_record_content_settings, disabled_ai_monitoring_settings, diff --git a/tests/external_aiobotocore/test_bedrock_embeddings.py b/tests/external_aiobotocore/test_bedrock_embeddings.py index ddf2cd1e51..96b930feb5 100644 --- a/tests/external_aiobotocore/test_bedrock_embeddings.py +++ b/tests/external_aiobotocore/test_bedrock_embeddings.py @@ -26,7 +26,7 @@ embedding_payload_templates, ) from testing_support.fixtures import override_llm_token_callback_settings, reset_core_stats_engine, validate_attributes -from testing_support.ml_testing_utils import ( # noqa: F401 +from testing_support.ml_testing_utils import ( add_token_count_to_events, disabled_ai_monitoring_record_content_settings, disabled_ai_monitoring_settings, diff --git a/tests/external_botocore/_mock_bedrock_encoding_utils.py b/tests/external_botocore/_mock_bedrock_encoding_utils.py index d6fb59c6d8..9bef2a9e77 100644 --- a/tests/external_botocore/_mock_bedrock_encoding_utils.py +++ b/tests/external_botocore/_mock_bedrock_encoding_utils.py @@ -25,7 +25,7 @@ def crc(b): def int_to_escaped_bytes(i, num_bytes=1): """Convert an integer into an arbitrary number of bytes.""" - return bytes.fromhex(f"{{:0{str(num_bytes * 2)}x}}".format(i)) + return bytes.fromhex(f"{{:0{num_bytes * 2!s}x}}".format(i)) def encode_headers(headers): diff --git a/tests/external_botocore/_mock_external_bedrock_server.py b/tests/external_botocore/_mock_external_bedrock_server.py index 907a7c5d6c..5114a251fd 100644 --- a/tests/external_botocore/_mock_external_bedrock_server.py +++ b/tests/external_botocore/_mock_external_bedrock_server.py @@ -6751,6 +6751,6 @@ def __init__(self, handler=simple_get, port=None, *args, **kwargs): print(f"RESPONSES = {dict(sorted(RESPONSES.items(), key=lambda i: (i[1][1], i[0])))}") with MockExternalBedrockServer() as server: - 
print(f"MockExternalBedrockServer serving on port {str(server.port)}") + print(f"MockExternalBedrockServer serving on port {server.port!s}") while True: pass # Serve forever diff --git a/tests/external_botocore/_test_bedrock_chat_completion.py b/tests/external_botocore/_test_bedrock_chat_completion.py index f4ffda2b24..155b6c993c 100644 --- a/tests/external_botocore/_test_bedrock_chat_completion.py +++ b/tests/external_botocore/_test_bedrock_chat_completion.py @@ -12,6 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +# Ignore unicode characters in this file from LLM responses +# ruff: noqa: RUF001 + chat_completion_payload_templates = { "amazon.titan-text-express-v1": '{ "inputText": "%s", "textGenerationConfig": {"temperature": %f, "maxTokenCount": %d }}', "ai21.j2-mid-v1": '{"prompt": "%s", "temperature": %f, "maxTokens": %d}', diff --git a/tests/external_feedparser/test_feedparser.py b/tests/external_feedparser/test_feedparser.py index 1bf4c4c004..54513a0f42 100644 --- a/tests/external_feedparser/test_feedparser.py +++ b/tests/external_feedparser/test_feedparser.py @@ -29,7 +29,7 @@ def feedparser(): @pytest.mark.parametrize("url", ("http://localhost", "feed:http://localhost", "feed://localhost")) def test_feedparser_external(feedparser, server, url): - url = f"{url}:{str(server.port)}" + url = f"{url}:{server.port!s}" @validate_transaction_metrics( "test_feedparser_external", @@ -66,6 +66,6 @@ def _test(): @pytest.mark.parametrize("url", ("http://localhost", "packages.xml")) def test_feedparser_no_transaction(feedparser, server, url): if url.startswith("http://"): - url = f"{url}:{str(server.port)}" + url = f"{url}:{server.port!s}" feed = feedparser.parse(url) assert feed["feed"]["link"] == "https://pypi.org/" diff --git a/tests/framework_aiohttp/_target_application.py b/tests/framework_aiohttp/_target_application.py index 96279e58fa..76582e2805 100644 --- 
a/tests/framework_aiohttp/_target_application.py +++ b/tests/framework_aiohttp/_target_application.py @@ -29,7 +29,7 @@ async def index(request): async def hang(request): while True: # noqa: ASYNC110 - await asyncio.sleep(0) # noqa: ASYNC110 + await asyncio.sleep(0) async def error(request): diff --git a/tests/framework_azurefunctions/sample_application/function_app.py b/tests/framework_azurefunctions/sample_application/function_app.py index 5079c0f9d6..ce0cbd1cdd 100644 --- a/tests/framework_azurefunctions/sample_application/function_app.py +++ b/tests/framework_azurefunctions/sample_application/function_app.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import azure.functions # pylint: disable=E0401 +import azure.functions app = azure.functions.FunctionApp(http_auth_level=azure.functions.AuthLevel.ANONYMOUS) diff --git a/tests/framework_django/test_asgi_application.py b/tests/framework_django/test_asgi_application.py index 3ca49b6911..3762e53ca0 100644 --- a/tests/framework_django/test_asgi_application.py +++ b/tests/framework_django/test_asgi_application.py @@ -45,7 +45,7 @@ ("Function/django.urls.resolvers:URLResolver.resolve", "present"), ] -rollup_metrics = scoped_metrics + [(f"Python/Framework/Django/{django.get_version()}", 1)] +rollup_metrics = [*scoped_metrics, (f"Python/Framework/Django/{django.get_version()}", 1)] @pytest.fixture @@ -57,7 +57,7 @@ def application(): @validate_transaction_metrics( - "views:index", scoped_metrics=[("Function/views:index", 1)] + scoped_metrics, rollup_metrics=rollup_metrics + "views:index", scoped_metrics=[("Function/views:index", 1), *scoped_metrics], rollup_metrics=rollup_metrics ) @validate_code_level_metrics("views", "index") def test_asgi_index(application): @@ -66,7 +66,7 @@ def test_asgi_index(application): @validate_transaction_metrics( - "views:exception", scoped_metrics=[("Function/views:exception", 1)] + scoped_metrics, 
rollup_metrics=rollup_metrics + "views:exception", scoped_metrics=[("Function/views:exception", 1), *scoped_metrics], rollup_metrics=rollup_metrics ) @validate_code_level_metrics("views", "exception") def test_asgi_exception(application): @@ -78,7 +78,7 @@ def test_asgi_exception(application): @validate_transaction_errors(errors=[]) @validate_transaction_metrics( "views:middleware_410", - scoped_metrics=[("Function/views:middleware_410", 1)] + scoped_metrics, + scoped_metrics=[("Function/views:middleware_410", 1), *scoped_metrics], rollup_metrics=rollup_metrics, ) @validate_code_level_metrics("views", "middleware_410") @@ -91,7 +91,7 @@ def test_asgi_middleware_ignore_status_codes(application): @validate_transaction_errors(errors=[]) @validate_transaction_metrics( "views:permission_denied", - scoped_metrics=[("Function/views:permission_denied", 1)] + scoped_metrics, + scoped_metrics=[("Function/views:permission_denied", 1), *scoped_metrics], rollup_metrics=rollup_metrics, ) @validate_code_level_metrics("views", "permission_denied") @@ -107,7 +107,7 @@ def test_asgi_class_based_view(application, url, view_name): @validate_transaction_errors(errors=[]) @validate_transaction_metrics( - view_name, scoped_metrics=[(f"Function/{view_name}", 1)] + scoped_metrics, rollup_metrics=rollup_metrics + view_name, scoped_metrics=[(f"Function/{view_name}", 1), *scoped_metrics], rollup_metrics=rollup_metrics ) @validate_code_level_metrics(namespace, func) def _test(): @@ -170,8 +170,8 @@ def test_asgi_html_insertion_failed(application, url): ("Function/views:template_tags", 1), ("Template/Render/main.html", 1), ("Template/Render/results.html", 1), - ] - + scoped_metrics, + *scoped_metrics, + ], rollup_metrics=rollup_metrics, ) @validate_code_level_metrics("views", "template_tags") diff --git a/tests/framework_graphql/_target_application.py b/tests/framework_graphql/_target_application.py index 91da5d7679..28f3b895ca 100644 --- a/tests/framework_graphql/_target_application.py +++ 
b/tests/framework_graphql/_target_application.py @@ -19,7 +19,7 @@ def check_response(query, response): - if isinstance(query, str) and "error" not in query or isinstance(query, Source) and "error" not in query.body: + if (isinstance(query, str) and "error" not in query) or (isinstance(query, Source) and "error" not in query.body): assert not response.errors, response.errors assert response.data else: diff --git a/tests/framework_graphql/test_application.py b/tests/framework_graphql/test_application.py index 8b34d31efc..7564dddaf2 100644 --- a/tests/framework_graphql/test_application.py +++ b/tests/framework_graphql/test_application.py @@ -240,7 +240,8 @@ def test_exception_in_middleware(target_application, middleware): ("Errors/all", 1), (f"Errors/all{'Other' if is_bg else 'Web'}", 1), (f"Errors/{'Other' if is_bg else 'Web'}Transaction/GraphQL/{name}", 1), - ] + _test_exception_scoped_metrics + *_test_exception_scoped_metrics, + ] # Attributes _expected_exception_resolver_attributes = { @@ -289,7 +290,8 @@ def test_exception_in_resolver(target_application, field): ("Errors/all", 1), (f"Errors/all{'Other' if is_bg else 'Web'}", 1), (f"Errors/{'Other' if is_bg else 'Web'}Transaction/GraphQL/{txn_name}", 1), - ] + _test_exception_scoped_metrics + *_test_exception_scoped_metrics, + ] # Attributes _expected_exception_resolver_attributes = { @@ -347,7 +349,8 @@ def test_exception_in_validation(target_application, query, exc_class): ("Errors/all", 1), (f"Errors/all{'Other' if is_bg else 'Web'}", 1), (f"Errors/{'Other' if is_bg else 'Web'}Transaction/GraphQL/{txn_name}", 1), - ] + _test_exception_scoped_metrics + *_test_exception_scoped_metrics, + ] # Attributes _expected_exception_operation_attributes = { diff --git a/tests/framework_starlette/test_application.py b/tests/framework_starlette/test_application.py index a565443f1e..55f751e9a3 100644 --- a/tests/framework_starlette/test_application.py +++ b/tests/framework_starlette/test_application.py @@ -50,13 +50,14 @@ def 
target_application(): MIDDLEWARE_METRICS = [ ("Function/_test_application:middleware_factory..middleware", 2), ("Function/_test_application:middleware_decorator", 1), -] + DEFAULT_MIDDLEWARE_METRICS + *DEFAULT_MIDDLEWARE_METRICS, +] @pytest.mark.parametrize("app_name", ("no_error_handler",)) @validate_transaction_metrics( "_test_application:index", - scoped_metrics=MIDDLEWARE_METRICS + [("Function/_test_application:index", 1)], + scoped_metrics=[*MIDDLEWARE_METRICS, ("Function/_test_application:index", 1)], rollup_metrics=[FRAMEWORK_METRIC], ) @validate_code_level_metrics("_test_application", "index") @@ -70,7 +71,7 @@ def test_application_index(target_application, app_name): @pytest.mark.parametrize("app_name", ("no_error_handler",)) @validate_transaction_metrics( "_test_application:non_async", - scoped_metrics=MIDDLEWARE_METRICS + [("Function/_test_application:non_async", 1)], + scoped_metrics=[*MIDDLEWARE_METRICS, ("Function/_test_application:non_async", 1)], rollup_metrics=[FRAMEWORK_METRIC], ) @validate_code_level_metrics("_test_application", "non_async") @@ -168,7 +169,7 @@ def _test(): def test_server_error_middleware(target_application, app_name, transaction_name, path, scoped_metrics): @validate_transaction_metrics( transaction_name, - scoped_metrics=scoped_metrics + [("Function/_test_application:runtime_error", 1)] + DEFAULT_MIDDLEWARE_METRICS, + scoped_metrics=[*scoped_metrics, ("Function/_test_application:runtime_error", 1), *DEFAULT_MIDDLEWARE_METRICS], rollup_metrics=[FRAMEWORK_METRIC], ) def _test(): diff --git a/tests/framework_strawberry/_target_application.py b/tests/framework_strawberry/_target_application.py index b48dd559c5..3e3b1d2330 100644 --- a/tests/framework_strawberry/_target_application.py +++ b/tests/framework_strawberry/_target_application.py @@ -33,7 +33,9 @@ def _run_sync(query, middleware=None): response = schema.execute_sync(query) - if isinstance(query, str) and "error" not in query or isinstance(query, Source) and "error" not in 
query.body: + if (isinstance(query, str) and "error" not in query) or ( + isinstance(query, Source) and "error" not in query.body + ): assert not response.errors else: assert response.errors @@ -53,7 +55,9 @@ def _run_async(query, middleware=None): loop = asyncio.get_event_loop() response = loop.run_until_complete(schema.execute(query)) - if isinstance(query, str) and "error" not in query or isinstance(query, Source) and "error" not in query.body: + if (isinstance(query, str) and "error" not in query) or ( + isinstance(query, Source) and "error" not in query.body + ): assert not response.errors else: assert response.errors diff --git a/tests/framework_tornado/_target_application.py b/tests/framework_tornado/_target_application.py index f2c3365490..2570b72d0e 100644 --- a/tests/framework_tornado/_target_application.py +++ b/tests/framework_tornado/_target_application.py @@ -184,7 +184,7 @@ async def coro_trace(): with FunctionTrace(name="trace", terminal=True): await tornado.gen.sleep(0) - asyncio.ensure_future(coro_trace()) + asyncio.ensure_future(coro_trace()) # noqa: RUF006 class WebNestedHandler(WebSocketHandler): diff --git a/tests/framework_tornado/test_server.py b/tests/framework_tornado/test_server.py index 792fdfdc9a..1d250ac48e 100644 --- a/tests/framework_tornado/test_server.py +++ b/tests/framework_tornado/test_server.py @@ -52,7 +52,7 @@ def test_server(app, uri, name, metrics, method_metric): metrics.append((FRAMEWORK_METRIC, 1)) metrics.append((METHOD_METRIC, 1 if method_metric else None)) - host = f"127.0.0.1:{str(app.get_http_port())}" + host = f"127.0.0.1:{app.get_http_port()!s}" namespace, func_name = name.split(".") namespace = namespace.replace(":", ".") diff --git a/tests/logger_loguru/test_attributes.py b/tests/logger_loguru/test_attributes.py index 525d78236b..8ba1c6f366 100644 --- a/tests/logger_loguru/test_attributes.py +++ b/tests/logger_loguru/test_attributes.py @@ -22,7 +22,7 @@ [ { # Fixed attributes "message": "context_attrs: arg1", - 
"context.file": f"(name='test_attributes.py', path='{str(__file__)}')", + "context.file": f"(name='test_attributes.py', path='{__file__!s}')", "context.function": "test_loguru_default_context_attributes", "context.extra.bound_attr": 1, "context.extra.contextual_attr": 2, diff --git a/tests/logger_structlog/conftest.py b/tests/logger_structlog/conftest.py index 902b1b1b7e..26b2cce367 100644 --- a/tests/logger_structlog/conftest.py +++ b/tests/logger_structlog/conftest.py @@ -54,7 +54,7 @@ def msg(self, event, **kwargs): fatal = failure = err = error = critical = exception = msg def __repr__(self): - return f"" + return f"" __str__ = __repr__ diff --git a/tests/messagebroker_confluentkafka/conftest.py b/tests/messagebroker_confluentkafka/conftest.py index 6830557986..b157abbca5 100644 --- a/tests/messagebroker_confluentkafka/conftest.py +++ b/tests/messagebroker_confluentkafka/conftest.py @@ -168,7 +168,7 @@ def __call__(self, obj, ctx): def topic(broker): from confluent_kafka.admin import AdminClient, NewTopic - topic = f"test-topic-{str(uuid.uuid4())}" + topic = f"test-topic-{uuid.uuid4()!s}" admin = AdminClient({"bootstrap.servers": broker}) new_topics = [NewTopic(topic, num_partitions=1, replication_factor=1)] diff --git a/tests/messagebroker_confluentkafka/test_consumer.py b/tests/messagebroker_confluentkafka/test_consumer.py index c46c040522..6eadb49edd 100644 --- a/tests/messagebroker_confluentkafka/test_consumer.py +++ b/tests/messagebroker_confluentkafka/test_consumer.py @@ -32,7 +32,8 @@ def test_custom_metrics(get_consumer_record, topic, expected_broker_metrics): custom_metrics = [ (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", 1), (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", 1), - ] + expected_broker_metrics + *expected_broker_metrics, + ] @validate_transaction_metrics( f"Named/{topic}", group="Message/Kafka/Topic", custom_metrics=custom_metrics, background_task=True @@ -62,8 +63,8 @@ def 
test_custom_metrics_on_existing_transaction(get_consumer_record, topic, expe (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", 1), (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", 1), (f"Python/MessageBroker/Confluent-Kafka/{version}", 1), - ] - + expected_broker_metrics, + *expected_broker_metrics, + ], background_task=True, ) @validate_transaction_count(1) @@ -80,8 +81,8 @@ def test_custom_metrics_inactive_transaction(get_consumer_record, topic, expecte custom_metrics=[ (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", None), (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", None), - ] - + expected_missing_broker_metrics, + *expected_missing_broker_metrics, + ], background_task=True, ) @validate_transaction_count(1) @@ -144,8 +145,8 @@ def _produce(): rollup_metrics=[ ("Supportability/DistributedTrace/AcceptPayload/Success", None), ("Supportability/TraceContext/Accept/Success", 1), - ] - + expected_broker_metrics, + *expected_broker_metrics, + ], background_task=True, ) @validate_transaction_count(1) diff --git a/tests/messagebroker_confluentkafka/test_producer.py b/tests/messagebroker_confluentkafka/test_producer.py index 53e78903fa..11c0d05a30 100644 --- a/tests/messagebroker_confluentkafka/test_producer.py +++ b/tests/messagebroker_confluentkafka/test_producer.py @@ -83,7 +83,7 @@ def test_trace_metrics(topic, send_producer_message, expected_broker_metrics): "test_producer:test_trace_metrics..test", scoped_metrics=scoped_metrics, rollup_metrics=unscoped_metrics, - custom_metrics=[(f"Python/MessageBroker/Confluent-Kafka/{version}", 1)] + expected_broker_metrics, + custom_metrics=[(f"Python/MessageBroker/Confluent-Kafka/{version}", 1), *expected_broker_metrics], background_task=True, ) @background_task() @@ -99,8 +99,8 @@ def test_distributed_tracing_headers(topic, send_producer_message, expected_brok rollup_metrics=[ ("Supportability/TraceContext/Create/Success", 1), ("Supportability/DistributedTrace/CreatePayload/Success", 1), - ] - + 
expected_broker_metrics, + *expected_broker_metrics, + ], background_task=True, ) @background_task() @@ -118,8 +118,8 @@ def test_distributed_tracing_headers_under_terminal(topic, send_producer_message rollup_metrics=[ ("Supportability/TraceContext/Create/Success", 1), ("Supportability/DistributedTrace/CreatePayload/Success", 1), - ] - + expected_broker_metrics, + *expected_broker_metrics, + ], background_task=True, ) @background_task(name="test_distributed_tracing_headers_under_terminal") diff --git a/tests/messagebroker_kafkapython/conftest.py b/tests/messagebroker_kafkapython/conftest.py index f8e2e7b45f..e300c15d1a 100644 --- a/tests/messagebroker_kafkapython/conftest.py +++ b/tests/messagebroker_kafkapython/conftest.py @@ -190,7 +190,7 @@ def topic(broker): from kafka.admin.client import KafkaAdminClient from kafka.admin.new_topic import NewTopic - topic = f"test-topic-{str(uuid.uuid4())}" + topic = f"test-topic-{uuid.uuid4()!s}" admin = KafkaAdminClient(bootstrap_servers=broker) new_topics = [NewTopic(topic, num_partitions=1, replication_factor=1)] diff --git a/tests/messagebroker_kafkapython/test_consumer.py b/tests/messagebroker_kafkapython/test_consumer.py index a48179b7a5..e53bc4ff7c 100644 --- a/tests/messagebroker_kafkapython/test_consumer.py +++ b/tests/messagebroker_kafkapython/test_consumer.py @@ -35,8 +35,8 @@ def test_custom_metrics(get_consumer_record, topic, expected_broker_metrics): custom_metrics=[ (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", 1), (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", 1), - ] - + expected_broker_metrics, + *expected_broker_metrics, + ], background_task=True, ) def _test(): @@ -63,8 +63,8 @@ def test_custom_metrics_on_existing_transaction(get_consumer_record, topic, expe (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", 1), (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", 1), (f"Python/MessageBroker/Kafka-Python/{version}", 1), - ] - + expected_broker_metrics, + *expected_broker_metrics, + 
], background_task=True, ) @validate_transaction_count(1) @@ -81,8 +81,8 @@ def test_custom_metrics_inactive_transaction(get_consumer_record, topic, expecte custom_metrics=[ (f"Message/Kafka/Topic/Named/{topic}/Received/Bytes", None), (f"Message/Kafka/Topic/Named/{topic}/Received/Messages", None), - ] - + expected_missing_broker_metrics, + *expected_missing_broker_metrics, + ], background_task=True, ) @validate_transaction_count(1) @@ -138,8 +138,8 @@ def _produce(): rollup_metrics=[ ("Supportability/DistributedTrace/AcceptPayload/Success", None), ("Supportability/TraceContext/Accept/Success", 1), - ] - + expected_broker_metrics, + *expected_broker_metrics, + ], background_task=True, ) @validate_transaction_count(1) diff --git a/tests/messagebroker_kafkapython/test_producer.py b/tests/messagebroker_kafkapython/test_producer.py index 47e9d7538c..12a09d6a21 100644 --- a/tests/messagebroker_kafkapython/test_producer.py +++ b/tests/messagebroker_kafkapython/test_producer.py @@ -33,7 +33,7 @@ def test_trace_metrics(topic, send_producer_message, expected_broker_metrics): "test_producer:test_trace_metrics..test", scoped_metrics=scoped_metrics, rollup_metrics=unscoped_metrics, - custom_metrics=[(f"Python/MessageBroker/Kafka-Python/{version}", 1)] + expected_broker_metrics, + custom_metrics=[(f"Python/MessageBroker/Kafka-Python/{version}", 1), *expected_broker_metrics], background_task=True, ) @background_task() @@ -49,8 +49,8 @@ def test_distributed_tracing_headers(topic, send_producer_message, expected_brok rollup_metrics=[ ("Supportability/TraceContext/Create/Success", 1), ("Supportability/DistributedTrace/CreatePayload/Success", 1), - ] - + expected_broker_metrics, + *expected_broker_metrics, + ], background_task=True, ) @background_task() @@ -68,8 +68,8 @@ def test_distributed_tracing_headers_under_terminal(topic, send_producer_message rollup_metrics=[ ("Supportability/TraceContext/Create/Success", 1), ("Supportability/DistributedTrace/CreatePayload/Success", 1), - ] - 
+ expected_broker_metrics, + *expected_broker_metrics, + ], background_task=True, ) @background_task(name="test_distributed_tracing_headers_under_terminal") diff --git a/tests/messagebroker_kombu/conftest.py b/tests/messagebroker_kombu/conftest.py index ae8675b0dd..6501b6bfd7 100644 --- a/tests/messagebroker_kombu/conftest.py +++ b/tests/messagebroker_kombu/conftest.py @@ -21,7 +21,7 @@ from amqp.exceptions import NotFound from kombu import messaging, serialization from testing_support.db_settings import rabbitmq_settings -from testing_support.fixtures import collector_agent_registration_fixture, collector_available_fixture # noqa: F401 +from testing_support.fixtures import collector_agent_registration_fixture, collector_available_fixture from testing_support.validators.validate_distributed_trace_accepted import validate_distributed_trace_accepted from newrelic.api.transaction import current_transaction diff --git a/tests/mlmodel_gemini/_mock_external_gemini_server.py b/tests/mlmodel_gemini/_mock_external_gemini_server.py index 3077abc789..7e8c720e13 100644 --- a/tests/mlmodel_gemini/_mock_external_gemini_server.py +++ b/tests/mlmodel_gemini/_mock_external_gemini_server.py @@ -974,6 +974,6 @@ def __init__(self, handler=simple_get, port=None, *args, **kwargs): if __name__ == "__main__": with MockExternalGeminiServer() as server: - print(f"MockExternalGeminiServer serving on port {str(server.port)}") + print(f"MockExternalGeminiServer serving on port {server.port!s}") while True: pass # Serve forever diff --git a/tests/mlmodel_gemini/conftest.py b/tests/mlmodel_gemini/conftest.py index 597bcd20c8..06d591f0ab 100644 --- a/tests/mlmodel_gemini/conftest.py +++ b/tests/mlmodel_gemini/conftest.py @@ -54,7 +54,7 @@ @pytest.fixture(scope="session") -def gemini_clients(MockExternalGeminiServer): # noqa: F811 +def gemini_clients(MockExternalGeminiServer): """ This configures the Gemini client and returns it """ @@ -105,7 +105,7 @@ def gemini_server(gemini_clients, 
wrap_httpx_client_send): @pytest.fixture(scope="session") -def wrap_httpx_client_send(extract_shortened_prompt): # noqa: F811 +def wrap_httpx_client_send(extract_shortened_prompt): def _wrap_httpx_client_send(wrapped, instance, args, kwargs): bound_args = bind_args(wrapped, args, kwargs) request = bound_args["request"] diff --git a/tests/mlmodel_langchain/_mock_external_openai_server.py b/tests/mlmodel_langchain/_mock_external_openai_server.py index 6e81761d58..d6adcdb9fb 100644 --- a/tests/mlmodel_langchain/_mock_external_openai_server.py +++ b/tests/mlmodel_langchain/_mock_external_openai_server.py @@ -568,6 +568,6 @@ def openai_version(): if __name__ == "__main__": _MockExternalOpenAIServer = MockExternalOpenAIServer() with MockExternalOpenAIServer() as server: - print(f"MockExternalOpenAIServer serving on port {str(server.port)}") + print(f"MockExternalOpenAIServer serving on port {server.port!s}") while True: pass # Serve forever diff --git a/tests/mlmodel_langchain/conftest.py b/tests/mlmodel_langchain/conftest.py index c6d3fa7284..58b0221d0b 100644 --- a/tests/mlmodel_langchain/conftest.py +++ b/tests/mlmodel_langchain/conftest.py @@ -60,7 +60,7 @@ @pytest.fixture(scope="session") -def openai_clients(openai_version, MockExternalOpenAIServer): # noqa: F811 +def openai_clients(openai_version, MockExternalOpenAIServer): """ This configures the openai client and returns it for openai v1 and only configures openai for v0 since there is no client. @@ -96,12 +96,7 @@ def chat_openai_client(openai_clients): @pytest.fixture(autouse=True, scope="session") -def openai_server( - openai_version, # noqa: F811 - openai_clients, - wrap_httpx_client_send, - wrap_stream_iter_events, -): +def openai_server(openai_version, openai_clients, wrap_httpx_client_send, wrap_stream_iter_events): """ This fixture will either create a mocked backend for testing purposes, or will set up an audit log file to log responses of the real OpenAI backend to a file. 
@@ -123,7 +118,7 @@ def openai_server( @pytest.fixture(scope="session") -def wrap_httpx_client_send(extract_shortened_prompt): # noqa: F811 +def wrap_httpx_client_send(extract_shortened_prompt): def _wrap_httpx_client_send(wrapped, instance, args, kwargs): bound_args = bind_args(wrapped, args, kwargs) stream = bound_args.get("stream", False) diff --git a/tests/mlmodel_langchain/test_vectorstore.py b/tests/mlmodel_langchain/test_vectorstore.py index 8f479000b5..bdb152fe5c 100644 --- a/tests/mlmodel_langchain/test_vectorstore.py +++ b/tests/mlmodel_langchain/test_vectorstore.py @@ -121,8 +121,8 @@ def test_vectorstore_modules_instrumented(): if not hasattr(class_.asimilarity_search, "__wrapped__"): uninstrumented_async_classes.append(class_name) - assert not uninstrumented_sync_classes, f"Uninstrumented sync classes found: {str(uninstrumented_sync_classes)}" - assert not uninstrumented_async_classes, f"Uninstrumented async classes found: {str(uninstrumented_async_classes)}" + assert not uninstrumented_sync_classes, f"Uninstrumented sync classes found: {uninstrumented_sync_classes!s}" + assert not uninstrumented_async_classes, f"Uninstrumented async classes found: {uninstrumented_async_classes!s}" @reset_core_stats_engine() diff --git a/tests/mlmodel_openai/_mock_external_openai_server.py b/tests/mlmodel_openai/_mock_external_openai_server.py index 52248d5cc0..b008675f99 100644 --- a/tests/mlmodel_openai/_mock_external_openai_server.py +++ b/tests/mlmodel_openai/_mock_external_openai_server.py @@ -759,6 +759,6 @@ def openai_version(): if __name__ == "__main__": _MockExternalOpenAIServer = MockExternalOpenAIServer() with MockExternalOpenAIServer() as server: - print(f"MockExternalOpenAIServer serving on port {str(server.port)}") + print(f"MockExternalOpenAIServer serving on port {server.port!s}") while True: pass # Serve forever diff --git a/tests/mlmodel_openai/conftest.py b/tests/mlmodel_openai/conftest.py index 748037884f..625459367b 100644 --- 
a/tests/mlmodel_openai/conftest.py +++ b/tests/mlmodel_openai/conftest.py @@ -80,7 +80,7 @@ @pytest.fixture(scope="session") -def openai_clients(openai_version, MockExternalOpenAIServer): # noqa: F811 +def openai_clients(openai_version, MockExternalOpenAIServer): """ This configures the openai client and returns it for openai v1 and only configures openai for v0 since there is no client. @@ -129,7 +129,7 @@ def async_openai_client(openai_clients): @pytest.fixture(autouse=True, scope="session") def openai_server( - openai_version, # noqa: F811 + openai_version, openai_clients, wrap_openai_api_requestor_request, wrap_openai_api_requestor_interpret_response, @@ -172,7 +172,7 @@ def openai_server( @pytest.fixture(scope="session") -def wrap_httpx_client_send(extract_shortened_prompt): # noqa: F811 +def wrap_httpx_client_send(extract_shortened_prompt): def _wrap_httpx_client_send(wrapped, instance, args, kwargs): bound_args = bind_args(wrapped, args, kwargs) stream = bound_args.get("stream", False) @@ -233,7 +233,7 @@ def _wrap_openai_api_requestor_interpret_response(wrapped, instance, args, kwarg @pytest.fixture(scope="session") -def wrap_openai_api_requestor_request(extract_shortened_prompt): # noqa: F811 +def wrap_openai_api_requestor_request(extract_shortened_prompt): def _wrap_openai_api_requestor_request(wrapped, instance, args, kwargs): params = bind_request_params(*args, **kwargs) if not params: diff --git a/tests/testing_support/asgi_testing.py b/tests/testing_support/asgi_testing.py index d372721149..821a20fe96 100644 --- a/tests/testing_support/asgi_testing.py +++ b/tests/testing_support/asgi_testing.py @@ -78,7 +78,7 @@ def generate_input(self, method, path, params, headers, body): "http_version": "1.1", "method": method.upper(), "path": path, - "query_string": params and params.encode("utf-8") or b"", + "query_string": (params and params.encode("utf-8")) or b"", "raw_path": path.encode("utf-8"), "root_path": "", "scheme": "http", diff --git 
a/tests/testing_support/db_settings.py b/tests/testing_support/db_settings.py index cb51a01e23..4a70dffa91 100644 --- a/tests/testing_support/db_settings.py +++ b/tests/testing_support/db_settings.py @@ -215,7 +215,7 @@ def mongodb_settings(): host = "host.docker.internal" if "GITHUB_ACTIONS" in os.environ else "127.0.0.1" instances = 2 settings = [ - {"host": host, "port": 8080 + instance_num, "collection": f"mongodb_collection_{str(os.getpid())}"} + {"host": host, "port": 8080 + instance_num, "collection": f"mongodb_collection_{os.getpid()!s}"} for instance_num in range(instances) ] return settings diff --git a/tests/testing_support/validators/validate_custom_events.py b/tests/testing_support/validators/validate_custom_events.py index e051e64ca5..8a1bad4342 100644 --- a/tests/testing_support/validators/validate_custom_events.py +++ b/tests/testing_support/validators/validate_custom_events.py @@ -79,7 +79,7 @@ def _check_event_attributes(expected, captured, mismatches): extra_keys = captured_keys - expected_keys if extra_keys: - mismatches.append(f"extra_keys: {str(tuple(extra_keys))}") + mismatches.append(f"extra_keys: {tuple(extra_keys)!s}") return False for key, value in expected[1].items(): diff --git a/tests/testing_support/validators/validate_ml_events.py b/tests/testing_support/validators/validate_ml_events.py index d831ee12cd..e2ec47a30f 100644 --- a/tests/testing_support/validators/validate_ml_events.py +++ b/tests/testing_support/validators/validate_ml_events.py @@ -79,7 +79,7 @@ def _check_event_attributes(expected, captured, mismatches): extra_keys = captured_keys - expected_keys if extra_keys: - mismatches.append(f"extra_keys: {str(tuple(extra_keys))}") + mismatches.append(f"extra_keys: {tuple(extra_keys)!s}") return False for key, value in expected[1].items():