Skip to content

Commit cda08c6

Browse files
stainless-botRobertCraigie
authored and committed
feat(client): add logging setup
1 parent a985c07 commit cda08c6

File tree

8 files changed

+76
-7
lines changed

8 files changed

+76
-7
lines changed

README.md

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -380,6 +380,16 @@ Note that requests that time out are [retried twice by default](#retries).
380380

381381
## Advanced
382382

383+
### Logging
384+
385+
We use the standard library [`logging`](https://docs.python.org/3/library/logging.html) module.
386+
387+
You can enable logging by setting the environment variable `OPENAI_LOG` to `debug`.
388+
389+
```shell
390+
$ export OPENAI_LOG=debug
391+
```
392+
383393
### How to tell whether `None` means `null` or missing
384394

385395
In an API response, a field may be explicitly `null`, or missing entirely; in either case, its value is `None` in this library. You can differentiate the two cases with `.model_fields_set`:

pyproject.toml

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -54,8 +54,6 @@ target-version = ["py37"]
5454
testpaths = ["tests"]
5555
addopts = "--tb=short"
5656
xfail_strict = true
57-
log_cli = true
58-
log_level = "INFO"
5957
asyncio_mode = "auto"
6058
filterwarnings = [
6159
"error"

src/openai/__init__.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@
3535
UnprocessableEntityError,
3636
APIResponseValidationError,
3737
)
38+
from ._utils._logs import setup_logging as _setup_logging
3839

3940
__all__ = [
4041
"types",
@@ -70,6 +71,8 @@
7071

7172
from .version import VERSION as VERSION
7273

74+
_setup_logging()
75+
7376
# Update the __module__ attribute for exported symbols so that
7477
# error messages point to this module instead of the module
7578
# it was originally defined in, e.g.

src/openai/_base_client.py

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
import time
55
import uuid
66
import inspect
7+
import logging
78
import platform
89
import warnings
910
import email.utils
@@ -59,7 +60,7 @@
5960
ModelBuilderProtocol,
6061
)
6162
from ._utils import is_dict, is_given, is_mapping
62-
from ._compat import model_copy
63+
from ._compat import model_copy, model_dump
6364
from ._models import (
6465
BaseModel,
6566
GenericModel,
@@ -75,6 +76,8 @@
7576
APIResponseValidationError,
7677
)
7778

79+
log: logging.Logger = logging.getLogger(__name__)
80+
7881
# TODO: make base page type vars covariant
7982
SyncPageT = TypeVar("SyncPageT", bound="BaseSyncPage[Any]")
8083
AsyncPageT = TypeVar("AsyncPageT", bound="BaseAsyncPage[Any]")
@@ -427,7 +430,8 @@ def _build_request(
427430
self,
428431
options: FinalRequestOptions,
429432
) -> httpx.Request:
430-
headers = self._build_headers(options)
433+
if log.isEnabledFor(logging.DEBUG):
434+
log.debug("Request options: %s", model_dump(options, exclude_unset=True))
431435

432436
kwargs: dict[str, Any] = {}
433437

@@ -440,6 +444,7 @@ def _build_request(
440444
else:
441445
raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`")
442446

447+
headers = self._build_headers(options)
443448
params = _merge_mappings(self._custom_query, options.params)
444449

445450
# If the given Content-Type header is multipart/form-data then it
@@ -877,6 +882,9 @@ def _request(
877882

878883
try:
879884
response = self._client.send(request, auth=self.custom_auth, stream=stream)
885+
log.debug(
886+
'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase
887+
)
880888
response.raise_for_status()
881889
except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
882890
if retries > 0 and self._should_retry(err.response):
@@ -925,6 +933,7 @@ def _retry_request(
925933
) -> ResponseT | _StreamT:
926934
remaining = remaining_retries - 1
927935
timeout = self._calculate_retry_timeout(remaining, options, response_headers)
936+
log.info("Retrying request to %s in %f seconds", options.url, timeout)
928937

929938
# In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
930939
# different thread if necessary.
@@ -1276,6 +1285,9 @@ async def _request(
12761285

12771286
try:
12781287
response = await self._client.send(request, auth=self.custom_auth, stream=stream)
1288+
log.debug(
1289+
'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase
1290+
)
12791291
response.raise_for_status()
12801292
except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
12811293
if retries > 0 and self._should_retry(err.response):
@@ -1334,6 +1346,7 @@ async def _retry_request(
13341346
) -> ResponseT | _AsyncStreamT:
13351347
remaining = remaining_retries - 1
13361348
timeout = self._calculate_retry_timeout(remaining, options, response_headers)
1349+
log.info("Retrying request to %s in %f seconds", options.url, timeout)
13371350

13381351
await anyio.sleep(timeout)
13391352

src/openai/_compat.py

Lines changed: 17 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -126,10 +126,24 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
126126
return model.json(indent=indent) # type: ignore
127127

128128

129-
def model_dump(
    model: pydantic.BaseModel,
    *,
    exclude_unset: bool = False,
    exclude_defaults: bool = False,
) -> dict[str, Any]:
    """Serialize *model* to a plain dict, working on both pydantic v1 and v2.

    Args:
        model: the pydantic model instance to serialize.
        exclude_unset: omit fields that were never explicitly set by the caller.
        exclude_defaults: omit fields whose value equals the field default.

    Returns:
        A ``dict[str, Any]`` representation of the model.
    """
    if PYDANTIC_V2:
        return model.model_dump(
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
        )
    # pydantic v1 fallback: `.dict()` is the only serialization API there
    # (it is deprecated under v2, hence the pyright suppression).
    return cast(
        "dict[str, Any]",
        model.dict(  # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
        ),
    )
133147

134148

135149
def model_parse(model: type[_ModelT], data: Any) -> _ModelT:

src/openai/_types.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -104,6 +104,9 @@ def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...
104104
def __bool__(self) -> Literal[False]:
105105
return False
106106

107+
def __repr__(self) -> str:
    # Render the sentinel by name so debug/log output reads `NOT_GIVEN`
    # instead of the default `<... NotGiven object at 0x...>`.
    return "NOT_GIVEN"
109+
107110

108111
NotGivenOr = Union[_T, NotGiven]
109112
NOT_GIVEN = NotGiven()

src/openai/_utils/_logs.py

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
import os
import logging

logger: logging.Logger = logging.getLogger("openai")
httpx_logger: logging.Logger = logging.getLogger("httpx")

# Supported values of the OPENAI_LOG env var, mapped to stdlib log levels.
_LEVEL_BY_ENV = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
}


def _basic_config() -> None:
    """Install a root handler with a timestamped, module-qualified format.

    Example record:
    [2023-10-05 14:12:26 - openai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK"
    """
    logging.basicConfig(
        format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )


def setup_logging() -> None:
    """Enable library logging when the OPENAI_LOG env var is `debug` or `info`.

    Any other value (or an unset variable) leaves logging untouched. Both the
    `openai` and `httpx` loggers are raised to the requested level so that the
    underlying HTTP traffic is visible alongside client-level messages.
    """
    level = _LEVEL_BY_ENV.get(os.environ.get("OPENAI_LOG", ""))
    if level is not None:
        _basic_config()
        logger.setLevel(level)
        httpx_logger.setLevel(level)

tests/conftest.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,13 @@
11
import asyncio
2+
import logging
23
from typing import Iterator
34

45
import pytest
56

67
pytest.register_assert_rewrite("tests.utils")
78

9+
logging.getLogger("openai").setLevel(logging.DEBUG)
10+
811

912
@pytest.fixture(scope="session")
1013
def event_loop() -> Iterator[asyncio.AbstractEventLoop]:

0 commit comments

Comments
 (0)