Skip to content

Commit a8492a0

Browse files
Rework dspy logger (#1732)
* Rework dspy logger
* add enable/disable logging support
* file rename
1 parent 1bab822 commit a8492a0

File tree

16 files changed

+203
-190
lines changed

16 files changed

+203
-190
lines changed

dspy/__init__.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,16 +5,18 @@
55
from .primitives import *
66
from .retrieve import *
77
from .signatures import *
8-
from .utils.logging import logger, set_log_output
98

109
# Functional must be imported after primitives, predict and signatures
1110
from .functional import * # isort: skip
1211
from dspy.evaluate import Evaluate # isort: skip
1312
from dspy.clients import * # isort: skip
1413
from dspy.adapters import * # isort: skip
14+
from dspy.utils.logging_utils import configure_dspy_loggers, disable_logging, enable_logging
1515

1616
settings = dsp.settings
1717

18+
configure_dspy_loggers(__name__)
19+
1820
# LM = dsp.LM
1921

2022
AzureOpenAI = dsp.AzureOpenAI

dspy/clients/anyscale.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,10 @@
1-
from typing import Any, Dict, List, Optional
21
import json
3-
import yaml
42
import os
3+
from typing import Any, Dict, List, Optional
4+
5+
import yaml
6+
import logging
57

6-
from dspy.utils.logging import logger
78
from dspy.clients.finetune import (
89
FinetuneJob,
910
TrainingMethod,
@@ -18,6 +19,7 @@
1819
except ImportError:
1920
anyscale = None
2021

22+
logger = logging.getLogger(__name__)
2123

2224
# List of training methods supported by AnyScale
2325
TRAINING_METHODS_ANYSCALE = [

dspy/clients/finetune.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,15 @@
1+
import logging
12
import os
23
from abc import abstractmethod
34
from concurrent.futures import Future
45
from enum import Enum
56
from pathlib import Path
67
from typing import Any, Dict, List, Optional
7-
from dspy.utils.logging import logger
88

99
import ujson
1010
from datasets.fingerprint import Hasher
1111

12+
logger = logging.getLogger(__name__)
1213

1314
def get_finetune_directory() -> str:
1415
"""Get the directory to save the fine-tuned models."""

dspy/clients/lm.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import functools
2+
import logging
23
import os
34
import uuid
45
from concurrent.futures import ThreadPoolExecutor
@@ -13,7 +14,6 @@
1314
from dspy.clients.finetune import FinetuneJob, TrainingMethod
1415
from dspy.clients.lm_finetune_utils import execute_finetune_job, get_provider_finetune_job_class
1516
from dspy.utils.callback import BaseCallback, with_callbacks
16-
from dspy.utils.logging import logger
1717

1818
DISK_CACHE_DIR = os.environ.get("DSPY_CACHEDIR") or os.path.join(Path.home(), ".dspy_cache")
1919
litellm.cache = Cache(disk_cache_dir=DISK_CACHE_DIR, type="disk")
@@ -24,6 +24,8 @@
2424

2525
GLOBAL_HISTORY = []
2626

27+
logger = logging.getLogger(__name__)
28+
2729

2830
class LM:
2931
"""

dspy/clients/lm_finetune_utils.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,11 @@
1+
import logging
12
from typing import Any, Dict, List, Optional, Type, Union
23

34
from dspy.clients.anyscale import FinetuneJobAnyScale, finetune_anyscale
45
from dspy.clients.finetune import FinetuneJob, TrainingMethod
56
from dspy.clients.openai import FinetuneJobOpenAI, finetune_openai
6-
from dspy.utils.logging import logger
7+
8+
logger = logging.getLogger(__name__)
79

810
_PROVIDER_ANYSCALE = "anyscale"
911
_PROVIDER_OPENAI = "openai"

dspy/clients/openai.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
import logging
12
import re
23
import time
34
from collections import defaultdict
@@ -12,11 +13,12 @@
1213
save_data,
1314
validate_finetune_data,
1415
)
15-
from dspy.utils.logging import logger
1616

1717
# Provider name
1818
PROVIDER_OPENAI = "openai"
1919

20+
logger = logging.getLogger(__name__)
21+
2022

2123
def is_openai_model(model: str) -> bool:
2224
"""Check if the model is an OpenAI model."""

dspy/evaluate/evaluate.py

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import contextlib
2+
import logging
23
import signal
34
import sys
45
import threading
@@ -42,6 +43,8 @@ def HTML(x: str) -> str:
4243
# we print the number of failures, the first N examples that failed, and the first N exceptions raised.
4344

4445

46+
logger = logging.getLogger(__name__)
47+
4548
class Evaluate:
4649
def __init__(
4750
self,
@@ -102,7 +105,7 @@ def interrupt_handler_manager():
102105

103106
def interrupt_handler(sig, frame):
104107
self.cancel_jobs.set()
105-
dspy.logger.warning("Received SIGINT. Cancelling evaluation.")
108+
logger.warning("Received SIGINT. Cancelling evaluation.")
106109
default_handler(sig, frame)
107110

108111
signal.signal(signal.SIGINT, interrupt_handler)
@@ -135,7 +138,7 @@ def cancellable_wrapped_program(idx, arg):
135138
pbar.close()
136139

137140
if self.cancel_jobs.is_set():
138-
dspy.logger.warning("Evaluation was cancelled. The results may be incomplete.")
141+
logger.warning("Evaluation was cancelled. The results may be incomplete.")
139142
raise KeyboardInterrupt
140143

141144
return reordered_devset, ncorrect, ntotal
@@ -193,11 +196,11 @@ def wrapped_program(example_idx, example):
193196
raise e
194197

195198
if self.provide_traceback:
196-
dspy.logger.error(
199+
logger.error(
197200
f"Error for example in dev set: \t\t {e}\n\twith inputs:\n\t\t{example.inputs()}\n\nStack trace:\n\t{traceback.format_exc()}"
198201
)
199202
else:
200-
dspy.logger.error(
203+
logger.error(
201204
f"Error for example in dev set: \t\t {e}. Set `provide_traceback=True` to see the stack trace."
202205
)
203206

@@ -219,7 +222,7 @@ def wrapped_program(example_idx, example):
219222
display_progress,
220223
)
221224

222-
dspy.logger.info(f"Average Metric: {ncorrect} / {ntotal} ({round(100 * ncorrect / ntotal, 1)}%)")
225+
logger.info(f"Average Metric: {ncorrect} / {ntotal} ({round(100 * ncorrect / ntotal, 1)}%)")
223226

224227
predicted_devset = sorted(reordered_devset)
225228

dspy/primitives/assertions.py

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,12 @@
11
import inspect
2+
import logging
23
import uuid
34
from typing import Any
45

56
import dsp
67
import dspy
78

9+
logger = logging.getLogger(__name__)
810
#################### Assertion Helpers ####################
911

1012

@@ -82,10 +84,10 @@ def __call__(self) -> bool:
8284
if self.result:
8385
return True
8486
elif dspy.settings.bypass_assert:
85-
dspy.logger.error(f"AssertionError: {self.msg}")
87+
logger.error(f"AssertionError: {self.msg}")
8688
return True
8789
else:
88-
dspy.logger.error(f"AssertionError: {self.msg}")
90+
logger.error(f"AssertionError: {self.msg}")
8991
raise DSPyAssertionError(
9092
id=self.id,
9193
msg=self.msg,
@@ -105,10 +107,10 @@ def __call__(self) -> Any:
105107
if self.result:
106108
return True
107109
elif dspy.settings.bypass_suggest:
108-
dspy.logger.info(f"SuggestionFailed: {self.msg}")
110+
logger.info(f"SuggestionFailed: {self.msg}")
109111
return True
110112
else:
111-
dspy.logger.info(f"SuggestionFailed: {self.msg}")
113+
logger.info(f"SuggestionFailed: {self.msg}")
112114
raise DSPySuggestionError(
113115
id=self.id,
114116
msg=self.msg,
@@ -248,7 +250,7 @@ def wrapper(*args, **kwargs):
248250
dspy.settings.backtrack_to = dsp.settings.trace[-1][0]
249251

250252
if dspy.settings.backtrack_to is None:
251-
dspy.logger.error("Module not found in trace. If passing a DSPy Signature, please specify the intended module for the assertion (e.g., use `target_module = self.my_module(my_signature)` instead of `target_module = my_signature`).")
253+
logger.error("Module not found in trace. If passing a DSPy Signature, please specify the intended module for the assertion (e.g., use `target_module = self.my_module(my_signature)` instead of `target_module = my_signature`).")
252254

253255
# save unique feedback message for predictor
254256
if error_msg not in dspy.settings.predictor_feedbacks.setdefault(
@@ -277,7 +279,7 @@ def wrapper(*args, **kwargs):
277279
error_op.pop("_assert_traces", None)
278280

279281
else:
280-
dspy.logger.error(
282+
logger.error(
281283
"UNREACHABLE: No trace available, this should not happen. Is this run time?",
282284
)
283285

@@ -316,7 +318,7 @@ def assert_transform_module(
316318
"Module must have a forward method to have assertions handled.",
317319
)
318320
if getattr(module, "_forward", False):
319-
dspy.logger.info(
321+
logger.info(
320322
f"Module {module.__class__.__name__} already has a _forward method. Skipping...",
321323
)
322324
pass # TODO warning: might be overwriting a previous _forward method

dspy/retrieve/faiss_rm.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
Author: Jagane Sundar: https://github.com/jagane.
33
"""
44

5+
import logging
56
from typing import Optional, Union
67

78
import numpy as np
@@ -23,6 +24,7 @@
2324
)
2425

2526

27+
logger = logging.getLogger(__name__)
2628
class FaissRM(dspy.Retrieve):
2729
"""A retrieval module that uses an in-memory Faiss to return the top passages for a given query.
2830
@@ -80,7 +82,7 @@ def __init__(self, document_chunks, vectorizer=None, k: int = 3):
8082
embeddings = self._vectorizer(document_chunks)
8183
xb = np.array(embeddings)
8284
d = len(xb[0])
83-
dspy.logger.info(f"FaissRM: embedding size={d}")
85+
logger.info(f"FaissRM: embedding size={d}")
8486
if len(xb) < 100:
8587
self._faiss_index = faiss.IndexFlatL2(d)
8688
self._faiss_index.add(xb)
@@ -92,7 +94,7 @@ def __init__(self, document_chunks, vectorizer=None, k: int = 3):
9294
self._faiss_index.train(xb)
9395
self._faiss_index.add(xb)
9496

95-
dspy.logger.info(f"{self._faiss_index.ntotal} vectors in faiss index")
97+
logger.info(f"{self._faiss_index.ntotal} vectors in faiss index")
9698
self._document_chunks = document_chunks # save the input document chunks
9799

98100
super().__init__(k=k)
@@ -101,9 +103,9 @@ def _dump_raw_results(self, queries, index_list, distance_list) -> None:
101103
for i in range(len(queries)):
102104
indices = index_list[i]
103105
distances = distance_list[i]
104-
dspy.logger.debug(f"Query: {queries[i]}")
106+
logger.debug(f"Query: {queries[i]}")
105107
for j in range(len(indices)):
106-
dspy.logger.debug(f" Hit {j} = {indices[j]}/{distances[j]}: {self._document_chunks[indices[j]]}")
108+
logger.debug(f" Hit {j} = {indices[j]}/{distances[j]}: {self._document_chunks[indices[j]]}")
107109
return
108110

109111
def forward(self, query_or_queries: Union[str, list[str]], k: Optional[int] = None, **kwargs) -> dspy.Prediction:

dspy/teleprompt/bootstrap.py

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,14 @@
1-
import dspy
2-
import tqdm
1+
import logging
32
import random
43
import threading
5-
64
from typing import Dict, Optional
7-
from .vanilla import LabeledFewShot
5+
6+
import tqdm
7+
8+
import dspy
9+
810
from .teleprompt import Teleprompter
11+
from .vanilla import LabeledFewShot
912

1013
# TODO: metrics should return an object with __bool__ basically, but fine if they're more complex.
1114
# They can also be sortable.
@@ -28,6 +31,8 @@
2831

2932
# TODO: Add baselines=[...]
3033

34+
logger = logging.getLogger(__name__)
35+
3136
class BootstrapFewShot(Teleprompter):
3237
def __init__(
3338
self,
@@ -207,7 +212,7 @@ def _bootstrap_one_example(self, example, round_idx=0):
207212
current_error_count = self.error_count
208213
if current_error_count >= self.max_errors:
209214
raise e
210-
dspy.logger.error(f"Failed to run or to evaluate example {example} with {self.metric} due to {e}.")
215+
logger.error(f"Failed to run or to evaluate example {example} with {self.metric} due to {e}.")
211216

212217
if success:
213218
for step in trace:

0 commit comments

Comments (0)