Skip to content

Commit b25381e

Browse files
committed
Better name
1 parent 6af11ba commit b25381e

File tree

3 files changed

+17
-17
lines changed

3 files changed

+17
-17
lines changed

guidance/models/_guidance_engine_metrics.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,5 +2,5 @@
22

33

44
class GuidanceEngineMetrics(BaseModel):
5-
model_input_tokens: NonNegativeInt = 0
6-
model_output_tokens: NonNegativeInt = 0
5+
engine_input_tokens: NonNegativeInt = 0
6+
engine_output_tokens: NonNegativeInt = 0

guidance/models/transformers/_transformers.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -269,8 +269,8 @@ def get_logits(self, token_ids, forced_bytes, current_temp):
269269
self._cached_logits = (
270270
model_out.logits[0, -1, : len(self.tokenizer.tokens)].cpu().numpy()
271271
)
272-
self.metrics.model_input_tokens += len(new_token_ids)
273-
self.metrics.model_output_tokens += 1
272+
self.metrics.engine_input_tokens += len(new_token_ids)
273+
self.metrics.engine_output_tokens += 1
274274

275275
return self._cached_logits
276276

tests/library/test_gen.py

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -83,21 +83,21 @@ def test_metrics_smoke(selected_model: models.Model):
8383
print(f"{lm.engine.metrics=}")
8484
# Can't be sure of exact count due to token healing
8585
assert (
86-
lm.engine.metrics.model_output_tokens == 1
87-
or lm.engine.metrics.model_output_tokens == 2
86+
lm.engine.metrics.engine_output_tokens == 1
87+
or lm.engine.metrics.engine_output_tokens == 2
8888
)
89-
assert lm.engine.metrics.model_input_tokens > 1
89+
assert lm.engine.metrics.engine_input_tokens > 1
9090

9191
lm += "fg"
9292
lm += gen("second", max_tokens=1)
9393
# Again, trouble with healing
9494
assert (
95-
lm.engine.metrics.model_output_tokens == 1
96-
or lm.engine.metrics.model_output_tokens == 2
95+
lm.engine.metrics.engine_output_tokens == 1
96+
or lm.engine.metrics.engine_output_tokens == 2
9797
)
9898
assert (
99-
lm.engine.metrics.model_output_tokens >= 2
100-
or lm.engine.metrics.model_output_tokens <= 4
99+
lm.engine.metrics.engine_output_tokens >= 2
100+
or lm.engine.metrics.engine_output_tokens <= 4
101101
)
102102

103103

@@ -115,12 +115,12 @@ def test_metrics_select(selected_model: models.Model):
115115
)
116116
print(f"lm={str(lm)}")
117117
print(f"{lm.engine.metrics=}")
118-
assert lm.engine.metrics.model_input_tokens > 1
119-
assert lm.engine.metrics.model_output_tokens > 0
118+
assert lm.engine.metrics.engine_input_tokens > 1
119+
assert lm.engine.metrics.engine_output_tokens > 0
120120
# Guidance should be able to force the generation after only a couple of tokens
121121
# so even though the options are long, relatively few output tokens should be
122122
# needed
123-
assert lm.engine.metrics.model_input_tokens > lm.engine.metrics.model_output_tokens
123+
assert lm.engine.metrics.engine_input_tokens > lm.engine.metrics.engine_output_tokens
124124

125125

126126
def test_unicode(selected_model):
@@ -140,12 +140,12 @@ def test_unicode2(selected_model: models.Model):
140140
lm.engine.reset_metrics()
141141
prompt = "Janet’s ducks lay 16 eggs per day"
142142
lm += prompt + gen(max_tokens=10)
143-
assert lm.engine.metrics.model_input_tokens > 1
143+
assert lm.engine.metrics.engine_input_tokens > 1
144144
# Due to token healing, we can't be sure of the
145145
# precise output count
146146
assert (
147-
lm.engine.metrics.model_output_tokens == 10
148-
or lm.engine.metrics.model_output_tokens == 11
147+
lm.engine.metrics.engine_output_tokens == 10
148+
or lm.engine.metrics.engine_output_tokens == 11
149149
)
150150

151151

0 commit comments

Comments (0)