Skip to content

Commit 62efa71

Browse files
Update generative-proof-of-concept-CPU-preprocessing-in-memory.py
Syntax / naming error.
1 parent 3a97f0e commit 62efa71

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

generative-proof-of-concept-CPU-preprocessing-in-memory.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1092,7 +1092,7 @@ def test_text(test_prompt: str, max_new_tokens: int, sample_number: int, result:
10921092
- max_new_tokens: int, number of tokens to generate unless we generate a stop token.
10931093
- sample_number: Metadata for sample...
10941094
- result: Perplexity score from this run
1095-
- result cutoff: Perplexity score that would be expected to indicate a trial worth running this pn
1095+
- result_cutoff: Perplexity score that would be expected to indicate a trial worth running this pn
10961096
10971097
"""
10981098
response1 = response = complete_text_greedy(text=test_prompt, max_new_tokens=max_new_tokens)
@@ -1116,7 +1116,7 @@ def test_text(test_prompt: str, max_new_tokens: int, sample_number: int, result:
11161116

11171117
counter = 0
11181118
for sample in prompt_samples:
1119-
test_text(test_prompt=sample, max_new_tokens=MAX_NEW_TOKENS, sample_number= counter, result=result, result cutoff = RESULT_CUTOFF)
1119+
test_text(test_prompt=sample, max_new_tokens=MAX_NEW_TOKENS, sample_number= counter, result=result, result_cutoff = RESULT_CUTOFF)
11201120

11211121
# # Tokenize the text without padding first to get actual tokens
11221122
# sample_tokenized = tokenizer(

0 commit comments

Comments (0)