Commit e05e4a6 (parent: 843845f)

Update generative-proof-of-concept-CPU-preprocessing-in-memory.py

Make the printing of generated text samples more consistent.
1 file changed: generative-proof-of-concept-CPU-preprocessing-in-memory.py (5 additions, 4 deletions)
@@ -1164,9 +1164,11 @@ def test_text(test_prompt: str, max_new_tokens: int, sample_number: int, result:
     ]
     # Default cases, no params
     response1 = response = complete_text_greedy(text=test_prompt, max_new_tokens=max_new_tokens)
-    print(f"Sample {sample_number}: I ask the generator (greedy): {test_prompt}... It responds: '{response1}'.")
+    print(f"Trial #: {trial_id} Text Sample #: {test_sample_number} Perplexity: {result_0} GENERATE SAMPLING PARAMS: Greedy max_new_tokens=10 otherwise - N/A: PROMPT: '{test_prompt}' RESPONSE: '{response_1}'")
+    # print(f"Sample {sample_number}: I ask the generator (greedy): {test_prompt}... It responds: '{response1}'.")
     response_2 = complete_text_beam(text=test_prompt, max_new_tokens=max_new_tokens)
-    print(f"Sample {sample_number}: I ask the generator (Beam defaults - max_new_tokens: 10, temperature: 0.75, top_k: 75, top_p: 0.98, repetition_penalty: None, presence_penalty: 1.3, frequency_penalty: 1.4): {test_prompt}... It responds: '{response_2}'.")
+    print(f"Trial #: {trial_id} Text Sample #: {test_sample_number} Perplexity: {result_0} GENERATE PARAMS: Beam Default - max_new_tokens = 10, temperature=0.75, top_k=75, top_p=0.98, repetition_penalty=None, presence_penalty=1.3, frequency_penalty=1.4: PROMPT: '{test_prompt}' RESPONSE: '{response_2}'.")
+    # print(f"Sample {sample_number}: I ask the generator (Beam defaults - max_new_tokens: 10, temperature: 0.75, top_k: 75, top_p: 0.98, repetition_penalty: None, presence_penalty: 1.3, frequency_penalty: 1.4): {test_prompt}... It responds: '{response_2}'.")

     for perm_0 in generation_param_permutations:
         response_0 = complete_text_beam(text=test_prompt,
@@ -1177,14 +1179,13 @@ def test_text(test_prompt: str, max_new_tokens: int, sample_number: int, result:
                                         repetition_penalty=perm_0['repetition_penalty'],
                                         presence_penalty=perm_0['presence_penalty'],
                                         frequency_penalty=perm_0['frequency_penalty'])
-        print(f"Trial #: {trial_id} Text Sample #: {test_sample_number} Perplexity: {result_0} GENERATE PARAMS: max_new_tokens: {perm_0['max_new_tokens']} temperature={perm_0['temperature']}, top_k={perm_0['top_k']}, top_p={perm_0['top_p']}, repetition_penalty={perm_0['repetition_penalty']} presence_penalty={perm_0['presence_penalty']} frequency_penalty{perm_0['frequency_penalty']} PROMPT: '{test_prompt}' RESPONSE: '{response_0}'")
+        print(f"Trial #: {trial_id} Text Sample #: {test_sample_number} Perplexity: {result_0} GENERATE PARAMS: max_new_tokens={perm_0['max_new_tokens']} temperature={perm_0['temperature']}, top_k={perm_0['top_k']}, top_p={perm_0['top_p']}, repetition_penalty={perm_0['repetition_penalty']} presence_penalty={perm_0['presence_penalty']} frequency_penalty{perm_0['frequency_penalty']} PROMPT: '{test_prompt}' RESPONSE: '{response_0}'")
         #
         # print(f"Sample {sample_number}: I ask the generator (Beam: - max_new_tokens: 10, temperature=0.6, top_k=75, top_p=0.98, repetition_penalty=None, presence_penalty = 1.3, frequency_penalty = 1.4): {test_prompt}... It responds: '{response_3}'.")
         # response_4 = complete_text_beam(text=test_prompt, max_new_tokens=max_new_tokens, temperature=0.7, top_k=75, top_p=0.98, repetition_penalty=None, presence_penalty = 1.3, frequency_penalty = 1.4)
         # print(f"Sample {sample_number}: I ask the generator (Beam: - max_new_tokens: 10, temperature=0.7, top_k=75, top_p=0.98, repetition_penalty=None, presence_penalty = 1.3, frequency_penalty = 1.4): {test_prompt}... It responds: '{response_4}'.")
         # response_5 = complete_text_beam(text=test_prompt, max_new_tokens=max_new_tokens, temperature=0.7, top_k=75, top_p=0.97, repetition_penalty=None, presence_penalty = 1.3, frequency_penalty = 1.4)
         # print(f"Sample {sample_number}: I ask the generator (Beam: - max_new_tokens: 10, temperature=0.7, top_k=75, top_p=0.97, repetition_penalty=None, presence_penalty = 1.3, frequency_penalty = 1.4): {test_prompt}... It responds: '{response_5}'.")
-    # Reorder printouts to the format f"Trial #: {trial_id} Text Sample #: {text_sample_number} generate_params temperature={temperature}, top_k={top_k}, top_p={top_p}, presence_penalty={presence_penalty} frequency_penalty{frequency_penalty} PROMPT: {prompt} RESPONSE: {}"

     # Sample prompts to test:
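For context, the permutation loop in this diff reads its sampling settings from generation_param_permutations, a list of dicts keyed by the exact parameter names used in the print call. Below is a minimal sketch of how such a list could be built with itertools.product; the key names are taken from the diff above, but the value grids and the builder itself are illustrative assumptions, not code from the repository:

# Sketch only: the real file may construct generation_param_permutations
# differently. Key names match the perm_0[...] lookups in the diff above.
from itertools import product

param_grid = {
    'max_new_tokens': [10],                # hypothetical value grids
    'temperature': [0.6, 0.7, 0.75],       # chosen only for illustration
    'top_k': [75],
    'top_p': [0.97, 0.98],
    'repetition_penalty': [None],
    'presence_penalty': [1.3],
    'frequency_penalty': [1.4],
}

# One dict per combination, e.g. {'max_new_tokens': 10, 'temperature': 0.6, ...}
generation_param_permutations = [
    dict(zip(param_grid, combo)) for combo in product(*param_grid.values())
]

Each resulting dict can then be unpacked through the perm_0['...'] lookups shown in the loop.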

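Since the commit's goal is one consistent report line, a natural follow-up would be to centralize the format in a single helper rather than repeating the f-string at each call site; that would also prevent the small slips still visible in the diff (the added greedy line interpolates {response_1} although the variable is assigned as response1, and frequency_penalty{...} is missing its =). A hypothetical sketch, not code from this commit:

# Hypothetical refactor sketch: one helper that renders every generated-text
# sample in the same "Trial #: ..." format used by the new print calls.
def format_sample_report(trial_id, test_sample_number, perplexity,
                         params_desc, prompt, response):
    # params_desc is a free-form parameter summary, e.g. "Greedy max_new_tokens=10"
    # or "max_new_tokens=10 temperature=0.75, top_k=75, ...".
    return (f"Trial #: {trial_id} Text Sample #: {test_sample_number} "
            f"Perplexity: {perplexity} GENERATE PARAMS: {params_desc} "
            f"PROMPT: '{prompt}' RESPONSE: '{response}'")

# Example usage with made-up values:
print(format_sample_report(1, 3, 42.7, "Greedy max_new_tokens=10",
                           "Once upon a time", "there was a model"))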