@@ -1095,16 +1095,17 @@ def test_text(test_prompt: str, max_new_tokens: int, sample_number: int, result:
         - result_cutoff: Perplexity score below which a trial run of this prompt is considered worthwhile
 
     """
-    response_1 = complete_text_greedy(text=test_prompt, max_new_tokens=max_new_tokens)
-    print(f"Sample {sample_number}: I ask the generator (greedy): {test_prompt} ... It responds: '{response_1}'.")
-    response_2 = complete_text_beam(text=test_prompt, max_new_tokens=max_new_tokens)
-    print(f"Sample {sample_number}: I ask the generator (Beam defaults - max_new_tokens: {max_new_tokens}, temperature: 0.75, top_k: 75, top_p: 0.98, repetition_penalty: None, presence_penalty: 1.3, frequency_penalty: 1.4): {test_prompt} ... It responds: '{response_2}'.")
-    response_3 = complete_text_beam(text=test_prompt, max_new_tokens=max_new_tokens, temperature=0.6, top_k=75, top_p=0.98, repetition_penalty=None, presence_penalty=1.3, frequency_penalty=1.4)
-    print(f"Sample {sample_number}: I ask the generator (Beam - max_new_tokens: {max_new_tokens}, temperature: 0.6, top_k: 75, top_p: 0.98, repetition_penalty: None, presence_penalty: 1.3, frequency_penalty: 1.4): {test_prompt} ... It responds: '{response_3}'.")
-    response_4 = complete_text_beam(text=test_prompt, max_new_tokens=max_new_tokens, temperature=0.7, top_k=75, top_p=0.98, repetition_penalty=None, presence_penalty=1.3, frequency_penalty=1.4)
-    print(f"Sample {sample_number}: I ask the generator (Beam - max_new_tokens: {max_new_tokens}, temperature: 0.7, top_k: 75, top_p: 0.98, repetition_penalty: None, presence_penalty: 1.3, frequency_penalty: 1.4): {test_prompt} ... It responds: '{response_4}'.")
-    response_5 = complete_text_beam(text=test_prompt, max_new_tokens=max_new_tokens, temperature=0.7, top_k=75, top_p=0.97, repetition_penalty=None, presence_penalty=1.3, frequency_penalty=1.4)
-    print(f"Sample {sample_number}: I ask the generator (Beam - max_new_tokens: {max_new_tokens}, temperature: 0.7, top_k: 75, top_p: 0.97, repetition_penalty: None, presence_penalty: 1.3, frequency_penalty: 1.4): {test_prompt} ... It responds: '{response_5}'.")
+    if result < result_cutoff:
+        response_1 = complete_text_greedy(text=test_prompt, max_new_tokens=max_new_tokens)
+        print(f"Sample {sample_number}: I ask the generator (greedy): {test_prompt} ... It responds: '{response_1}'.")
+        response_2 = complete_text_beam(text=test_prompt, max_new_tokens=max_new_tokens)
+        print(f"Sample {sample_number}: I ask the generator (Beam defaults - max_new_tokens: {max_new_tokens}, temperature: 0.75, top_k: 75, top_p: 0.98, repetition_penalty: None, presence_penalty: 1.3, frequency_penalty: 1.4): {test_prompt} ... It responds: '{response_2}'.")
+        response_3 = complete_text_beam(text=test_prompt, max_new_tokens=max_new_tokens, temperature=0.6, top_k=75, top_p=0.98, repetition_penalty=None, presence_penalty=1.3, frequency_penalty=1.4)
+        print(f"Sample {sample_number}: I ask the generator (Beam - max_new_tokens: {max_new_tokens}, temperature: 0.6, top_k: 75, top_p: 0.98, repetition_penalty: None, presence_penalty: 1.3, frequency_penalty: 1.4): {test_prompt} ... It responds: '{response_3}'.")
+        response_4 = complete_text_beam(text=test_prompt, max_new_tokens=max_new_tokens, temperature=0.7, top_k=75, top_p=0.98, repetition_penalty=None, presence_penalty=1.3, frequency_penalty=1.4)
+        print(f"Sample {sample_number}: I ask the generator (Beam - max_new_tokens: {max_new_tokens}, temperature: 0.7, top_k: 75, top_p: 0.98, repetition_penalty: None, presence_penalty: 1.3, frequency_penalty: 1.4): {test_prompt} ... It responds: '{response_4}'.")
+        response_5 = complete_text_beam(text=test_prompt, max_new_tokens=max_new_tokens, temperature=0.7, top_k=75, top_p=0.97, repetition_penalty=None, presence_penalty=1.3, frequency_penalty=1.4)
+        print(f"Sample {sample_number}: I ask the generator (Beam - max_new_tokens: {max_new_tokens}, temperature: 0.7, top_k: 75, top_p: 0.97, repetition_penalty: None, presence_penalty: 1.3, frequency_penalty: 1.4): {test_prompt} ... It responds: '{response_5}'.")
 
 
 prompt_samples = [
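The net effect of the hunk: all five generation calls (one greedy, four beam variants) now run only when the prompt's perplexity beats the cutoff, so unpromising prompts skip the generator entirely. A minimal driver sketch of how this might be wired up (`score_prompt` and `RESULT_CUTOFF` are assumptions for illustration, not part of this diff; `test_text` and `prompt_samples` come from the surrounding file):

```python
# Hypothetical driver loop; score_prompt() and RESULT_CUTOFF are assumed
# here for illustration. test_text() and prompt_samples are defined in
# the surrounding file.
RESULT_CUTOFF = 40.0  # assumed perplexity threshold; tune per model

for i, prompt in enumerate(prompt_samples, start=1):
    perplexity = score_prompt(prompt)  # assumed helper returning a float perplexity
    # With this change, test_text() is a no-op when perplexity >= result_cutoff,
    # so the expensive greedy and beam generations only run for promising prompts.
    test_text(
        test_prompt=prompt,
        max_new_tokens=10,
        sample_number=i,
        result=perplexity,
        result_cutoff=RESULT_CUTOFF,
    )
```

Gating inside `test_text` keeps every call site cheap by default; the alternative is checking `result < result_cutoff` at each call site, which skips even the function call but duplicates the check wherever `test_text` is used.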