
Commit ceb296a

New base prompt

Author: Lorenzo Toniazzi
1 parent 943baac commit ceb296a

File tree

1 file changed: 3 additions, 4 deletions


tests/test_lora_conversion_and_inference.sh

Lines changed: 3 additions & 4 deletions
@@ -18,7 +18,6 @@ results=()
 run_conversion_and_inference_lora() {
     local model_name=$1
     local size_matrix=$2
-    local bos_token=$3
 
     # Convert safetensors to gguf
     echo "Running convert_hf_to_gguf.py for $model_name with size $size_matrix..."
@@ -40,7 +39,7 @@ run_conversion_and_inference_lora() {
     # Run inference
     echo "Running llama-cli without lora for $model_name with size $size_matrix..."
     OUTPUT_BASE=$(llama-cli -m $MODELS_REPO/$model_name/size=$size_matrix/base/Base-F32.gguf \
-        -p "When forty winters shall besiege" -n 50 --seed 42)
+        -p "Look again at that dot." -n 50 --seed 42)
 
     echo "Running llama-cli with lora for $model_name with size $size_matrix..."
     OUTPUT_LORA_HOT=$(llama-cli -m $MODELS_REPO/$model_name/size=$size_matrix/base/Base-F32.gguf \
@@ -65,8 +64,8 @@ run_conversion_and_inference_lora() {
 
 # Array of parameters to iterate over
 declare -a params=(
-    "Gemma2ForCausalLM 64 <bos>"
-    "LlamaForCausalLM 64 <|begin_of_text|>"
+    "Gemma2ForCausalLM 64"
+    "LlamaForCausalLM 64"
 )
 
 # Loop through each set of parameters
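
Since the bos_token argument is dropped, each params entry now carries only a model name and a matrix size. The loop that consumes the array is not part of this diff; the following is a minimal sketch, assuming each entry is split on whitespace and passed straight to run_conversion_and_inference_lora (variable names here are assumptions, not the script's actual loop body):

# Sketch only, not part of the commit: iterate over params and call the
# test function with the two remaining fields per entry.
for param in "${params[@]}"; do
    read -r model_name size_matrix <<< "$param"
    run_conversion_and_inference_lora "$model_name" "$size_matrix"
done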
