1111 echo " Repository already exists. Skipping clone."
1212fi
1313
14+ # Declare a regular array to store results
15+ results=()
1416
1517run_conversion_and_inference_lora () {
1618 local model_name=$1
@@ -46,23 +48,31 @@ run_conversion_and_inference_lora() {
4648 OUTPUT_LORA_MERGED=$( llama-cli -m reduce-llms-for-testing/$model_name /size=$size_matrix /base/Base-$model_size_mb -F32-lora-merged.gguf \
4749 -p " <bos>I see a little silhouetto" -n 50 --seed 42)
4850
49-
50- # Echo the outputs with bullet points and spacing
51- echo -e " \n\n\n\033[1mResults:\033[0m"
52- echo -e " \n • \033[32mBase:\n $OUTPUT_BASE " # Green color for "BASE"
53- echo -e " \n • \033[34mLora hot:\n $OUTPUT_LORA_HOT " # Blue color for "Lora hot"
54- echo -e " \n • \033[36mLora merged:\n $OUTPUT_LORA_MERGED " # Cyan color for "Lora merged"
55- echo -e " \n\n\n \033[0m"
56-
51+ # Store the results in the regular array
52+ results+=( "
53+ \n\n\n\033[1mResults for $model_name with size $size_matrix and model size $model_size_mb :\033[0m
54+ \n • \033[32mBase:\n $OUTPUT_BASE
55+ \n • \033[34mLora hot:\n $OUTPUT_LORA_HOT
56+ \n • \033[36mLora merged:\n $OUTPUT_LORA_MERGED
57+ \n\n\n \033[0m
58+ " )
5759
5860 echo " All steps completed for $model_name with size $size_matrix and model size $model_size_mb !"
5961}
6062
# Parameter sets to run: each entry packs "model_name size_matrix model_size_mb"
# into one space-separated string that the run loop splits back apart.
declare -a params=(
  "Gemma2ForCausalLM 64 19M"
  # "AnotherModel 128 25M"
)
6568
# Run conversion + inference once per parameter set.
# Each entry holds three space-separated fields: model_name, size_matrix,
# model_size_mb. Split them explicitly with `read -r` instead of relying on
# an unquoted $param expansion — unquoted expansion would also glob-expand
# any wildcard characters that ever end up in an entry.
for param in "${params[@]}"; do
  read -r model_name size_matrix model_size_mb <<< "$param"
  run_conversion_and_inference_lora "$model_name" "$size_matrix" "$model_size_mb"
done
73+
# Once every run has completed, replay the accumulated reports in one block.
echo -e "\n\n\033[1mSummary of All Results:\033[0m"
for collected in "${results[@]}"; do
  # -e is required: entries embed literal \n and ANSI \033 escape sequences.
  echo -e "$collected"
done
0 commit comments