@@ -26,23 +26,23 @@ def test_launch_command_success(runner):
     # for Rich table rendering
     mock_response = MagicMock()
     mock_response.config = {
29- "slurm_job_id" : "14933053" ,
30- "model_name" : "Meta-Llama-3.1-8B" ,
31- "model_type" : "LLM" ,
32- "log_dir" : "/tmp/test_logs" ,
33- "partition" : "gpu" ,
34- "qos" : "normal" ,
35- "time" : "1:00:00" ,
36- "num_nodes" : "1" ,
37- "gpus_per_node" : "1" ,
38- "cpus_per_task" : "8" ,
39- "mem_per_node" : "32G" ,
40- "model_weights_parent_dir" : "/model-weights" ,
41- "vocab_size" : "128000" ,
42- "venv" : "/path/to/venv" ,
43- "vllm_args" : {"max_model_len" : 8192 },
44- "env" : {"CACHE" : "/cache" },
45- }
29+ "slurm_job_id" : "14933053" ,
30+ "model_name" : "Meta-Llama-3.1-8B" ,
31+ "model_type" : "LLM" ,
32+ "log_dir" : "/tmp/test_logs" ,
33+ "partition" : "gpu" ,
34+ "qos" : "normal" ,
35+ "time" : "1:00:00" ,
36+ "num_nodes" : "1" ,
37+ "gpus_per_node" : "1" ,
38+ "cpus_per_task" : "8" ,
39+ "mem_per_node" : "32G" ,
40+ "model_weights_parent_dir" : "/model-weights" ,
41+ "vocab_size" : "128000" ,
42+ "venv" : "/path/to/venv" ,
43+ "vllm_args" : {"max_model_len" : 8192 },
44+ "env" : {"CACHE" : "/cache" },
45+ }
     mock_client.launch_model.return_value = mock_response

     result = runner.invoke(cli, ["launch", "Meta-Llama-3.1-8B"])
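For reference, a minimal self-contained sketch of the pattern this hunk exercises: stub the client behind the CLI, then invoke the launch command with Click's test runner and assert on the rendered output. The import path vec_inf.cli._cli and the patch target get_client are assumptions for illustration; the real module layout and patch point may differ.

    from unittest.mock import MagicMock, patch

    from click.testing import CliRunner

    from vec_inf.cli._cli import cli  # hypothetical import path

    def test_launch_command_success_sketch():
        runner = CliRunner()
        mock_client = MagicMock()
        mock_response = MagicMock()
        # A subset of the config keys from the hunk above is enough to
        # show the shape of the assertion on the rendered table.
        mock_response.config = {
            "slurm_job_id": "14933053",
            "model_name": "Meta-Llama-3.1-8B",
        }
        mock_client.launch_model.return_value = mock_response

        # The patch target is hypothetical; it stands in for wherever
        # the CLI constructs its client.
        with patch("vec_inf.cli._cli.get_client", return_value=mock_client):
            result = runner.invoke(cli, ["launch", "Meta-Llama-3.1-8B"])

        assert result.exit_code == 0
        assert "14933053" in result.output  # job id shown in the Rich table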