From 1e68fc2c7c244e52ef9586a3c09e0860807687c0 Mon Sep 17 00:00:00 2001 From: artste Date: Sun, 8 Oct 2023 10:11:08 +0200 Subject: [PATCH] Downloading GGUF model from the Hugging Face Hub Downloading `llama-2-7b-chat.Q4_K_M.gguf` from the Hugging Face Hub when needed instead of relying on the pre-downloaded copy. --- lm-hackers.ipynb | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lm-hackers.ipynb b/lm-hackers.ipynb index 12795c1..5b126a9 100644 --- a/lm-hackers.ipynb +++ b/lm-hackers.ipynb @@ -2513,7 +2513,15 @@ } ], "source": [ - "llm = Llama(model_path=\"/home/jhoward/git/llamacpp/llama-2-7b-chat.Q4_K_M.gguf\")" + "# Download model from HF hub\n", + "from huggingface_hub import hf_hub_download; \n", + "downloaded_model_path = hf_hub_download(repo_id='TheBloke/Llama-2-7b-Chat-GGUF',\n", + " filename='llama-2-7b-chat.Q4_K_M.gguf',\n", + " use_auth_token=True\n", + " )\n", + "\n", + "# Create model\n", + "llm = Llama(model_path=downloaded_model_path)" ] }, {