README.md: 12 additions & 12 deletions
@@ -27,7 +27,7 @@ await uploadFile({
   }
 });
 
-// Use HF Inference API, or external Inference Providers!
+// Use all supported Inference Providers!
 
 await inference.chatCompletion({
   model: "meta-llama/Llama-3.1-8B-Instruct",
@@ -55,7 +55,7 @@ await inference.textToImage({
 
 This is a collection of JS libraries to interact with the Hugging Face API, with TS types included.
 
-- [@huggingface/inference](packages/inference/README.md): Use HF Inference API (serverless), Inference Endpoints (dedicated) and all supported Inference Providers to make calls to 100,000+ Machine Learning models
+- [@huggingface/inference](packages/inference/README.md): Use all supported (serverless) Inference Providers or switch to Inference Endpoints (dedicated) to make calls to 100,000+ Machine Learning models
 - [@huggingface/hub](packages/hub/README.md): Interact with huggingface.co to create or delete repos and commit / download files
 - [@huggingface/agents](packages/agents/README.md): Interact with HF models through a natural language interface
 - [@huggingface/gguf](packages/gguf/README.md): A GGUF parser that works on remotely hosted files.
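To illustrate the last bullet above, here is a minimal sketch of reading GGUF metadata from a remotely hosted file. The model URL is an illustrative assumption; check the package's README for the exact return shape.

```ts
import { gguf } from "@huggingface/gguf";

// Hypothetical remote GGUF file, used only to illustrate the @huggingface/gguf bullet.
const URL =
  "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_K_M.gguf";

const { metadata, tensorInfos } = await gguf(URL);
console.log(metadata["general.architecture"]); // e.g. "llama"
console.log(tensorInfos.length);               // number of tensors described in the header
```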
@@ -128,18 +128,18 @@ import { InferenceClient } from "@huggingface/inference";
gguf: "{# Alias tools -> available_tools #}\n{%- if tools and not available_tools -%}\n {%- set available_tools = tools -%}\n{%- endif -%}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_messages = messages[1:] %}\n {%- else %}\n {%- set system_message = \" Knowledge Cutoff Date: April 2024.\n Today's Date: \" + strftime_now('%B %d, %Y') + \". You are Granite, developed by IBM.\" %}\n {%- if available_tools and documents %}\n {%- set system_message = system_message + \" You are a helpful assistant with access to the following tools. When a tool is required to answer the user's query, respond only with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request. \nWrite the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data.\" %}\n {%- elif available_tools %}\n {%- set system_message = system_message + \" You are a helpful assistant with access to the following tools. When a tool is required to answer the user's query, respond only with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request.\" %}\n {%- elif documents %}\n {%- set system_message = system_message + \" Write the response to the user's input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data.\" %}\n {%- elif thinking %}\n {%- set system_message = system_message + \" You are a helpful AI assistant.\nRespond to every user query in a comprehensive and detailed way. You can write down your thoughts and reasoning process before responding. In the thought process, engage in a comprehensive cycle of analysis, summarization, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. In the response section, based on various attempts, explorations, and reflections from the thoughts section, systematically present the final solution that you deem correct. The response should summarize the thought process. Write your thoughts between <think></think> and write your response between <response></response> for each user query.\" %}\n {%- else %}\n {%- set system_message = system_message + \" You are a helpful AI assistant.\" %}\n {%- endif %}\n {%- if 'citations' in controls and documents %}\n {%- set system_message = system_message + ' \nUse the symbols <|start_of_cite|> and <|end_of_cite|> to indicate when a fact comes from a document in the search result, e.g <|start_of_cite|> {document_id: 1}my fact <|end_of_cite|> for a fact from document 1. Afterwards, list all the citations with their corresponding documents in an ordered list.' %}\n {%- endif %}\n {%- if 'hallucinations' in controls and documents %}\n {%- set system_message = system_message + ' \nFinally, after the response is written, include a numbered list of sentences from the response with a corresponding risk value that are hallucinated and not based in the documents.' 
%}\n {%- endif %}\n {%- set loop_messages = messages %}\n {%- endif %}\n {{- '<|start_of_role|>system<|end_of_role|>' + system_message + '<|end_of_text|>\n' }}\n {%- if available_tools %}\n {{- '<|start_of_role|>available_tools<|end_of_role|>' }}\n {{- available_tools | tojson(indent=4) }}\n {{- '<|end_of_text|>\n' }}\n {%- endif %}\n {%- if documents %}\n {%- for document in documents %}\n {{- '<|start_of_role|>document {\"document_id\": \"' + document['doc_id'] | string + '\"}<|end_of_role|>\n' }}\n {{- document['text'] }}\n {{- '<|end_of_text|>\n' }}\n {%- endfor %}\n {%- endif %}\n {%- for message in loop_messages %}\n {{- '<|start_of_role|>' + message['role'] + '<|end_of_role|>' + message['content'] + '<|end_of_text|>\n' }}\n {%- if loop.last and add_generation_prompt %}\n {{- '<|start_of_role|>assistant' }}\n {%- if controls %}\n {{- ' ' + controls | tojson()}}\n {%- endif %}\n {{- '<|end_of_role|>' }}\n {%- endif %}\n {%- endfor %}",
+		ollama: {
+			template:
'{{- /*\n\n------ MESSAGE PARSING ------\n\n*/}}\n{{- /*\nDeclare the prompt structure variables to be filled in from messages\n*/}}\n{{- $system := "" }}\n{{- $documents := "" }}\n{{- $documentCounter := 0 }}\n{{- $thinking := false }}\n{{- $citations := false }}\n{{- $hallucinations := false }}\n{{- $length := "" }}\n{{- $originality := "" }}\n\n{{- /*\nLoop over messages and look for a user-provided system message and documents\n*/ -}}\n{{- range .Messages }}\n\n {{- /* User defined system prompt(s) */}}\n {{- if (eq .Role "system")}}\n {{- if (ne $system "") }}\n {{- $system = print $system "\\n\\n" }}\n {{- end}}\n {{- $system = print $system .Content }}\n {{- end}}\n\n {{- /*\n NOTE: Since Ollama collates consecutive roles, for control and documents, we\n work around this by allowing the role to contain a qualifier after the\n role string.\n */ -}}\n\n {{- /* Role specified controls */ -}}\n {{- if (and (ge (len .Role) 7) (eq (slice .Role 0 7) "control")) }}\n {{- if (eq .Content "thinking")}}{{- $thinking = true }}{{- end}}\n {{- if (eq .Content "citations")}}{{- $citations = true }}{{- end}}\n {{- if (eq .Content "hallucinations")}}{{- $hallucinations = true }}{{- end}}\n {{- if (and (ge (len .Content) 7) (eq (slice .Content 0 7) "length "))}}\n {{- $length = slice .Content 7 }}\n {{- end}}\n {{- if (and (ge (len .Content) 12) (eq (slice .Content 0 12) "originality "))}}\n {{- $originality = slice .Content 12 }}\n {{- end}}\n {{- end}}\n\n {{- /* Role specified document */ -}}\n {{- if (and (ge (len .Role) 8) (eq (slice .Role 0 8) "document")) }}\n {{- if (ne $documentCounter 0)}}\n {{- $documents = print $documents "\\n\\n"}}\n {{- end}}\n {{- $identifier := ""}}\n {{- if (ge (len .Role) 9) }}\n {{- $identifier = (slice .Role 9)}}\n {{- end}}\n {{- if (eq $identifier "") }}\n {{- $identifier := print $documentCounter}}\n {{- end}}\n {{- $documents = print $documents "<|start_of_role|>document {\\"document_id\\": \\"" $identifier "\\"}<|end_of_role|>\\n" .Content "<|end_of_text|>"}}\n {{- $documentCounter = len (printf "a%*s" $documentCounter "")}}\n {{- end}}\n{{- end}}\n\n{{- /*\nIf no user message provided, build the default system message\n*/ -}}\n{{- if eq $system "" }}\n {{- $system = "Knowledge Cutoff Date: April 2024.\\nYou are Granite, developed by IBM."}}\n\n {{- /* Add Tools prompt */}}\n {{- if .Tools }}\n {{- $system = print $system " You are a helpful assistant with access to the following tools. When a tool is required to answer the user\'s query, respond only with <|tool_call|> followed by a JSON list of tools used. If a tool does not exist in the provided list of tools, notify the user that you do not have the ability to fulfill the request." }}\n {{- end}}\n\n {{- /* Add documents prompt */}}\n {{- if $documents }}\n {{- if .Tools }}\n {{- $system = print $system "\\n"}}\n {{- else }}\n {{- $system = print $system " "}}\n {{- end}}\n {{- $system = print $system "Write the response to the user\'s input by strictly aligning with the facts in the provided documents. If the information needed to answer the question is not available in the documents, inform the user that the question cannot be answered based on the available data." }}\n {{- if $citations}}\n {{- $system = print $system "\\nUse the symbols <|start_of_cite|> and <|end_of_cite|> to indicate when a fact comes from a document in the search result, e.g <|start_of_cite|> {document_id: 1}my fact <|end_of_cite|> for a fact from document 1. 
Afterwards, list all the citations with their corresponding documents in an ordered list."}}\n {{- end}}\n {{- if $hallucinations}}\n {{- $system = print $system "\\nFinally, after the response is written, include a numbered list of sentences from the response with a corresponding risk value that are hallucinated and not based in the documents."}}\n {{- end}}\n {{- end}}\n\n {{- /* Prompt without tools or documents */}}\n {{- if (and (not .Tools) (not $documents)) }}\n {{- $system = print $system " You are a helpful AI assistant."}}\n {{- if $thinking}}\n {{- $system = print $system "\\nRespond to every user query in a comprehensive and detailed way. You can write down your thoughts and reasoning process before responding. In the thought process, engage in a comprehensive cycle of analysis, summarization, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. In the response section, based on various attempts, explorations, and reflections from the thoughts section, systematically present the final solution that you deem correct. The response should summarize the thought process. Write your thoughts between <think></think> and write your response between <response></response> for each user query."}}\n {{- end}}\n {{- end}}\n\n{{- end}}\n{{- /*\n\n------ TEMPLATE EXPANSION ------\n\n*/}}\n{{- /* System Prompt */ -}}\n<|start_of_role|>system<|end_of_role|>{{- $system }}<|end_of_text|>\n\n{{- /* Tools */ -}}\n{{- if .Tools }}\n<|start_of_role|>available_tools<|end_of_role|>[\n{{- range $index, $_ := .Tools }}\n{{ . }}\n{{- if and (ne (len (slice $.Tools $index)) 1) (gt (len $.Tools) 1) }},\n{{- end}}\n{{- end }}\n]<|end_of_text|>\n{{- end}}\n\n{{- /* Documents */ -}}\n{{- if $documents }}\n{{ $documents }}\n{{- end}}\n\n{{- /* Standard Messages */}}\n{{- range $index, $_ := .Messages }}\n{{- if (and\n (ne .Role "system")\n (or (lt (len .Role) 7) (ne (slice .Role 0 7) "control"))\n (or (lt (len .Role) 8) (ne (slice .Role 0 8) "document"))\n)}}\n<|start_of_role|>\n{{- if eq .Role "tool" }}tool_response\n{{- else }}{{ .Role }}\n{{- end }}<|end_of_role|>\n{{- if .Content }}{{ .Content }}\n{{- else if .ToolCalls }}<|tool_call|>\n{{- range .ToolCalls }}{"name": "{{ .Function.Name }}", "arguments": {{ .Function.Arguments }}}\n{{- end }}\n{{- end }}\n{{- if eq (len (slice $.Messages $index)) 1 }}\n{{- if eq .Role "assistant" }}\n{{- else }}<|end_of_text|>\n<|start_of_role|>assistant\n{{- if and (ne $length "") (ne $originality "") }} {"length": "{{ $length }}", "originality": "{{ $originality }}"}\n{{- else if ne $length "" }} {"length": "{{ $length }}"}\n{{- else if ne $originality "" }} {"originality": "{{ $originality }}"}\n{{- end }}<|end_of_role|>\n{{- end -}}\n{{- else }}<|end_of_text|>\n{{- end }}\n{{- end }}\n{{- end }}',
+			tokens: [
+				"<|tool_call|>",
+				"<think>",
+				"<response>",
+				"<|start_of_cite|>",
+				"<|end_of_cite|>",
+				"<|start_of_role|>",
+				"<|end_of_role|>",
+				"<|end_of_text|>",
+			],
+		},
+	},
 	{
 		model: "library/hermes3:70b",
 		gguf: "{{bos_token}}{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",