
Commit f09d070

adds llama-guard

1 parent f33020b commit f09d070

1 file changed: 120 additions, 0 deletions
@@ -0,0 +1,120 @@
{
  "id": "cc80437b-9a8d-4f1a-9c77-9aaf0d226922",
  "source": 1,
  "name": "@cf/meta/llama-guard-3-8b",
  "description": "Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM inputs (prompt classification) and LLM responses (response classification). It acts as an LLM: it generates text in its output that indicates whether a given prompt or response is safe or unsafe, and if unsafe, it also lists the content categories violated.",
  "task": {
    "id": "c329a1f9-323d-4e91-b2aa-582dd4188d34",
    "name": "Text Generation",
    "description": "Family of generative text models, such as large language models (LLMs), that can be adapted for a variety of natural language tasks."
  },
  "tags": [],
  "properties": [],
  "schema": {
    "input": {
      "type": "object",
      "properties": {
        "messages": {
          "type": "array",
          "description": "An array of message objects representing the conversation history.",
          "items": {
            "type": "object",
            "properties": {
              "role": {
                "type": "string",
                "description": "The role of the message sender (e.g., 'user', 'assistant', 'system', 'tool')."
              },
              "content": {
                "type": "string",
                "maxLength": 131072,
                "description": "The content of the message as a string."
              }
            },
            "required": [
              "role",
              "content"
            ]
          }
        },
        "max_tokens": {
          "type": "integer",
          "default": 256,
          "description": "The maximum number of tokens to generate in the response."
        },
        "temperature": {
          "type": "number",
          "default": 0.6,
          "minimum": 0,
          "maximum": 5,
          "description": "Controls the randomness of the output; higher values produce more random results."
        },
        "response_format": {
          "type": "object",
          "description": "Dictates the output format of the generated response.",
          "properties": {
            "type": {
              "type": "string",
              "description": "Set to 'json_object' to process and output the generated text as JSON."
            }
          }
        }
      },
      "required": [
        "messages"
      ]
    },
    "output": {
      "type": "object",
      "contentType": "application/json",
      "properties": {
        "response": {
          "oneOf": [
            {
              "type": "string",
              "description": "The generated text response from the model."
            },
            {
              "type": "object",
              "description": "The JSON response parsed from the model's generated text.",
              "properties": {
                "safe": {
                  "type": "boolean",
                  "description": "Whether the conversation is safe or not."
                },
                "categories": {
                  "type": "array",
                  "description": "A list of the hazard categories predicted for the conversation, if the conversation is deemed unsafe.",
                  "items": {
                    "type": "string",
                    "description": "Hazard category class name, from S1 to S14."
                  }
                }
              }
            }
          ]
        },
        "usage": {
          "type": "object",
          "description": "Usage statistics for the inference request.",
          "properties": {
            "prompt_tokens": {
              "type": "number",
              "description": "Total number of tokens in the input.",
              "default": 0
            },
            "completion_tokens": {
              "type": "number",
              "description": "Total number of tokens in the output.",
              "default": 0
            },
            "total_tokens": {
              "type": "number",
              "description": "Total number of input and output tokens.",
              "default": 0
            }
          }
        }
      }
    }
  }
}
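
As a usage sketch, the schema above maps onto a Workers AI call roughly as follows. The Env typing, the binding name AI, and the example conversation are illustrative assumptions; only the model name, the input fields, and the output shape come from the schema in this diff.

    // Minimal sketch of invoking this model from a Cloudflare Worker.
    // Assumes an AI binding named `AI` is configured for the Worker;
    // the Env interface below is illustrative, not from this commit.
    interface Env {
      AI: { run(model: string, inputs: Record<string, unknown>): Promise<any> };
    }

    export default {
      async fetch(_request: Request, env: Env): Promise<Response> {
        const result = await env.AI.run("@cf/meta/llama-guard-3-8b", {
          // `messages` is the only required input field per the schema.
          messages: [
            { role: "user", content: "How do I pick a strong passphrase?" },
          ],
          // Per the schema, setting response_format.type to "json_object"
          // requests the parsed form of the output: { safe, categories }.
          response_format: { type: "json_object" },
          max_tokens: 256,
        });

        // result.response is either a raw string or the parsed object;
        // result.usage carries prompt/completion/total token counts.
        return Response.json(result);
      },
    };

Without response_format, the response field is plain text; Llama Guard models conventionally emit "safe" or "unsafe" followed by the violated category codes (S1 through S14).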
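
With response_format.type set to "json_object", a result might look like the following. The values are purely illustrative; only the shape follows the output schema above.

    {
      "response": { "safe": false, "categories": ["S2"] },
      "usage": { "prompt_tokens": 187, "completion_tokens": 5, "total_tokens": 192 }
    }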

0 commit comments