Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 58 additions & 0 deletions src/content/workers-ai-models/embeddinggemma-300m.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
{
"id": "d2f07a41-c152-4061-8083-ec655cbf91a1",
"source": 1,
"name": "@cf/google/embeddinggemma-300m",
"description": "EmbeddingGemma is a 300M parameter, state-of-the-art for its size, open embedding model from Google, built from Gemma 3 (with T5Gemma initialization) and the same research and technology used to create Gemini models. EmbeddingGemma produces vector representations of text, making it well-suited for search and retrieval tasks, including classification, clustering, and semantic similarity search. This model was trained with data in 100+ spoken languages.",
"task": {
"id": "0137cdcf-162a-4108-94f2-1ca59e8c65ee",
"name": "Text Embeddings",
"description": "Feature extraction models transform raw data into numerical features that can be processed while preserving the information in the original dataset. These models are ideal as part of building vector search applications or Retrieval Augmented Generation workflows with Large Language Models (LLM)."
},
"created_at": "2025-09-03 20:48:38.784",
"tags": [],
"properties": [],
"schema": {
"input": {
"type": "object",
"properties": {
"text": {
"oneOf": [{
"type": "string"
}, {
"type": "array",
"items": {
"type": "string"
}
}],
"description": "Input text to embed. Can be a single string or a list of strings."
}
},
"required": ["text"]
},
"output": {
"type": "object",
"properties": {
"data": {
"type": "array",
"items": {
"type": "array",
"items": {
"type": "number"
}
},
"description": "Embedding vectors, where each vector is a list of floats."
},
"shape": {
"type": "array",
"items": {
"type": "integer"
},
"minItems": 2,
"maxItems": 2,
"description": "Shape of the embedding data as [number_of_embeddings, embedding_dimension]."
}
},
"required": ["data", "shape"]
}
}
}
15 changes: 10 additions & 5 deletions src/content/workers-ai-models/lucid-origin.json
Original file line number Diff line number Diff line change
Expand Up @@ -11,10 +11,6 @@
"created_at": "2025-08-25 19:21:28.770",
"tags": [],
"properties": [
{
"property_id": "partner",
"value": "true"
},
{
"property_id": "price",
"value": [
Expand All @@ -29,6 +25,10 @@
"currency": "USD"
}
]
},
{
"property_id": "partner",
"value": "true"
}
],
"schema": {
Expand Down Expand Up @@ -68,7 +68,12 @@
},
"num_steps": {
"type": "integer",
"default": 4,
"minimum": 1,
"maximum": 40,
"description": "The number of diffusion steps; higher values can improve quality but take longer"
},
"steps": {
"type": "integer",
"minimum": 1,
"maximum": 40,
"description": "The number of diffusion steps; higher values can improve quality but take longer"
Expand Down
22 changes: 21 additions & 1 deletion src/content/workers-ai-models/nova-3.json
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@
"type": "string",
"description": "Arbitrary key-value pairs that are attached to the API response for usage in downstream processing"
},
"filter_words": {
"filler_words": {
"type": "boolean",
"description": "Filler Words can help transcribe interruptions in your audio, like 'uh' and 'um'"
},
Expand Down Expand Up @@ -195,6 +195,26 @@
"utt_split": {
"type": "number",
"description": "Seconds to wait before detecting a pause between words in submitted audio."
},
"channels": {
"type": "number",
"description": "The number of channels in the submitted audio"
},
"interim_results": {
"type": "boolean",
"description": "Specifies whether the streaming endpoint should provide ongoing transcription updates as more audio is received. When set to true, the endpoint sends continuous updates, meaning transcription results may evolve over time. Note: Supported only for websockets."
},
"endpointing": {
"type": "string",
"description": "Indicates how long the model will wait to detect whether a speaker has finished speaking or pauses for a significant period of time. When set to a value, the streaming endpoint immediately finalizes the transcription for the processed time range and returns the transcript with a speech_final parameter set to true. Can also be set to false to disable endpointing"
},
"vad_events": {
"type": "boolean",
"description": "Indicates that speech has started. You'll begin receiving Speech Started messages upon speech starting. Note: Supported only for websockets."
},
"utterance_end_ms": {
"type": "boolean",
"description": "Indicates how long the model will wait to send an UtteranceEnd message after a word has been transcribed. Use with interim_results. Note: Supported only for websockets."
}
},
"required": [
Expand Down
4 changes: 4 additions & 0 deletions src/content/workers-ai-models/smart-turn-v2.json
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,10 @@
"currency": "USD"
}
]
},
{
"property_id": "realtime",
"value": "true"
}
],
"schema": {
Expand Down