Commit e8964e1
Export jinja files as TS module at build time (#1296)
This PR should fix huggingface-internal/moon-landing#13013. It partially removes the structure introduced in #1255.

The current problem is that inference snippets are generated in the front-end, and since the front-end cannot access the file system, it cannot read the jinja files. To work around this, this PR adds a build step that exports all jinja files into a single TS module. I've also updated the `package.json` file so that the snippets code is now available in any environment (both Node and browser). cc @coyotte508, who suggested this solution. Tested it in `@tasks-gen`, and it works.

---

For the record, the exported file (not committed in this PR) looks like this:

`packages/inference/src/snippets/templates.exported.ts`:

```ts
// Generated file - do not edit directly
export const templates: Record<string, Record<string, Record<string, string>>> = {
  "js": {
    "fetch": {
      "basic": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
      "basicAudio": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"audio/flac\"\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
      "basicImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\"\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
      "textToAudio": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
      "textToImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Use image\n});",
      "zeroShotClassification": "async function query(data) {\n const response = await fetch(\n\t\t\"{{ fullUrl }}\",\n {\n headers: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n \"Content-Type\": \"application/json\",\n },\n method: \"POST\",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: [\"refund\", \"legal\", \"faq\"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});"
    },
    "huggingface.js": {
      "basic": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst output = await client.{{ methodName }}({\n\tmodel: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tprovider: \"{{ provider }}\",\n});\n\nconsole.log(output);",
      "basicAudio": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n\tdata,\n\tmodel: \"{{ model.id }}\",\n\tprovider: \"{{ provider }}\",\n});\n\nconsole.log(output);",
      "basicImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n\tdata,\n\tmodel: \"{{ model.id }}\",\n\tprovider: \"{{ provider }}\",\n});\n\nconsole.log(output);",
      "conversational": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst chatCompletion = await client.chatCompletion({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);",
      "conversationalStream": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nlet out = \"\";\n\nconst stream = await client.chatCompletionStream({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t} \n}",
      "textToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToImage({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tparameters: { num_inference_steps: 5 },\n});\n/// Use the generated image (it's a Blob)",
      "textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToVideo({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n});\n// Use the generated video (it's a Blob)"
    },
    "openai": {
      "conversational": "import { OpenAI } from \"openai\";\n\nconst client = new OpenAI({\n\tbaseURL: \"{{ baseUrl }}\",\n\tapiKey: \"{{ accessToken }}\",\n});\n\nconst chatCompletion = await client.chat.completions.create({\n\tmodel: \"{{ providerModelId }}\",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);",
      "conversationalStream": "import { OpenAI } from \"openai\";\n\nconst client = new OpenAI({\n\tbaseURL: \"{{ baseUrl }}\",\n\tapiKey: \"{{ accessToken }}\",\n});\n\nlet out = \"\";\n\nconst stream = await client.chat.completions.create({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t} \n}"
    }
  },
  "python": {
    "fal_client": {
      "textToImage": "{% if provider == \"fal-ai\" %}\nimport fal_client\n\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n },\n)\nprint(result)\n{% endif %} "
    },
    "huggingface_hub": {
      "basic": "result = client.{{ methodName }}(\n inputs={{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n)",
      "basicAudio": "output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model=\"{{ model.id }}\")",
      "basicImage": "output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model=\"{{ model.id }}\")",
      "conversational": "completion = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
      "conversationalStream": "stream = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end=\"\") ",
      "documentQuestionAnswering": "output = client.document_question_answering(\n \"{{ inputs.asObj.image }}\",\n question=\"{{ inputs.asObj.question }}\",\n model=\"{{ model.id }}\",\n) ",
      "imageToImage": "# output is a PIL.Image object\nimage = client.image_to_image(\n \"{{ inputs.asObj.inputs }}\",\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) ",
      "importInferenceClient": "from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider=\"{{ provider }}\",\n api_key=\"{{ accessToken }}\",\n)",
      "textToImage": "# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) ",
      "textToVideo": "video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) "
    },
    "openai": {
      "conversational": "from openai import OpenAI\n\nclient = OpenAI(\n base_url=\"{{ baseUrl }}\",\n api_key=\"{{ accessToken }}\"\n)\n\ncompletion = client.chat.completions.create(\n model=\"{{ providerModelId }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
      "conversationalStream": "from openai import OpenAI\n\nclient = OpenAI(\n base_url=\"{{ baseUrl }}\",\n api_key=\"{{ accessToken }}\"\n)\n\nstream = client.chat.completions.create(\n model=\"{{ providerModelId }}\",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end=\"\")"
    },
    "requests": {
      "basic": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n}) ",
      "basicAudio": "def query(filename):\n with open(filename, \"rb\") as f:\n data = f.read()\n response = requests.post(API_URL, headers={\"Content-Type\": \"audio/flac\", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})",
      "basicImage": "def query(filename):\n with open(filename, \"rb\") as f:\n data = f.read()\n response = requests.post(API_URL, headers={\"Content-Type\": \"image/jpeg\", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})",
      "conversational": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ providerInputs.asJsonString }}\n})\n\nprint(response[\"choices\"][0][\"message\"])",
      "conversationalStream": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b\"data:\"):\n continue\n if line.strip() == b\"data: [DONE]\":\n return\n yield json.loads(line.decode(\"utf-8\").lstrip(\"data:\").rstrip(\"/n\"))\n\nchunks = query({\n{{ providerInputs.asJsonString }},\n \"stream\": True,\n})\n\nfor chunk in chunks:\n print(chunk[\"choices\"][0][\"delta\"][\"content\"], end=\"\")",
      "documentQuestionAnswering": "def query(payload):\n with open(payload[\"image\"], \"rb\") as f:\n img = f.read()\n payload[\"image\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {\n \"image\": \"{{ inputs.asObj.image }}\",\n \"question\": \"{{ inputs.asObj.question }}\",\n },\n}) ",
      "imageToImage": "def query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
      "importRequests": "{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = \"{{ fullUrl }}\"\nheaders = {\"Authorization\": \"{{ authorizationHeader }}\"}",
      "tabular": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n \"inputs\": {\n \"data\": {{ providerInputs.asObj.inputs }}\n },\n}) ",
      "textToAudio": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
      "textToImage": "{% if provider == \"hf-inference\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}",
      "zeroShotClassification": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n \"parameters\": {\"candidate_labels\": [\"refund\", \"legal\", \"faq\"]},\n}) ",
      "zeroShotImageClassification": "def query(data):\n with open(data[\"image_path\"], \"rb\") as f:\n img = f.read()\n payload={\n \"parameters\": data[\"parameters\"],\n \"inputs\": base64.b64encode(img).decode(\"utf-8\")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"image_path\": {{ providerInputs.asObj.inputs }},\n \"parameters\": {\"candidate_labels\": [\"cat\", \"dog\", \"llama\"]},\n}) "
    }
  },
  "sh": {
    "curl": {
      "basic": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }}\n }'",
      "basicAudio": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: audio/flac' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
      "basicImage": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: image/jpeg' \\\n --data-binary @{{ providerInputs.asObj.inputs }}",
      "conversational": "curl {{ fullUrl }} \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }},\n \"stream\": false\n }'",
      "conversationalStream": "curl {{ fullUrl }} \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n -d '{\n{{ providerInputs.asCurlString }},\n \"stream\": true\n }'",
      "zeroShotClassification": "curl {{ fullUrl }} \\\n -X POST \\\n -d '{\"inputs\": {{ providerInputs.asObj.inputs }}, \"parameters\": {\"candidate_labels\": [\"refund\", \"legal\", \"faq\"]}}' \\\n -H 'Content-Type: application/json' \\\n -H 'Authorization: {{ authorizationHeader }}'"
    }
  }
} as const;
```
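Since the templates now travel as plain strings inside the bundle, any environment that can run `@huggingface/jinja` can render them. A minimal sketch of standalone consumption (illustrative only; the real lookup lives in `getInferenceSnippets.ts`, and the render values below are invented placeholders):

```ts
// Sketch: render one exported template with @huggingface/jinja.
// No fs access is involved, so this works in Node and in the browser alike.
import { Template } from "@huggingface/jinja";
import { templates } from "./templates.exported";

const source = templates["python"]?.["huggingface_hub"]?.["textToVideo"];
if (!source) {
	throw new Error("Template not found");
}

// The context shape mirrors what the template references ({{ inputs.asObj.inputs }},
// {{ model.id }}); the concrete values here are invented for the example.
const rendered = new Template(source).render({
	inputs: { asObj: { inputs: '"A cat chasing a laser pointer"' } },
	model: { id: "some-org/some-model" },
});
console.log(rendered);
```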
Parent: b6f3c06

File tree

5 files changed: 69 additions, 60 deletions

packages/inference/.gitignore

Lines changed: 1 addition & 0 deletions
```diff
@@ -0,0 +1 @@
+src/snippets/templates.exported.ts
```

packages/inference/package.json

Lines changed: 8 additions & 13 deletions
```diff
@@ -26,27 +26,21 @@
 	},
 	"files": [
 		"dist",
-		"src"
+		"src",
+		"!src/snippets/templates/**/*.jinja"
 	],
 	"source": "src/index.ts",
 	"types": "./dist/src/index.d.ts",
 	"main": "./dist/index.cjs",
 	"module": "./dist/index.js",
 	"exports": {
-		".": {
-			"types": "./dist/src/index.d.ts",
-			"require": "./dist/index.cjs",
-			"import": "./dist/index.js"
-		}
-	},
-	"browser": {
-		"./src/snippets/index.js": false,
-		"./dist/index.js": "./dist/browser/index.js",
-		"./dist/index.mjs": "./dist/browser/index.mjs"
+		"types": "./dist/src/index.d.ts",
+		"require": "./dist/index.cjs",
+		"import": "./dist/index.js"
 	},
 	"type": "module",
 	"scripts": {
-		"build": "tsup src/index.ts --format cjs,esm --clean && tsc --emitDeclarationOnly --declaration",
+		"build": "pnpm run export-templates && tsup src/index.ts --format cjs,esm --clean && tsc --emitDeclarationOnly --declaration",
 		"dts": "tsx scripts/generate-dts.ts && tsc --noEmit dist/index.d.ts",
 		"lint": "eslint --quiet --fix --ext .cjs,.ts .",
 		"lint:check": "eslint --ext .cjs,.ts .",
@@ -57,7 +51,8 @@
 		"test": "vitest run --config vitest.config.mts",
 		"test:browser": "vitest run --browser.name=chrome --browser.headless --config vitest.config.mts",
 		"check": "tsc",
-		"dev": "tsup src/index.ts --format cjs,esm --watch"
+		"dev": "pnpm run export-templates && tsup src/index.ts --format cjs,esm --watch",
+		"export-templates": "tsx scripts/export-templates.ts"
 	},
 	"dependencies": {
 		"@huggingface/tasks": "workspace:^",
```
packages/inference/scripts/export-templates.ts

Lines changed: 54 additions & 0 deletions (new file)

```ts
/* Script that exports all jinja files from packages/inference/src/snippets/templates into a TS module.

Templates are exported in packages/inference/src/snippets/templates.exported.ts.
*/
import { readFileSync, writeFileSync, readdirSync } from "node:fs";
import path from "node:path";

const TEMPLATES_DIR = path.join(process.cwd(), "src", "snippets", "templates");

function exportTemplatesAsCode(): string {
	/// language -> client -> templateName -> templateContent
	const templates: Record<string, Record<string, Record<string, string>>> = {};

	// Read all language directories
	const languages = readdirSync(TEMPLATES_DIR);
	for (const language of languages) {
		const languagePath = path.join(TEMPLATES_DIR, language);
		if (!readdirSync(languagePath).length) continue;

		templates[language] = {};

		// Read all client directories
		const clients = readdirSync(languagePath);
		for (const client of clients) {
			const clientPath = path.join(languagePath, client);
			if (!readdirSync(clientPath).length) continue;

			templates[language][client] = {};

			// Read all template files
			const files = readdirSync(clientPath);
			for (const file of files) {
				if (!file.endsWith(".jinja")) continue;

				const templatePath = path.join(clientPath, file);
				const templateContent = readFileSync(templatePath, "utf8");
				const templateName = path.basename(file, ".jinja");

				templates[language][client][templateName] = templateContent;
			}
		}
	}

	const templatesJson = JSON.stringify(templates, null, 2);
	return `// Generated file - do not edit directly
export const templates: Record<string, Record<string, Record<string, string>>> = ${templatesJson} as const;
`;
}

// Generate and write the templates file
const output = exportTemplatesAsCode();
const outputPath = path.join(process.cwd(), "src", "snippets", "templates.exported.ts");
writeFileSync(outputPath, output);
console.log("Templates exported successfully! 🚀");
```

packages/inference/src/snippets/getInferenceSnippets.ts

Lines changed: 6 additions & 24 deletions
```diff
@@ -10,9 +10,7 @@ import {
 import type { InferenceProvider, InferenceTask, RequestArgs } from "../types";
 import { Template } from "@huggingface/jinja";
 import { makeRequestOptionsFromResolvedModel } from "../lib/makeRequestOptions";
-import fs from "fs";
-import path from "path";
-import { existsSync as pathExists } from "node:fs";
+import { templates } from "./templates.exported";
 
 const PYTHON_CLIENTS = ["huggingface_hub", "fal_client", "requests", "openai"] as const;
 const JS_CLIENTS = ["fetch", "huggingface.js", "openai"] as const;
@@ -44,34 +42,18 @@ interface TemplateParams
 
 // Helpers to find + load templates
 
-const rootDirFinder = (): string => {
-	let currentPath =
-		typeof import.meta !== "undefined" && import.meta.url
-			? path.normalize(new URL(import.meta.url).pathname) /// for ESM
-			: __dirname; /// for CJS
-
-	while (currentPath !== "/") {
-		if (pathExists(path.join(currentPath, "package.json"))) {
-			return currentPath;
-		}
-
-		currentPath = path.normalize(path.join(currentPath, ".."));
-	}
-
-	return "/";
-};
-
-const templatePath = (language: InferenceSnippetLanguage, client: Client, templateName: string): string =>
-	path.join(rootDirFinder(), "src", "snippets", "templates", language, client, `${templateName}.jinja`);
 const hasTemplate = (language: InferenceSnippetLanguage, client: Client, templateName: string): boolean =>
-	pathExists(templatePath(language, client, templateName));
+	templates[language]?.[client]?.[templateName] !== undefined;
 
 const loadTemplate = (
 	language: InferenceSnippetLanguage,
 	client: Client,
 	templateName: string
 ): ((data: TemplateParams) => string) => {
-	const template = fs.readFileSync(templatePath(language, client, templateName), "utf8");
+	const template = templates[language]?.[client]?.[templateName];
+	if (!template) {
+		throw new Error(`Template not found: ${language}/${client}/${templateName}`);
+	}
 	return (data: TemplateParams) => new Template(template).render({ ...data });
 };
 
```
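With this change, a missing template surfaces as a descriptive `Error` from the in-memory map rather than an fs `ENOENT`. An illustrative check (the `hasTemplate`/`loadTemplate` helpers above are module-private, so this re-states the lookup for demonstration):

```ts
// Illustrative only: mirrors the module-private lookup from the diff above.
import { templates } from "./templates.exported";

function hasTemplate(language: string, client: string, templateName: string): boolean {
	return templates[language]?.[client]?.[templateName] !== undefined;
}

console.log(hasTemplate("js", "fetch", "basic")); // true
console.log(hasTemplate("js", "fetch", "doesNotExist")); // false
```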

packages/inference/tsup.config.ts

Lines changed: 0 additions & 23 deletions
This file was deleted.
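With the templates inlined, the dedicated browser build this config produced is no longer needed: the single ESM build resolves everywhere. A hedged sketch of front-end usage (assuming the package re-exports the snippets helpers under a `snippets` namespace, as the moon-landing use case implies):

```ts
// Sketch: the same import now resolves identically in Node and browser bundles,
// since snippet generation no longer touches the file system at runtime.
// The `snippets.getInferenceSnippets` export is an assumption here.
import { snippets } from "@huggingface/inference";

console.log(typeof snippets.getInferenceSnippets); // expected: "function"
```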
