 import {ModelRecommendation} from "./utils/resolveModelRecommendationFileOptions.js";
 
 export const recommendedModels: ModelRecommendation[] = [{
4+ name : "DeepSeek R1 Distill Qwen 7B" ,
5+ abilities : [ "chat" , "complete" , "functionCalling" , "reasoning" ] ,
6+ description : "DeepSeek R1 model was created by DeepSeek and is using chain of though (CoT) to reason across a wide variety of topics.\n" +
7+ "It's optimized for an assistant-like chat use cases, with support for function calling.\n" +
8+ "This model is censored, but its responses quality on many topics is extremely high.\n" +
9+ "This is the 7 billion parameters version of the model - a fine tuned Qwen 2.5 7B base model with distillation from the 671B DeepSeek R1 version." ,
10+
11+ fileOptions : [
12+ "hf:mradermacher/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q8_0" ,
13+ "hf:mradermacher/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q6_K" ,
14+ "hf:mradermacher/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q5_K_M" ,
15+ "hf:mradermacher/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q5_K_S" ,
16+ "hf:mradermacher/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M"
17+ ]
18+ } , {
     name: "DeepSeek R1 Distill Qwen 14B",
     abilities: ["chat", "complete", "functionCalling", "reasoning"],
     description: "DeepSeek R1 model was created by DeepSeek and uses chain of thought (CoT) to reason across a wide variety of topics.\n" +
         "It's optimized for assistant-like chat use cases, with support for function calling.\n" +
         "This model is censored, but its response quality on many topics is extremely high.\n" +
-        "This is the 14 billion parameters version of the model - a fine tuned Qwen 2.5 14B base model with distillation from the 617B DeepSeek R1 version.",
+        "This is the 14 billion parameters version of the model - a fine tuned Qwen 2.5 14B base model with distillation from the 671B DeepSeek R1 version.",
+
+    fileOptions: [
+        "hf:mradermacher/DeepSeek-R1-Distill-Qwen-14B-GGUF:Q8_0",
+        "hf:mradermacher/DeepSeek-R1-Distill-Qwen-14B-GGUF:Q6_K",
+        "hf:mradermacher/DeepSeek-R1-Distill-Qwen-14B-GGUF:Q5_K_M",
+        "hf:mradermacher/DeepSeek-R1-Distill-Qwen-14B-GGUF:Q5_K_S",
+        "hf:mradermacher/DeepSeek-R1-Distill-Qwen-14B-GGUF:Q4_K_M"
+    ]
+}, {
+    name: "DeepSeek R1 Distill Qwen 32B",
+    abilities: ["chat", "complete", "functionCalling", "reasoning"],
+    description: "DeepSeek R1 model was created by DeepSeek and uses chain of thought (CoT) to reason across a wide variety of topics.\n" +
+        "It's optimized for assistant-like chat use cases, with support for function calling.\n" +
+        "This model is censored, but its response quality on many topics is extremely high.\n" +
+        "This is the 32 billion parameters version of the model - a fine tuned Qwen 2.5 32B base model with distillation from the 671B DeepSeek R1 version.",
+
+    fileOptions: [
+        "hf:mradermacher/DeepSeek-R1-Distill-Qwen-32B-GGUF:Q8_0",
+        "hf:mradermacher/DeepSeek-R1-Distill-Qwen-32B-GGUF:Q6_K",
+        "hf:mradermacher/DeepSeek-R1-Distill-Qwen-32B-GGUF:Q5_K_M",
+        "hf:mradermacher/DeepSeek-R1-Distill-Qwen-32B-GGUF:Q5_K_S",
+        "hf:mradermacher/DeepSeek-R1-Distill-Qwen-32B-GGUF:Q4_K_M"
+    ]
+}, {
+    name: "DeepSeek R1 Distill Llama 8B",
+    abilities: ["chat", "complete", "functionCalling", "reasoning"],
+    description: "DeepSeek R1 model was created by DeepSeek and uses chain of thought (CoT) to reason across a wide variety of topics.\n" +
+        "It's optimized for assistant-like chat use cases, with support for function calling.\n" +
+        "This model is censored, even though it's based on Llama 3.1.\n" +
+        "This is the 8 billion parameters version of the model - a fine tuned Llama 3.1 8B base model with distillation from the 671B DeepSeek R1 version.",
+
+    fileOptions: [
+        "hf:mradermacher/DeepSeek-R1-Distill-Llama-8B-GGUF:Q8_0",
+        "hf:mradermacher/DeepSeek-R1-Distill-Llama-8B-GGUF:Q6_K",
+        "hf:mradermacher/DeepSeek-R1-Distill-Llama-8B-GGUF:Q5_K_M",
+        "hf:mradermacher/DeepSeek-R1-Distill-Llama-8B-GGUF:Q5_K_S",
+        "hf:mradermacher/DeepSeek-R1-Distill-Llama-8B-GGUF:Q4_K_M"
+    ]
+}, {
+    name: "DeepSeek R1 Distill Llama 70B",
+    abilities: ["chat", "complete", "functionCalling", "reasoning"],
+    description: "DeepSeek R1 model was created by DeepSeek and uses chain of thought (CoT) to reason across a wide variety of topics.\n" +
+        "It's optimized for assistant-like chat use cases, with support for function calling.\n" +
+        "This model is censored, even though it's based on Llama 3.3.\n" +
+        "This is the 70 billion parameters version of the model - a fine tuned Llama 3.3 70B base model with distillation from the 671B DeepSeek R1 version.",
 
     fileOptions: [
12- "hf:bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF:Q8_0" ,
13- "hf:bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF:Q6_K_L" ,
14- "hf:bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF:Q6_K" ,
15- "hf:bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF:Q5_K_L" ,
16- "hf:bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF:Q5_K_M" ,
17- "hf:bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF:Q5_K_S" ,
18- "hf:bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF:Q4_K_M"
72+ "hf:mradermacher/DeepSeek-R1-Distill-Llama-70B-GGUF/DeepSeek-R1-Distill-Llama-70B.Q8_0.gguf.part1of2" ,
73+ "hf:mradermacher/DeepSeek-R1-Distill-Llama-70B-GGUF/DeepSeek-R1-Distill-Llama-70B.Q6_K.gguf.part1of2" ,
74+ "hf:mradermacher/DeepSeek-R1-Distill-Llama-70B-GGUF:Q5_K_M" ,
75+ "hf:mradermacher/DeepSeek-R1-Distill-Llama-70B-GGUF:Q5_K_S" ,
76+ "hf:mradermacher/DeepSeek-R1-Distill-Llama-70B-GGUF:Q4_K_M"
1977 ]
2078} , {
2179 name : "Llama 3.1 8B" ,
@@ -28,17 +86,6 @@ export const recommendedModels: ModelRecommendation[] = [{
2886 "hf:mradermacher/Meta-Llama-3.1-8B-Instruct-GGUF:Q6_K" ,
2987 "hf:mradermacher/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M"
3088 ]
-}, {
-    name: "Phi 4 14B",
-    abilities: ["chat", "complete", "functionCalling"],
-    description: "Phi 4 model was created by Microsoft and is optimized for complex reasoning in areas such as math.",
-
-    fileOptions: [
-        "hf:mradermacher/phi-4-GGUF:Q8_0",
-        "hf:mradermacher/phi-4-GGUF:Q6_K",
-        "hf:mradermacher/phi-4-GGUF:Q4_K_M",
-        "hf:mradermacher/phi-4-GGUF:Q4_K_S"
-    ]
 }, {
     name: "Llama 3.1 70B",
     abilities: ["chat", "complete", "functionCalling"],
@@ -63,6 +110,17 @@ export const recommendedModels: ModelRecommendation[] = [{
63110 "hf:mradermacher/Meta-Llama-3.1-405B-Instruct-GGUF/Meta-Llama-3.1-405B-Instruct.Q3_K_L.gguf.part1of5" ,
64111 "hf:mradermacher/Meta-Llama-3.1-405B-Instruct-GGUF/Meta-Llama-3.1-405B-Instruct.Q3_K_M.gguf.part1of4"
65112 ]
+}, {
+    name: "Phi 4 14B",
+    abilities: ["chat", "complete", "functionCalling"],
+    description: "Phi 4 model was created by Microsoft and is optimized for complex reasoning in areas such as math.",
+
+    fileOptions: [
+        "hf:mradermacher/phi-4-GGUF:Q8_0",
+        "hf:mradermacher/phi-4-GGUF:Q6_K",
+        "hf:mradermacher/phi-4-GGUF:Q4_K_M",
+        "hf:mradermacher/phi-4-GGUF:Q4_K_S"
+    ]
 }, {
     name: "Mistral Nemo 12B",
     abilities: ["chat", "complete", "functionCalling"],
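
For orientation (not part of the diff above): a minimal sketch of how entries with this shape could be consumed. The import path, the filtering logic, and the "take the first file option" choice are illustrative assumptions, not code from this change.

// Illustrative sketch only - assumes the ModelRecommendation shape shown in the diff
// (name, abilities, description, fileOptions); the import path is hypothetical.
import {recommendedModels} from "./recommendedModels.js";

// Pick out the recommendations that advertise the "reasoning" ability.
const reasoningModels = recommendedModels.filter(
    (model) => model.abilities.includes("reasoning")
);

for (const model of reasoningModels) {
    // In this file the fileOptions are listed from the largest quantization (Q8_0)
    // down to Q4_K_M, so taking the first entry selects the largest one.
    const largestQuantFile = model.fileOptions[0];
    console.log(`${model.name} -> ${largestQuantFile}`);
}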