spring.ai.ollama.base-url=${OLLAMA-BASE-URL:http://localhost:11434}
spring.ai.ollama.embedding.enabled=false
spring.ai.embedding.transformer.enabled=true
document-token-limit=9000
embedding-token-limit=500
spring.liquibase.change-log=classpath:/dbchangelog/db.changelog-master-ollama.xml

# document processing
# falcon model config free production use
# spring.ai.ollama.chat.model=qwen2.5:32b
spring.ai.ollama.chat.model=llama3.1:8b
spring.ai.ollama.chat.options.num-ctx=12288
# spring.ai.embedding.transformer.onnx.modelUri=https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1/tree/main/onnx/model_quantized.onnx
# spring.ai.embedding.transformer.tokenizer.uri=https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1/tree/main/tokenizer.json
# beluga model config only for non production/commercial use
# spring.ai.ollama.chat.model=stable-beluga:13b

# spring.ai.ollama.chat.options.num-thread=8
# spring.ai.ollama.chat.options.keep_alive=1s

# generate code
# spring.ai.ollama.chat.model=granite-code:20b
# spring.ai.ollama.chat.options.num-ctx=8192
# spring.ai.ollama.chat.model=deepseek-coder-v2:16b
# spring.ai.ollama.chat.options.num-ctx=65536

# spring.ai.ollama.chat.model=codestral:22b
# spring.ai.ollama.chat.options.num-ctx=32768

# generate book summaries
spring.ai.ollama.chat.options.num-thread=8