@@ -103,16 +103,18 @@ The LLM response is available as the first choice under the `result.getOrchestra
Use a prepared template and execute requests by passing only the input parameters:

```java
- var template = ChatMessage.create().role("user").content("{{?input}}");
+ var template = ChatMessage.create().role("user").content("Reply with 'Orchestration Service is working!' in {{?language}}");
var templatingConfig = TemplatingModuleConfig.create().template(template);
+ var configWithTemplate = config.withTemplateConfig(templatingConfig);

- var inputParams =
-     Map.of("input", "Reply with 'Orchestration Service is working!' in German");
+ var inputParams = Map.of("language", "German");
var prompt = new OrchestrationPrompt(inputParams);

- var result = client.chatCompletion(prompt, config.withTemplateConfig(templatingConfig));
+ var result = client.chatCompletion(prompt, configWithTemplate);
```
115115
+ In this case the template is defined with the placeholder `{{?language}}`, which is replaced by the value `German` from the input parameters.
+
### Message history

Include a message history to maintain context in the conversation:
@@ -168,9 +170,11 @@ var filteringConfig =
    .input(InputFilteringConfig.create().filters(filterStrict))
    .output(OutputFilteringConfig.create().filters(filterStrict));

+ var configWithFilter = config.withFilteringConfig(filteringConfig);
+
// this fails with Bad Request because the strict filter prohibits the input message
var result =
-     new OrchestrationClient().chatCompletion(prompt, config.withFilteringConfig(filteringConfig));
+     new OrchestrationClient().chatCompletion(prompt, configWithFilter);
```

### Data masking
@@ -186,6 +190,7 @@ var maskingProvider =
        DPIEntityConfig.create().type(DPIEntities.PHONE),
        DPIEntityConfig.create().type(DPIEntities.PERSON));
var maskingConfig = MaskingModuleConfig.create().maskingProviders(maskingProvider);
+ var configWithMasking = config.withMaskingConfig(maskingConfig);

var systemMessage = ChatMessage.create()
    .role("system")
@@ -200,7 +205,7 @@ var userMessage = ChatMessage.create()
var prompt = new OrchestrationPrompt(systemMessage, userMessage);

var result =
-     new OrchestrationClient().chatCompletion(prompt, config.withMaskingConfig(maskingConfig));
+     new OrchestrationClient().chatCompletion(prompt, configWithMasking);
```
205210
206211In this example, the input will be masked before the call to the LLM. Note that data cannot be unmasked in the LLM output.
0 commit comments