
Commit 2df7380

apply lint
1 parent 61b530e commit 2df7380

File tree

4 files changed: +104 -124 lines changed
  • appsignals-tests
    • contract-tests/src/test/java/software/amazon/opentelemetry/appsignals/test/awssdk/base
    • images/aws-sdk
      • aws-sdk-base/src/main/java/com/amazon/sampleapp
      • aws-sdk-v1/src/main/java/com/amazon/sampleapp
      • aws-sdk-v2/src/main/java/com/amazon/sampleapp


appsignals-tests/contract-tests/src/test/java/software/amazon/opentelemetry/appsignals/test/awssdk/base/AwsSdkBaseTest.java

Lines changed: 58 additions & 87 deletions
@@ -1833,8 +1833,7 @@ protected void doTestBedrockRuntimeAi21Jamba() {
             assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.8"),
             assertAttribute(SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[stop]"),
             assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "5"),
-            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "42")
-        ));
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "42")));
     assertMetricClientAttributes(
         metrics,
         AppSignalsConstants.LATENCY_METRIC,

@@ -1897,20 +1896,15 @@ protected void doTestBedrockRuntimeAmazonTitan() {
         200,
         List.of(
             assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MODEL, "amazon.titan-text-premier-v1:0"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "100"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.7"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.9"),
+                SemanticConventionsConstants.GEN_AI_REQUEST_MODEL,
+                "amazon.titan-text-premier-v1:0"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "100"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.7"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.9"),
             assertAttribute(
                 SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[FINISHED]"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "10"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "15")
-        ));
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "10"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "15")));
     assertMetricClientAttributes(
         metrics,
         AppSignalsConstants.LATENCY_METRIC,

@@ -1947,12 +1941,12 @@ protected void doTestBedrockRuntimeAnthropicClaude() {
     var response = appClient.get("/bedrockruntime/invokeModel/anthropicClaude").aggregate().join();

     var traces = mockCollectorClient.getTraces();
-    var metrics = mockCollectorClient.getMetrics(
-        Set.of(
-            AppSignalsConstants.ERROR_METRIC,
-            AppSignalsConstants.FAULT_METRIC,
-            AppSignalsConstants.LATENCY_METRIC
-        ));
+    var metrics =
+        mockCollectorClient.getMetrics(
+            Set.of(
+                AppSignalsConstants.ERROR_METRIC,
+                AppSignalsConstants.FAULT_METRIC,
+                AppSignalsConstants.LATENCY_METRIC));

     var localService = getApplicationOtelServiceName();
     var localOperation = "GET /bedrockruntime/invokeModel/anthropicClaude";

@@ -1975,20 +1969,15 @@ protected void doTestBedrockRuntimeAnthropicClaude() {
         200,
         List.of(
             assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MODEL, "anthropic.claude-3-haiku-20240307-v1:0"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "512"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.6"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.53"),
+                SemanticConventionsConstants.GEN_AI_REQUEST_MODEL,
+                "anthropic.claude-3-haiku-20240307-v1:0"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "512"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.6"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.53"),
             assertAttribute(
                 SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[end_turn]"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "2095"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "503")
-        ));
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "2095"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "503")));
     assertMetricClientAttributes(
         metrics,
         AppSignalsConstants.LATENCY_METRIC,

@@ -2025,11 +2014,12 @@ protected void doTestBedrockRuntimeCohereCommandR() {
     var response = appClient.get("/bedrockruntime/invokeModel/cohereCommandR").aggregate().join();

     var traces = mockCollectorClient.getTraces();
-    var metrics = mockCollectorClient.getMetrics(
-        Set.of(
-            AppSignalsConstants.ERROR_METRIC,
-            AppSignalsConstants.FAULT_METRIC,
-            AppSignalsConstants.LATENCY_METRIC));
+    var metrics =
+        mockCollectorClient.getMetrics(
+            Set.of(
+                AppSignalsConstants.ERROR_METRIC,
+                AppSignalsConstants.FAULT_METRIC,
+                AppSignalsConstants.LATENCY_METRIC));

     var localService = getApplicationOtelServiceName();
     var localOperation = "GET /bedrockruntime/invokeModel/cohereCommandR";

@@ -2053,19 +2043,13 @@ protected void doTestBedrockRuntimeCohereCommandR() {
         List.of(
             assertAttribute(
                 SemanticConventionsConstants.GEN_AI_REQUEST_MODEL, "cohere.command-r-v1:0"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "4096"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.8"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.45"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "4096"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.8"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.45"),
             assertAttribute(
                 SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[COMPLETE]"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "9"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "16")
-        ));
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "9"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "16")));
     assertMetricClientAttributes(
         metrics,
         AppSignalsConstants.LATENCY_METRIC,

@@ -2102,12 +2086,12 @@ protected void doTestBedrockRuntimeMetaLlama() {
     var response = appClient.get("/bedrockruntime/invokeModel/metaLlama").aggregate().join();

     var traces = mockCollectorClient.getTraces();
-    var metrics = mockCollectorClient.getMetrics(
-        Set.of(
-            AppSignalsConstants.ERROR_METRIC,
-            AppSignalsConstants.FAULT_METRIC,
-            AppSignalsConstants.LATENCY_METRIC)
-    );
+    var metrics =
+        mockCollectorClient.getMetrics(
+            Set.of(
+                AppSignalsConstants.ERROR_METRIC,
+                AppSignalsConstants.FAULT_METRIC,
+                AppSignalsConstants.LATENCY_METRIC));

     var localService = getApplicationOtelServiceName();
     var localOperation = "GET /bedrockruntime/invokeModel/metaLlama";

@@ -2131,19 +2115,12 @@ protected void doTestBedrockRuntimeMetaLlama() {
         List.of(
             assertAttribute(
                 SemanticConventionsConstants.GEN_AI_REQUEST_MODEL, "meta.llama3-70b-instruct-v1:0"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "128"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.1"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.9"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[stop]"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "2095"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "503")
-        ));
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "128"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.1"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.9"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[stop]"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "2095"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "503")));
     assertMetricClientAttributes(
         metrics,
         AppSignalsConstants.LATENCY_METRIC,

@@ -2180,12 +2157,12 @@ protected void doTestBedrockRuntimeMistral() {
     var response = appClient.get("/bedrockruntime/invokeModel/mistralAi").aggregate().join();

     var traces = mockCollectorClient.getTraces();
-    var metrics = mockCollectorClient.getMetrics(
-        Set.of(
-            AppSignalsConstants.ERROR_METRIC,
-            AppSignalsConstants.FAULT_METRIC,
-            AppSignalsConstants.LATENCY_METRIC)
-    );
+    var metrics =
+        mockCollectorClient.getMetrics(
+            Set.of(
+                AppSignalsConstants.ERROR_METRIC,
+                AppSignalsConstants.FAULT_METRIC,
+                AppSignalsConstants.LATENCY_METRIC));

     var localService = getApplicationOtelServiceName();
     var localOperation = "GET /bedrockruntime/invokeModel/mistralAi";

@@ -2208,20 +2185,14 @@ protected void doTestBedrockRuntimeMistral() {
         200,
         List.of(
             assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MODEL, "mistral.mistral-large-2402-v1:0"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "4096"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.75"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.25"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[stop]"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "15"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "24")
-        ));
+                SemanticConventionsConstants.GEN_AI_REQUEST_MODEL,
+                "mistral.mistral-large-2402-v1:0"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "4096"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.75"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.25"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[stop]"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "15"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "24")));
     assertMetricClientAttributes(
         metrics,
         AppSignalsConstants.LATENCY_METRIC,
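
Note on the pattern: every reformatted block in this test class has the same shape — call the sample-app route, pull traces and metrics from the mock collector, then assert the GenAI span attributes. A condensed sketch of that shape, using only the helpers visible in the hunks above (appClient, mockCollectorClient, and assertAttribute are the suite's own; the method name here is illustrative):

protected void doTestBedrockRuntimeExample() {
  // Hit the sample-app route that performs the Bedrock InvokeModel call.
  var response = appClient.get("/bedrockruntime/invokeModel/anthropicClaude").aggregate().join();

  // Collect what the instrumentation exported to the mock collector.
  var traces = mockCollectorClient.getTraces();
  var metrics =
      mockCollectorClient.getMetrics(
          Set.of(
              AppSignalsConstants.ERROR_METRIC,
              AppSignalsConstants.FAULT_METRIC,
              AppSignalsConstants.LATENCY_METRIC));

  // The assertions then check GenAI semantic-convention attributes on the
  // span (request model, max_tokens, temperature, top_p, finish reasons,
  // token usage), as in the List.of(assertAttribute(...)) blocks above.
}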

appsignals-tests/images/aws-sdk/aws-sdk-base/src/main/java/com/amazon/sampleapp/Utils.java

Lines changed: 6 additions & 2 deletions
@@ -229,7 +229,9 @@ public static void setupInvokeModelRoute(int status) {
       usage.put("output_tokens", 503);
       jsonResponse.set("usage", usage);
     } else if (modelId.contains("cohere.command")) {
-      jsonResponse.put("text", "LISP's elegant simplicity and powerful macro system make it perfect for building interpreters!");
+      jsonResponse.put(
+          "text",
+          "LISP's elegant simplicity and powerful macro system make it perfect for building interpreters!");
       jsonResponse.put("finish_reason", "COMPLETE");
     } else if (modelId.contains("meta.llama")) {
       jsonResponse.put("prompt_token_count", 2095);

@@ -239,7 +241,9 @@ public static void setupInvokeModelRoute(int status) {
       ArrayNode outputs = mapper.createArrayNode();
       ObjectNode output = mapper.createObjectNode();

-      output.put("text", "A compiler translates the entire source code to machine code before execution, while an interpreter executes the code line by line in real-time.");
+      output.put(
+          "text",
+          "A compiler translates the entire source code to machine code before execution, while an interpreter executes the code line by line in real-time.");
       output.put("stop_reason", "stop");

       outputs.add(output);
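
The long strings being re-wrapped here are part of the canned InvokeModel response bodies that Utils.java assembles with Jackson. A minimal, self-contained sketch of the same pattern (the response shape follows the meta.llama branch above; the class and method names are illustrative):

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class MockResponseSketch {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();

    // One element of the "outputs" array, as built in the hunk above.
    ObjectNode output = mapper.createObjectNode();
    output.put(
        "text",
        "A compiler translates the entire source code to machine code before execution, "
            + "while an interpreter executes the code line by line in real-time.");
    output.put("stop_reason", "stop");

    ArrayNode outputs = mapper.createArrayNode();
    outputs.add(output);

    ObjectNode jsonResponse = mapper.createObjectNode();
    jsonResponse.set("outputs", outputs);

    // Serialized JSON that the mock InvokeModel route would return.
    System.out.println(mapper.writeValueAsString(jsonResponse));
  }
}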

appsignals-tests/images/aws-sdk/aws-sdk-v1/src/main/java/com/amazon/sampleapp/App.java

Lines changed: 6 additions & 7 deletions
@@ -56,6 +56,7 @@
 import com.amazonaws.services.sqs.model.CreateQueueRequest;
 import com.amazonaws.services.sqs.model.ReceiveMessageRequest;
 import com.amazonaws.services.sqs.model.SendMessageRequest;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import java.io.File;
 import java.io.IOException;
 import java.net.http.HttpClient;

@@ -66,8 +67,6 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -667,7 +666,7 @@ private static void setupBedrock() {
               .withBody(StandardCharsets.UTF_8.encode(mapper.writeValueAsString(request)));

       var response = bedrockRuntimeClient.invokeModel(invokeModelRequest);
-       var responseBody = new String(response.getBody().array(), StandardCharsets.UTF_8);
+      var responseBody = new String(response.getBody().array(), StandardCharsets.UTF_8);

       return "";
     });

@@ -705,10 +704,10 @@ private static void setupBedrock() {
       Map<String, Object> request = new HashMap<>();

       String prompt = "Describe the purpose of a 'hello world' program in one line";
-      String instruction = String.format(
-          "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n%s<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>\n",
-          prompt
-      );
+      String instruction =
+          String.format(
+              "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n%s<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>\n",
+              prompt);

       request.put("prompt", instruction);
       request.put("max_gen_len", 128);
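
Both v1 hunks touch the same meta.llama3 invoke path. A standalone sketch of that path with the AWS SDK for Java v1 Bedrock Runtime API (the client-construction line is an assumption added for the example — the sample app wires up its own client and endpoint):

import com.amazonaws.services.bedrockruntime.AmazonBedrockRuntime;
import com.amazonaws.services.bedrockruntime.AmazonBedrockRuntimeClientBuilder;
import com.amazonaws.services.bedrockruntime.model.InvokeModelRequest;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

public class InvokeModelV1Sketch {
  public static void main(String[] args) throws Exception {
    // Assumed default client; App.java configures its own instance.
    AmazonBedrockRuntime bedrockRuntimeClient = AmazonBedrockRuntimeClientBuilder.standard().build();
    ObjectMapper mapper = new ObjectMapper();

    String prompt = "Describe the purpose of a 'hello world' program in one line";
    // Llama 3 instruction template, exactly as wrapped in the hunk above.
    String instruction =
        String.format(
            "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n%s<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>\n",
            prompt);

    Map<String, Object> request = new HashMap<>();
    request.put("prompt", instruction);
    request.put("max_gen_len", 128);

    // v1 takes the JSON payload as a ByteBuffer.
    InvokeModelRequest invokeModelRequest =
        new InvokeModelRequest()
            .withModelId("meta.llama3-70b-instruct-v1:0")
            .withBody(StandardCharsets.UTF_8.encode(mapper.writeValueAsString(request)));

    var response = bedrockRuntimeClient.invokeModel(invokeModelRequest);
    var responseBody = new String(response.getBody().array(), StandardCharsets.UTF_8);
    System.out.println(responseBody);
  }
}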

appsignals-tests/images/aws-sdk/aws-sdk-v2/src/main/java/com/amazon/sampleapp/App.java

Lines changed: 34 additions & 28 deletions
@@ -622,10 +622,11 @@ private static void setupBedrock() {
       request.put("top_p", 0.8);
       request.put("temperature", 0.7);

-      InvokeModelRequest invokeModelRequest = InvokeModelRequest.builder()
-          .modelId("ai21.jamba-1-5-mini-v1:0")
-          .body(SdkBytes.fromUtf8String(mapper.writeValueAsString(request)))
-          .build();
+      InvokeModelRequest invokeModelRequest =
+          InvokeModelRequest.builder()
+              .modelId("ai21.jamba-1-5-mini-v1:0")
+              .body(SdkBytes.fromUtf8String(mapper.writeValueAsString(request)))
+              .build();

       bedrockRuntimeClient.invokeModel(invokeModelRequest);

@@ -647,10 +648,11 @@ private static void setupBedrock() {

       request.put("textGenerationConfig", config);

-      InvokeModelRequest invokeModelRequest = InvokeModelRequest.builder()
-          .modelId("amazon.titan-text-premier-v1:0")
-          .body(SdkBytes.fromUtf8String(mapper.writeValueAsString(request)))
-          .build();
+      InvokeModelRequest invokeModelRequest =
+          InvokeModelRequest.builder()
+              .modelId("amazon.titan-text-premier-v1:0")
+              .body(SdkBytes.fromUtf8String(mapper.writeValueAsString(request)))
+              .build();

       bedrockRuntimeClient.invokeModel(invokeModelRequest);

@@ -676,10 +678,11 @@ private static void setupBedrock() {
       request.put("top_p", 0.53);
       request.put("temperature", 0.6);

-      InvokeModelRequest invokeModelRequest = InvokeModelRequest.builder()
-          .modelId("anthropic.claude-3-haiku-20240307-v1:0")
-          .body(SdkBytes.fromUtf8String(mapper.writeValueAsString(request)))
-          .build();
+      InvokeModelRequest invokeModelRequest =
+          InvokeModelRequest.builder()
+              .modelId("anthropic.claude-3-haiku-20240307-v1:0")
+              .body(SdkBytes.fromUtf8String(mapper.writeValueAsString(request)))
+              .build();

       bedrockRuntimeClient.invokeModel(invokeModelRequest);

@@ -698,10 +701,11 @@ private static void setupBedrock() {
       request.put("max_tokens", 4096);
       request.put("p", 0.45);

-      InvokeModelRequest invokeModelRequest = InvokeModelRequest.builder()
-          .modelId("cohere.command-r-v1:0")
-          .body(SdkBytes.fromUtf8String(mapper.writeValueAsString(request)))
-          .build();
+      InvokeModelRequest invokeModelRequest =
+          InvokeModelRequest.builder()
+              .modelId("cohere.command-r-v1:0")
+              .body(SdkBytes.fromUtf8String(mapper.writeValueAsString(request)))
+              .build();

       bedrockRuntimeClient.invokeModel(invokeModelRequest);

@@ -716,20 +720,21 @@ private static void setupBedrock() {
       Map<String, Object> request = new HashMap<>();

       String prompt = "Describe the purpose of a 'hello world' program in one line";
-      String instruction = String.format(
-          "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n%s<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>\n",
-          prompt
-      );
+      String instruction =
+          String.format(
+              "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n%s<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>\n",
+              prompt);

       request.put("prompt", instruction);
       request.put("max_gen_len", 128);
       request.put("temperature", 0.1);
       request.put("top_p", 0.9);

-      InvokeModelRequest invokeModelRequest = InvokeModelRequest.builder()
-          .modelId("meta.llama3-70b-instruct-v1:0")
-          .body(SdkBytes.fromUtf8String(mapper.writeValueAsString(request)))
-          .build();
+      InvokeModelRequest invokeModelRequest =
+          InvokeModelRequest.builder()
+              .modelId("meta.llama3-70b-instruct-v1:0")
+              .body(SdkBytes.fromUtf8String(mapper.writeValueAsString(request)))
+              .build();

       bedrockRuntimeClient.invokeModel(invokeModelRequest);

@@ -751,10 +756,11 @@ private static void setupBedrock() {
       request.put("temperature", 0.75);
       request.put("top_p", 0.25);

-      InvokeModelRequest invokeModelRequest = InvokeModelRequest.builder()
-          .modelId("mistral.mistral-large-2402-v1:0")
-          .body(SdkBytes.fromUtf8String(mapper.writeValueAsString(request)))
-          .build();
+      InvokeModelRequest invokeModelRequest =
+          InvokeModelRequest.builder()
+              .modelId("mistral.mistral-large-2402-v1:0")
+              .body(SdkBytes.fromUtf8String(mapper.writeValueAsString(request)))
+              .build();

       bedrockRuntimeClient.invokeModel(invokeModelRequest);
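
All six hunks in this file re-wrap the same SDK v2 builder chain. For reference, the complete v2 call as a self-contained sketch (the client construction and response printing are assumptions added for the example; the builder chain matches the hunks above):

import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.HashMap;
import java.util.Map;
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.services.bedrockruntime.BedrockRuntimeClient;
import software.amazon.awssdk.services.bedrockruntime.model.InvokeModelRequest;
import software.amazon.awssdk.services.bedrockruntime.model.InvokeModelResponse;

public class InvokeModelV2Sketch {
  public static void main(String[] args) throws Exception {
    // Assumed default client; App.java points its client at a mock endpoint.
    BedrockRuntimeClient bedrockRuntimeClient = BedrockRuntimeClient.create();
    ObjectMapper mapper = new ObjectMapper();

    Map<String, Object> request = new HashMap<>();
    request.put("temperature", 0.75);
    request.put("top_p", 0.25);

    // v2 takes the JSON payload as SdkBytes.
    InvokeModelRequest invokeModelRequest =
        InvokeModelRequest.builder()
            .modelId("mistral.mistral-large-2402-v1:0")
            .body(SdkBytes.fromUtf8String(mapper.writeValueAsString(request)))
            .build();

    InvokeModelResponse response = bedrockRuntimeClient.invokeModel(invokeModelRequest);
    // The response payload is JSON; read it back as UTF-8 text.
    System.out.println(response.body().asUtf8String());
  }
}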

0 commit comments
