@@ -1833,8 +1833,7 @@ protected void doTestBedrockRuntimeAi21Jamba() {
             assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.8"),
             assertAttribute(SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[stop]"),
             assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "5"),
-            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "42")
-        ));
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "42")));
     assertMetricClientAttributes(
         metrics,
         AppSignalsConstants.LATENCY_METRIC,
@@ -1897,20 +1896,15 @@ protected void doTestBedrockRuntimeAmazonTitan() {
         200,
         List.of(
             assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MODEL, "amazon.titan-text-premier-v1:0"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "100"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.7"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.9"),
+                SemanticConventionsConstants.GEN_AI_REQUEST_MODEL,
+                "amazon.titan-text-premier-v1:0"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "100"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.7"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.9"),
             assertAttribute(
                 SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[FINISHED]"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "10"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "15")
-        ));
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "10"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "15")));
     assertMetricClientAttributes(
         metrics,
         AppSignalsConstants.LATENCY_METRIC,
@@ -1947,12 +1941,12 @@ protected void doTestBedrockRuntimeAnthropicClaude() {
     var response = appClient.get("/bedrockruntime/invokeModel/anthropicClaude").aggregate().join();

     var traces = mockCollectorClient.getTraces();
-    var metrics = mockCollectorClient.getMetrics(
-        Set.of(
-            AppSignalsConstants.ERROR_METRIC,
-            AppSignalsConstants.FAULT_METRIC,
-            AppSignalsConstants.LATENCY_METRIC
-        ));
+    var metrics =
+        mockCollectorClient.getMetrics(
+            Set.of(
+                AppSignalsConstants.ERROR_METRIC,
+                AppSignalsConstants.FAULT_METRIC,
+                AppSignalsConstants.LATENCY_METRIC));

     var localService = getApplicationOtelServiceName();
     var localOperation = "GET /bedrockruntime/invokeModel/anthropicClaude";
@@ -1975,20 +1969,15 @@ protected void doTestBedrockRuntimeAnthropicClaude() {
         200,
         List.of(
             assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MODEL, "anthropic.claude-3-haiku-20240307-v1:0"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "512"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.6"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.53"),
+                SemanticConventionsConstants.GEN_AI_REQUEST_MODEL,
+                "anthropic.claude-3-haiku-20240307-v1:0"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "512"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.6"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.53"),
             assertAttribute(
                 SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[end_turn]"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "2095"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "503")
-        ));
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "2095"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "503")));
     assertMetricClientAttributes(
         metrics,
         AppSignalsConstants.LATENCY_METRIC,
@@ -2025,11 +2014,12 @@ protected void doTestBedrockRuntimeCohereCommandR() {
     var response = appClient.get("/bedrockruntime/invokeModel/cohereCommandR").aggregate().join();

     var traces = mockCollectorClient.getTraces();
-    var metrics = mockCollectorClient.getMetrics(
-        Set.of(
-            AppSignalsConstants.ERROR_METRIC,
-            AppSignalsConstants.FAULT_METRIC,
-            AppSignalsConstants.LATENCY_METRIC));
+    var metrics =
+        mockCollectorClient.getMetrics(
+            Set.of(
+                AppSignalsConstants.ERROR_METRIC,
+                AppSignalsConstants.FAULT_METRIC,
+                AppSignalsConstants.LATENCY_METRIC));

     var localService = getApplicationOtelServiceName();
     var localOperation = "GET /bedrockruntime/invokeModel/cohereCommandR";
@@ -2053,19 +2043,13 @@ protected void doTestBedrockRuntimeCohereCommandR() {
         List.of(
             assertAttribute(
                 SemanticConventionsConstants.GEN_AI_REQUEST_MODEL, "cohere.command-r-v1:0"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "4096"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.8"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.45"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "4096"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.8"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.45"),
             assertAttribute(
                 SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[COMPLETE]"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "9"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "16")
-        ));
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "9"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "16")));
     assertMetricClientAttributes(
         metrics,
         AppSignalsConstants.LATENCY_METRIC,
@@ -2102,12 +2086,12 @@ protected void doTestBedrockRuntimeMetaLlama() {
     var response = appClient.get("/bedrockruntime/invokeModel/metaLlama").aggregate().join();

     var traces = mockCollectorClient.getTraces();
-    var metrics = mockCollectorClient.getMetrics(
-        Set.of(
-            AppSignalsConstants.ERROR_METRIC,
-            AppSignalsConstants.FAULT_METRIC,
-            AppSignalsConstants.LATENCY_METRIC)
-    );
+    var metrics =
+        mockCollectorClient.getMetrics(
+            Set.of(
+                AppSignalsConstants.ERROR_METRIC,
+                AppSignalsConstants.FAULT_METRIC,
+                AppSignalsConstants.LATENCY_METRIC));

     var localService = getApplicationOtelServiceName();
     var localOperation = "GET /bedrockruntime/invokeModel/metaLlama";
@@ -2131,19 +2115,12 @@ protected void doTestBedrockRuntimeMetaLlama() {
         List.of(
             assertAttribute(
                 SemanticConventionsConstants.GEN_AI_REQUEST_MODEL, "meta.llama3-70b-instruct-v1:0"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "128"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.1"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.9"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[stop]"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "2095"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "503")
-        ));
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "128"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.1"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.9"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[stop]"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "2095"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "503")));
     assertMetricClientAttributes(
         metrics,
         AppSignalsConstants.LATENCY_METRIC,
@@ -2180,12 +2157,12 @@ protected void doTestBedrockRuntimeMistral() {
     var response = appClient.get("/bedrockruntime/invokeModel/mistralAi").aggregate().join();

     var traces = mockCollectorClient.getTraces();
-    var metrics = mockCollectorClient.getMetrics(
-        Set.of(
-            AppSignalsConstants.ERROR_METRIC,
-            AppSignalsConstants.FAULT_METRIC,
-            AppSignalsConstants.LATENCY_METRIC)
-    );
+    var metrics =
+        mockCollectorClient.getMetrics(
+            Set.of(
+                AppSignalsConstants.ERROR_METRIC,
+                AppSignalsConstants.FAULT_METRIC,
+                AppSignalsConstants.LATENCY_METRIC));

     var localService = getApplicationOtelServiceName();
     var localOperation = "GET /bedrockruntime/invokeModel/mistralAi";
@@ -2208,20 +2185,14 @@ protected void doTestBedrockRuntimeMistral() {
         200,
         List.of(
             assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MODEL, "mistral.mistral-large-2402-v1:0"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "4096"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.75"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.25"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[stop]"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "15"),
-            assertAttribute(
-                SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "24")
-        ));
+                SemanticConventionsConstants.GEN_AI_REQUEST_MODEL,
+                "mistral.mistral-large-2402-v1:0"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_MAX_TOKENS, "4096"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TEMPERATURE, "0.75"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_REQUEST_TOP_P, "0.25"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_RESPONSE_FINISH_REASONS, "[stop]"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_INPUT_TOKENS, "15"),
+            assertAttribute(SemanticConventionsConstants.GEN_AI_USAGE_OUTPUT_TOKENS, "24")));
     assertMetricClientAttributes(
         metrics,
         AppSignalsConstants.LATENCY_METRIC,