@@ -1007,7 +1007,7 @@ mod tests {
10071007 // Verify bucketed string attributes have the same content
10081008 // "str_attr" should be in one of the string buckets
10091009 let str_bucket = ( fnv_1a ( "str_attr" . as_bytes ( ) ) as usize ) % 40 ;
1010- let json_field = format ! ( "attributes_string_{}" , str_bucket ) ;
1010+ let json_field = format ! ( "attributes_string_{str_bucket}" ) ;
10111011 let json_str_map: HashMap < String , String > = json_row
10121012 . get ( & json_field)
10131013 . map ( |v| serde_json:: from_value ( v. clone ( ) ) . unwrap ( ) )
@@ -1070,8 +1070,7 @@ mod tests {
10701070 assert_eq ! (
10711071 rb_str_val,
10721072 Some ( & "hello" . to_string( ) ) ,
1073- "str_attr not found in RowBinary output bucket {}" ,
1074- str_bucket
1073+ "str_attr not found in RowBinary output bucket {str_bucket}"
10751074 ) ;
10761075 assert_eq ! (
10771076 json_str_map. get( "str_attr" ) ,
@@ -1082,7 +1081,7 @@ mod tests {
10821081 // Verify float attribute is in the correct bucket (int_attr and bool_attr are
10831082 // double-written as floats)
10841083 let float_bucket = ( fnv_1a ( "float_attr" . as_bytes ( ) ) as usize ) % 40 ;
1085- let json_float_field = format ! ( "attributes_float_{}" , float_bucket ) ;
1084+ let json_float_field = format ! ( "attributes_float_{float_bucket}" ) ;
10861085 let json_float_map: HashMap < String , f64 > = json_row
10871086 . get ( & json_float_field)
10881087 . map ( |v| serde_json:: from_value ( v. clone ( ) ) . unwrap ( ) )
@@ -1127,7 +1126,7 @@ mod tests {
11271126 let database = std:: env:: var ( "CLICKHOUSE_DATABASE" ) . unwrap_or ( "default" . to_string ( ) ) ;
11281127
11291128 let client = clickhouse:: Client :: default ( )
1130- . with_url ( format ! ( "http://{}:{}" , host , http_port ) )
1129+ . with_url ( format ! ( "http://{host}:{http_port}" ) )
11311130 . with_database ( & database)
11321131 . with_option ( "input_format_binary_read_json_as_string" , "1" )
11331132 . with_option ( "insert_deduplicate" , "0" ) ;
@@ -1193,8 +1192,7 @@ mod tests {
11931192 // Read it back using organization_id (primary key prefix) for reliable lookup
11941193 let count: u64 = client
11951194 . query ( & format ! (
1196- "SELECT count() FROM eap_items_1_local WHERE organization_id = {}" ,
1197- unique_org_id
1195+ "SELECT count() FROM eap_items_1_local WHERE organization_id = {unique_org_id}"
11981196 ) )
11991197 . fetch_one ( )
12001198 . await
@@ -1208,9 +1206,8 @@ mod tests {
12081206 . query ( & format ! (
12091207 "SELECT organization_id, project_id, item_type, sampling_weight \
12101208 FROM eap_items_1_local \
1211- WHERE organization_id = {} \
1212- LIMIT 1",
1213- unique_org_id
1209+ WHERE organization_id = {unique_org_id} \
1210+ LIMIT 1"
12141211 ) )
12151212 . fetch_one :: < ( u64 , u64 , u8 , u64 ) > ( )
12161213 . await
@@ -1224,8 +1221,7 @@ mod tests {
12241221 // Clean up
12251222 client
12261223 . query ( & format ! (
1227- "ALTER TABLE eap_items_1_local DELETE WHERE organization_id = {}" ,
1228- unique_org_id
1224+ "ALTER TABLE eap_items_1_local DELETE WHERE organization_id = {unique_org_id}"
12291225 ) )
12301226 . execute ( )
12311227 . await
0 commit comments