
Commit 5c9c50b

metadata: deduplicate pk, ck logic and avoid HashMap
The logic is deduplicated (at least a bit), and HashMap is no longer used; a (lightweight) Vec + sorting is a perfect fit for this use case.
1 parent 53c79ce commit 5c9c50b
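
As a rough standalone sketch of the approach the message describes (not code from this commit): key columns are collected as (position, name) pairs in a plain Vec while the schema rows are scanned, and the Vec is sorted by position once when it is read back, instead of inserting into and probing a HashMap keyed by position. The collect_key_columns function and the sample data below are invented for illustration.

// Hypothetical, simplified sketch of the "Vec + sorting instead of HashMap" idea.
// `rows` stands in for whatever yields (position, column_name) pairs; it is not
// the driver's real row type.
fn collect_key_columns(rows: impl IntoIterator<Item = (i32, String)>) -> Vec<String> {
    // Collection phase: just push the pairs, no hashing involved.
    let mut key_columns: Vec<(i32, String)> = rows.into_iter().collect();

    // Read phase: a single sort by position, then drop the positions.
    key_columns.sort_unstable_by_key(|(position, _)| *position);
    key_columns.into_iter().map(|(_, name)| name).collect()
}

fn main() {
    // Positions may arrive out of order; sorting restores key order.
    let key = collect_key_columns(vec![(1, "region".to_owned()), (0, "id".to_owned())]);
    assert_eq!(key, vec!["id", "region"]);
    println!("{key:?}");
}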

File tree

1 file changed (+51 -37 lines)


scylla/src/cluster/metadata.rs

Lines changed: 51 additions & 37 deletions
@@ -1585,21 +1585,21 @@ async fn query_tables_schema(
                 .entry((keyspace_name, table_name))
                 .or_insert(Ok((
                     HashMap::new(), // columns
-                    HashMap::new(), // partition key
-                    HashMap::new(), // clustering key
+                    Vec::new(), // partition key
+                    Vec::new(), // clustering key
                 )))
             else {
                 // This table was previously marked as broken, no way to insert anything.
                 return Ok::<_, MetadataError>(());
             };
 
             if kind == ColumnKind::PartitionKey || kind == ColumnKind::Clustering {
-                let key_map = if kind == ColumnKind::PartitionKey {
+                let key_list: &mut Vec<(i32, String)> = if kind == ColumnKind::PartitionKey {
                     entry.1.borrow_mut()
                 } else {
                     entry.2.borrow_mut()
                 };
-                key_map.insert(position, column_name.clone());
+                key_list.push((position, column_name.clone()));
             }
 
             entry.0.insert(
@@ -1621,49 +1621,63 @@ async fn query_tables_schema(
     'tables_loop: for ((keyspace_name, table_name), table_result) in tables_schema {
         let keyspace_and_table_name = (keyspace_name, table_name);
 
-        let (columns, mut partition_key_columns, mut clustering_key_columns) = match table_result {
+        #[allow(clippy::type_complexity)]
+        let (columns, partition_key_columns, clustering_key_columns): (
+            HashMap<String, Column>,
+            Vec<(i32, String)>,
+            Vec<(i32, String)>,
+        ) = match table_result {
             Ok(table) => table,
             Err(e) => {
                 let _ = result.insert(keyspace_and_table_name, Err(e));
                 continue;
             }
         };
 
-        let mut partition_key = Vec::with_capacity(partition_key_columns.len());
-        // unwrap: I don't see the point of handling the scenario of fetching over
-        // 2 * 10^9 columns.
-        for position in 0..partition_key_columns.len().try_into().unwrap() {
-            match partition_key_columns.remove(&position) {
-                Some(column_name) => partition_key.push(column_name),
-                None => {
-                    result.insert(
-                        keyspace_and_table_name,
-                        Err(SingleKeyspaceMetadataError::IncompletePartitionKey(
-                            position,
-                        )),
-                    );
-                    continue 'tables_loop;
-                }
-            }
+        fn validate_key_columns(mut key_columns: Vec<(i32, String)>) -> Result<Vec<String>, i32> {
+            key_columns.sort_unstable_by_key(|(position, _)| *position);
+
+            key_columns
+                .into_iter()
+                .enumerate()
+                .map(|(idx, (position, column_name))| {
+                    // unwrap: I don't see the point of handling the scenario of fetching over
+                    // 2 * 10^9 columns.
+                    let idx: i32 = idx.try_into().unwrap();
+                    if idx == position {
+                        Ok(column_name)
+                    } else {
+                        Err(idx)
+                    }
+                })
+                .collect::<Result<Vec<_>, _>>()
         }
 
-        let mut clustering_key = Vec::with_capacity(clustering_key_columns.len());
-        // unwrap: I don't see the point of handling the scenario of fetching over
-        // 2 * 10^9 columns.
-        for position in 0..clustering_key_columns.len().try_into().unwrap() {
-            match clustering_key_columns.remove(&position) {
-                Some(column_name) => clustering_key.push(column_name),
-                None => {
-                    result.insert(
-                        keyspace_and_table_name,
-                        Err(SingleKeyspaceMetadataError::IncompleteClusteringKey(
-                            position,
-                        )),
-                    );
-                    continue 'tables_loop;
-                }
+        let partition_key = match validate_key_columns(partition_key_columns) {
+            Ok(partition_key_columns) => partition_key_columns,
+            Err(position) => {
+                result.insert(
+                    keyspace_and_table_name,
+                    Err(SingleKeyspaceMetadataError::IncompletePartitionKey(
+                        position,
+                    )),
+                );
+                continue 'tables_loop;
             }
-        }
+        };
+
+        let clustering_key = match validate_key_columns(clustering_key_columns) {
+            Ok(clustering_key_columns) => clustering_key_columns,
+            Err(position) => {
+                result.insert(
+                    keyspace_and_table_name,
+                    Err(SingleKeyspaceMetadataError::IncompleteClusteringKey(
+                        position,
+                    )),
+                );
+                continue 'tables_loop;
+            }
+        };
 
         let partitioner = all_partitioners
             .remove(&keyspace_and_table_name)
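
For reference, here is how the new helper behaves when exercised on its own. validate_key_columns below is copied from the diff (in the driver it is a nested fn inside query_tables_schema); the surrounding main function and the sample column lists are made up for illustration, and the Err position is what the caller maps to IncompletePartitionKey or IncompleteClusteringKey.

// `validate_key_columns` as introduced by this commit, extracted here as a free
// function only so its behavior can be shown standalone.
fn validate_key_columns(mut key_columns: Vec<(i32, String)>) -> Result<Vec<String>, i32> {
    key_columns.sort_unstable_by_key(|(position, _)| *position);

    key_columns
        .into_iter()
        .enumerate()
        .map(|(idx, (position, column_name))| {
            // unwrap: fetching more than 2 * 10^9 columns is not a realistic scenario.
            let idx: i32 = idx.try_into().unwrap();
            if idx == position {
                Ok(column_name)
            } else {
                Err(idx)
            }
        })
        .collect::<Result<Vec<_>, _>>()
}

fn main() {
    // Out-of-order but complete positions: sorted back into key order.
    let ok = validate_key_columns(vec![(1, "region".to_owned()), (0, "id".to_owned())]);
    assert_eq!(ok, Ok(vec!["id".to_owned(), "region".to_owned()]));

    // Position 1 is missing, so the key is incomplete; the first mismatching index
    // is reported, which the caller turns into an Incomplete*Key error.
    let gap = validate_key_columns(vec![(0, "id".to_owned()), (2, "region".to_owned())]);
    assert_eq!(gap, Err(1));

    println!("ok = {ok:?}, gap = {gap:?}");
}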
