From a5152ceed91cff83d73aec546588fdd69438e30e Mon Sep 17 00:00:00 2001
From: olegvallas
Date: Mon, 17 Nov 2025 06:58:03 +0100
Subject: [PATCH 1/5] Fix depth balance decompression

---
 crypto/vm/boc-compression.cpp | 316 ++++++++++++++++++++++++++++++++--
 1 file changed, 298 insertions(+), 18 deletions(-)

diff --git a/crypto/vm/boc-compression.cpp b/crypto/vm/boc-compression.cpp
index 41095efde..4070f49ab 100644
--- a/crypto/vm/boc-compression.cpp
+++ b/crypto/vm/boc-compression.cpp
@@ -18,6 +18,7 @@
 */
 #include <bitset>
 #include <vector>
+#include <iostream>
 
 #include "td/utils/Slice-decl.h"
 #include "td/utils/lz4.h"
@@ -27,6 +28,9 @@
 #include "vm/cellslice.h"
 
 #include "boc-compression.h"
+#include "common/refint.h"
+#include "crypto/block/block-auto.h"
+#include "crypto/block/block-parse.h"
 
 namespace vm {
 
@@ -77,7 +81,88 @@ inline td::Result read_uint(td::BitSlice& bs, int bits) {
   return result;
 }
 
+// Decode DepthBalanceInfo and extract grams using TLB methods
+td::RefInt256 extract_balance_from_depth_balance_info(vm::CellSlice& cs) {
+  int split_depth;
+  Ref<vm::CellSlice> balance_cs_ref;
+  if (!block::gen::t_DepthBalanceInfo.unpack_depth_balance(cs, split_depth, balance_cs_ref)) {
+    return td::RefInt256{};
+  }
+  if (split_depth != 0) {
+    return td::RefInt256{};
+  }
+  if (!cs.empty()) {
+    return td::RefInt256{};
+  }
+  auto balance_cs = balance_cs_ref.write();
+  auto res = block::tlb::t_Grams.as_integer_skip(balance_cs);
+  if (balance_cs.size() != 1) {
+    return td::RefInt256{};
+  }
+  int last_bit = balance_cs.fetch_ulong(1);
+  if (last_bit != 0) {
+    return td::RefInt256{};
+  }
+  return res;
+}
+
+// Skip Hashmap label for ShardAccounts (expecting '00' for empty label at root)
+bool skip_hashmap_label(vm::CellSlice& cs, int /*max_bits*/) {
+  int k = cs.fetch_ulong(2);
+  if (k != 0) {
+    return false;
+  }
+  return true;
+}
+
+// Process ShardAccounts vertex and compute balance difference (right - left)
+td::RefInt256 process_shard_accounts_vertex(vm::CellSlice& cs_left, vm::CellSlice& cs_right) {
+  if (skip_hashmap_label(cs_left, 256) && skip_hashmap_label(cs_right, 256)) {
+    auto balance_left = extract_balance_from_depth_balance_info(cs_left);
+    auto balance_right = extract_balance_from_depth_balance_info(cs_right);
+    if (balance_left.not_null() && balance_right.not_null()) {
+      td::RefInt256 diff = balance_right;
+      diff -= balance_left;
+      return diff;
+    }
+  }
+  return td::RefInt256{};
+}
+
+// Rebuild the ShardAccounts vertex from left + children diffs and compare with right
+bool reconstruct_shard_vertex_and_compare(vm::CellSlice cs_left, vm::CellSlice cs_right, const td::RefInt256& sum_child_diff) {
+  if (!skip_hashmap_label(cs_left, 256)) {
+    return false;
+  }
+  auto left_grams = extract_balance_from_depth_balance_info(cs_left);
+  if (left_grams.is_null()) {
+    return false;
+  }
+  td::RefInt256 expected_right_grams = left_grams;
+  expected_right_grams += sum_child_diff;
+
+  vm::CellBuilder cb;
+  if (!cb.store_zeroes_bool(2)) {
+    return false;
+  }
+  if (!cb.store_zeroes_bool(5)) {
+    return false;
+  }
+  if (!block::tlb::t_CurrencyCollection.pack_special(cb, expected_right_grams, td::Ref<vm::Cell>())) {
+    return false;
+  }
+  td::Ref<vm::Cell> built_cell;
+  try {
+    built_cell = cb.finalize(false);
+  } catch (vm::CellBuilder::CellWriteError&) {
+    return false;
+  }
+  vm::CellSlice built_cs(NoVm(), built_cell);
+  return built_cs.as_bitslice().to_hex() == cs_right.as_bitslice().to_hex();
+}
+
 td::Result<td::BufferSlice> boc_compress_improved_structure_lz4(const std::vector<td::Ref<vm::Cell>>& boc_roots) {
+  const bool kMURemoveSubtreeSums = true;
   // Input validation
   if (boc_roots.empty()) {
     return td::Status::Error("No root cells were provided for serialization");
@@ -99,7 +184,12 @@ td::Result<td::BufferSlice> boc_compress_improved_structure_lz4(const std::vecto
   size_t total_size_estimate = 0;
 
   // Build graph representation using recursive lambda
-  const auto build_graph = [&](auto&& self, td::Ref<vm::Cell> cell) -> td::Result<size_t> {
+  // When traversing RIGHT side of a MerkleUpdate, pass corresponding left_cell and non-null sum_diff_out
+  const auto build_graph = [&](auto&& self,
+                               td::Ref<vm::Cell> cell,
+                               td::Ref<vm::Cell> left_cell = td::Ref<vm::Cell>(),
+                               bool under_mu_right = false,
+                               td::RefInt256* sum_diff_out = nullptr) -> td::Result<size_t> {
     if (cell.is_null()) {
       return td::Status::Error("Error while importing a cell during serialization: cell is null");
     }
@@ -139,9 +229,51 @@ td::Result<td::BufferSlice> boc_compress_improved_structure_lz4(const std::vecto
     total_size_estimate += cell_bitslice.size();
 
     // Process cell references
-    for (int i = 0; i < cell_slice.size_refs(); ++i) {
-      TRY_RESULT(child_id, self(self, cell_slice.prefetch_ref(i)));
-      boc_graph[current_cell_id][i] = child_id;
+    if (kMURemoveSubtreeSums && cell_slice.special_type() == vm::CellTraits::SpecialType::MerkleUpdate) {
+      // Left branch: traverse normally
+      TRY_RESULT(child_left_id, self(self, cell_slice.prefetch_ref(0)));
+      boc_graph[current_cell_id][0] = child_left_id;
+      // Right branch: traverse paired with left and compute diffs inline
+      td::RefInt256 right_sum_diff = td::make_refint(0);
+      TRY_RESULT(child_right_id, self(self,
+                                      cell_slice.prefetch_ref(1),
+                                      cell_slice.prefetch_ref(0),
+                                      true,
+                                      &right_sum_diff));
+      boc_graph[current_cell_id][1] = child_right_id;
+      // Any extra refs if present
+      for (int i = 2; i < cell_slice.size_refs(); ++i) {
+        TRY_RESULT(child_id, self(self, cell_slice.prefetch_ref(i)));
+        boc_graph[current_cell_id][i] = child_id;
+      }
+    } else if (under_mu_right && left_cell.not_null()) {
+      // Inline computation for RIGHT subtree nodes under MerkleUpdate
+      vm::CellSlice cs_left(NoVm(), left_cell);
+      td::RefInt256 sum_child_diff = td::make_refint(0);
+      // Recurse children first
+      for (int i = 0; i < cell_slice.size_refs(); ++i) {
+        TRY_RESULT(child_id, self(self,
+                                  cell_slice.prefetch_ref(i),
+                                  cs_left.prefetch_ref(i),
+                                  true,
+                                  &sum_child_diff));
+        boc_graph[current_cell_id][i] = child_id;
+      }
+
+      // Compute this vertex diff and check skippable condition
+      td::RefInt256 vertex_diff = process_shard_accounts_vertex(cs_left, cell_slice);
+      if (!is_special && vertex_diff.not_null() && sum_child_diff.not_null() && cmp(sum_child_diff, vertex_diff) == 0) {
+        cell_data[current_cell_id] = td::BitSlice();
+        prunned_branch_level[current_cell_id] = 9;
+      }
+      if (sum_diff_out && vertex_diff.not_null()) {
+        *sum_diff_out += vertex_diff;
+      }
+    } else {
+      for (int i = 0; i < cell_slice.size_refs(); ++i) {
+        TRY_RESULT(child_id, self(self, cell_slice.prefetch_ref(i)));
+        boc_graph[current_cell_id][i] = child_id;
+      }
     }
 
     return current_cell_id;
@@ -241,11 +373,11 @@ td::Result<td::BufferSlice> boc_compress_improved_structure_lz4(const std::vecto
   // Store cell types and sizes
   for (int i = 0; i < node_count; ++i) {
     size_t node = topo_order[i];
-    size_t currrent_cell_type = bool(cell_type[node]) + prunned_branch_level[node];
-    append_uint(result, currrent_cell_type, 4);
+    size_t current_cell_type = bool(cell_type[node]) + prunned_branch_level[node];
+    append_uint(result, current_cell_type, 4);
     append_uint(result, refs_cnt[node], 4);
 
-    if (cell_type[node] != 1) {
+    if (cell_type[node] != 1 && current_cell_type != 9) {
       if (is_data_small[node]) {
         append_uint(result, 1, 1);
         append_uint(result, cell_data[node].size(), 7);
@@ -404,7 +536,7 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
   // Initialize data structures
   std::vector<size_t> cell_data_length(node_count), is_data_small(node_count);
-  std::vector<size_t> is_special(node_count), cell_refs_cnt(node_count);
+  std::vector<size_t> is_special(node_count), cell_refs_cnt(node_count), is_depth_balance(node_count);
   std::vector<size_t> prunned_branch_level(node_count, 0);
   std::vector<vm::CellBuilder> cell_builders(node_count);
@@ -418,7 +550,11 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
     }
     size_t cell_type = bit_reader.bits().get_uint(4);
-    is_special[i] = bool(cell_type);
+    is_special[i] = (cell_type == 9 ? false : bool(cell_type));
+    is_depth_balance[i] = cell_type == 9;
+    if (is_depth_balance[i]) {
+      std::cerr << "NODE " << i << " is depth balance" << std::endl;
+    }
     if (is_special[i]) {
       prunned_branch_level[i] = cell_type - 1;
     }
@@ -429,8 +565,9 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
     if (cell_refs_cnt[i] > 4) {
       return td::Status::Error("BOC decompression failed: invalid cell refs count");
    }
-
-    if (prunned_branch_level[i]) {
+    if (is_depth_balance[i]) {
+      cell_data_length[i] = 0;
+    } else if (prunned_branch_level[i]) {
       size_t coef = std::bitset<4>(prunned_branch_level[i]).count();
       cell_data_length[i] = (256 + 16) * coef;
     } else {
@@ -470,6 +607,9 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
 
   // Read initial cell data
   for (int i = 0; i < node_count; ++i) {
+    if (is_depth_balance[i]) {
+      continue;
+    }
     if (prunned_branch_level[i]) {
       cell_builders[i].store_long((1 << 8) + prunned_branch_level[i], 16);
     }
@@ -538,6 +678,9 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
 
   // Read remaining cell data
   for (int i = 0; i < node_count; ++i) {
+    if (is_depth_balance[i]) {
+      continue;
+    }
     size_t padding_bits = 0;
     if (!prunned_branch_level[i] && !is_data_small[i]) {
       while (bit_reader.size() > 0 && bit_reader.bits()[0] == 0) {
@@ -561,20 +704,157 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
 
   // Build cell tree
   std::vector<td::Ref<vm::Cell>> nodes(node_count);
-  for (int i = node_count - 1; i >= 0; --i) {
+
+  // Helper: write ShardAccounts augmentation (DepthBalanceInfo with grams) into builder
+  auto write_depth_balance_grams = [&](vm::CellBuilder& cb, const td::RefInt256& grams) -> bool {
+    if (!cb.store_zeroes_bool(2)) {  // empty HmLabel
+      return false;
+    }
+    if (!cb.store_zeroes_bool(5)) {  // split_depth = 0 (<= 30)
+      return false;
+    }
+    if (!block::tlb::t_CurrencyCollection.pack_special(cb, grams, td::Ref<vm::Cell>())) {
+      return false;
+    }
+    return true;
+  };
+
+  // Helper: detect MerkleUpdate (is_special AND first byte == 0x04) without finalizing
+  auto is_merkle_update_node = [&](size_t idx) -> bool {
+    if (!is_special[idx]) {
+      return false;
+    }
+    // Need at least one full byte in data to read the tag
+    if (cell_builders[idx].get_bits() < 8) {
+      return false;
+    }
+    unsigned first_byte = cell_builders[idx].get_data()[0];
+    return first_byte == 0x04;
+  };
+
+  // Recursively build RIGHT subtree under MerkleUpdate, pairing with LEFT subtree, computing sum diffs.
+  // Note: left_idx can be SIZE_MAX (as -1 sentinel) when the left subtree has no corresponding child.
+  // Sum is accumulated into sum_diff_out (if non-null), similar to compression flow.
+  std::function<void(size_t, size_t, td::RefInt256*)> build_right_under_mu =
+      [&](size_t right_idx, size_t left_idx, td::RefInt256* sum_diff_out) -> void {
+    std::cerr << "BUILDINGG NODE " << right_idx << " is_depth_balance: " << is_depth_balance[right_idx] << " length: " << cell_data_length[right_idx] << " prunned_branch_level: " << prunned_branch_level[right_idx] << std::endl;
+    if (right_idx == 1259) {
+      std::cerr << "RIGHT INDEX IS " << right_idx << std::endl;
+    }
+    // Build children first
+    td::RefInt256 sum_child_diff = td::make_refint(0);
+    for (int j = 0; j < cell_refs_cnt[right_idx]; ++j) {
+      size_t right_child = boc_graph[right_idx][j];
+      size_t left_child = (left_idx != std::numeric_limits<size_t>::max() && j < cell_refs_cnt[left_idx])
+                              ? boc_graph[left_idx][j]
+                              : std::numeric_limits<size_t>::max();
+      build_right_under_mu(right_child, left_child, &sum_child_diff);
+    }
+
+    // If this vertex was depth-balance-compressed, reconstruct its data from left + children sum
+    if (is_depth_balance[right_idx] && left_idx != std::numeric_limits<size_t>::max()) {
+      // Extract left grams
+      vm::CellSlice cs_left(NoVm(), nodes[left_idx]);
+      td::RefInt256 left_grams = extract_balance_from_depth_balance_info(cs_left);
+      if (left_grams.not_null()) {
+        td::RefInt256 expected_right_grams = left_grams;
+        expected_right_grams += sum_child_diff;
+        // Write reconstructed augmentation into builder
+        if (!write_depth_balance_grams(cell_builders[right_idx], expected_right_grams)) {
+          std::cerr << "BOC decompression failed: failed to write depth-balance grams" << std::endl;
+          exit(1);
+        }
+      } else {
+        std::cerr << "BOC decompression failed: depth-balance left vertex has no grams" << std::endl;
+        exit(1);
+      }
+    }
+
+    // Store children refs and finalize this right node
+    try {
+      for (int j = 0; j < cell_refs_cnt[right_idx]; ++j) {
+        cell_builders[right_idx].store_ref(nodes[boc_graph[right_idx][j]]);
+      }
+      try {
+        nodes[right_idx] = cell_builders[right_idx].finalize(is_special[right_idx]);
+      } catch (vm::CellBuilder::CellWriteError& e) {
+        std::cerr << "BOC decompression failed: failed to finalize right vertex" << std::endl;
+        exit(228);
+      }
+    } catch (vm::VmError& e) {
+      std::cerr << "BOC decompression failed: failed to finalize right vertex" << std::endl;
+      exit(228);
+    }
+
+    // Compute this vertex diff (right - left) to propagate upward
+    if (left_idx != std::numeric_limits<size_t>::max()) {
+      vm::CellSlice cs_left(NoVm(), nodes[left_idx]);
+      vm::CellSlice cs_right(NoVm(), nodes[right_idx]);
+      td::RefInt256 vertex_diff = process_shard_accounts_vertex(cs_left, cs_right);
+      if (sum_diff_out && vertex_diff.not_null()) {
+        *sum_diff_out += vertex_diff;
+      }
+    }
+  };
+
+  // General recursive build that handles MerkleUpdate by pairing left/right subtrees
+  std::function<void(size_t)> build_node = [&](size_t idx) {
+    if (nodes[idx].not_null()) {
+      return;
+    }
+    // If this node is a MerkleUpdate (special and first byte == 0x04),
+    // build left subtree normally first, then right subtree paired with left
+    if (cell_refs_cnt[idx] >= 2) {
+      size_t left_idx = boc_graph[idx][0];
+      size_t right_idx = boc_graph[idx][1];
+      if (is_merkle_update_node(idx)) {
+        std::cerr << "FOUND MU NODE" << std::endl;
+        // Build left subtree
+        build_node(left_idx);
+        std::cerr << "BUILT LEFT SUBTREE" << std::endl;
+        // Build right subtree under MU pairing with left
+        td::RefInt256 right_sum_diff = td::make_refint(0);
+        build_right_under_mu(right_idx, left_idx, &right_sum_diff);
+        std::cerr << "BUILT RIGHT SUBTREE" << std::endl;
+        // Finalize current MU node with built children
+        try {
+          for (int j = 0; j < cell_refs_cnt[idx]; ++j) {
+            cell_builders[idx].store_ref(nodes[boc_graph[idx][j]]);
+          }
+          try {
+            nodes[idx] = cell_builders[idx].finalize(is_special[idx]);
+          } catch (vm::CellBuilder::CellWriteError& e) {
+            // propagate by leaving null; caller will error later if needed
+            return;
+          }
+        } catch (vm::VmError& e) {
+          return;
+        }
+        return;
+      }
+    }
+    // Default: build children normally then finalize
+    for (int j = 0; j < cell_refs_cnt[idx]; ++j) {
+      build_node(boc_graph[idx][j]);
+    }
     try {
-      for (int child_index = 0; child_index < cell_refs_cnt[i]; ++child_index) {
-        size_t child = boc_graph[i][child_index];
-        cell_builders[i].store_ref(nodes[child]);
+      for (int j = 0; j < cell_refs_cnt[idx]; ++j) {
+        cell_builders[idx].store_ref(nodes[boc_graph[idx][j]]);
       }
       try {
-        nodes[i] = cell_builders[i].finalize(is_special[i]);
+        nodes[idx] = cell_builders[idx].finalize(is_special[idx]);
       } catch (vm::CellBuilder::CellWriteError& e) {
-        return td::Status::Error("BOC decompression failed: write error while finalizing cell.");
+        return;
       }
     } catch (vm::VmError& e) {
-      return td::Status::Error("BOC decompression failed: VM error during cell construction");
+      return;
     }
+  };
+
+  std::cerr << "Building nodes from roots" << std::endl;
+  // Build from roots using DFS
+  for (size_t index : root_indexes) {
+    build_node(index);
   }
 
   std::vector<td::Ref<vm::Cell>> root_nodes;
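Note (editorial, not part of the patch): the invariant PATCH 1 relies on is that in the ShardAccounts dictionary every fork node's DepthBalanceInfo augmentation equals the sum of its children's augmentations. For a MerkleUpdate old/new pair this means a right-side vertex's balance can be recomputed as the left-side balance plus the sum of its children's (right - left) diffs; whenever that equality holds for an ordinary cell, its data is redundant and the patch encodes it with the sentinel cell type 9. A minimal sketch of the arithmetic, assuming TON's td::RefInt256 from common/refint.h (the helper name is illustrative, not from the patch):

    #include <vector>
    #include "common/refint.h"

    // Predict the right-side (post-state) balance of a ShardAccounts vertex
    // from its left-side (pre-state) balance and the per-child balance deltas.
    td::RefInt256 predict_right_balance(td::RefInt256 left_balance,
                                        const std::vector<td::RefInt256>& child_diffs) {
      td::RefInt256 expected = std::move(left_balance);
      for (const auto& diff : child_diffs) {
        expected += diff;  // fork augmentation = sum of children augmentations
      }
      return expected;  // matches the right balance iff the vertex is droppable
    }

Compression drops the vertex only when cmp(sum_child_diff, vertex_diff) == 0 and the cell is not special; decompression inverts exactly that condition.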
From 76ababb91451dadd563917415ce7bc5a94f7ec23 Mon Sep 17 00:00:00 2001
From: olegvallas
Date: Mon, 17 Nov 2025 12:47:40 +0100
Subject: [PATCH 2/5] [compression-depth-balance] Bug fix and refactor depth
 balance decompression

---
 crypto/vm/boc-compression.cpp | 86 ++++++++++++++++++-----------------
 1 file changed, 45 insertions(+), 41 deletions(-)

diff --git a/crypto/vm/boc-compression.cpp b/crypto/vm/boc-compression.cpp
index 4070f49ab..19bd30d91 100644
--- a/crypto/vm/boc-compression.cpp
+++ b/crypto/vm/boc-compression.cpp
@@ -401,6 +401,9 @@ td::Result<td::BufferSlice> boc_compress_improved_structure_lz4(const std::vecto
 
   // Store cell data
   for (size_t node : topo_order) {
+    if (prunned_branch_level[node] == 9) {
+      continue;
+    }
     if (cell_type[node] != 1 && !is_data_small[node]) {
       continue;
     }
@@ -440,6 +443,9 @@ td::Result<td::BufferSlice> boc_compress_improved_structure_lz4(const std::vecto
 
   // Store remaining cell data
   for (size_t node : topo_order) {
+    if (prunned_branch_level[node] == 9) {
+      continue;
+    }
     if (cell_type[node] == 1 || is_data_small[node]) {
       size_t prefix_size = cell_data[node].size() % 8;
       result.append(cell_data[node].subslice(prefix_size, cell_data[node].size() - prefix_size));
@@ -552,9 +558,6 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
     size_t cell_type = bit_reader.bits().get_uint(4);
     is_special[i] = (cell_type == 9 ? false : bool(cell_type));
     is_depth_balance[i] = cell_type == 9;
-    if (is_depth_balance[i]) {
-      std::cerr << "NODE " << i << " is depth balance" << std::endl;
-    }
     if (is_special[i]) {
       prunned_branch_level[i] = cell_type - 1;
     }
@@ -735,11 +738,18 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
   // Recursively build RIGHT subtree under MerkleUpdate, pairing with LEFT subtree, computing sum diffs.
   // Note: left_idx can be SIZE_MAX (as -1 sentinel) when the left subtree has no corresponding child.
   // Sum is accumulated into sum_diff_out (if non-null), similar to compression flow.
-  std::function<void(size_t, size_t, td::RefInt256*)> build_right_under_mu =
-      [&](size_t right_idx, size_t left_idx, td::RefInt256* sum_diff_out) -> void {
-    std::cerr << "BUILDINGG NODE " << right_idx << " is_depth_balance: " << is_depth_balance[right_idx] << " length: " << cell_data_length[right_idx] << " prunned_branch_level: " << prunned_branch_level[right_idx] << std::endl;
-    if (right_idx == 1259) {
-      std::cerr << "RIGHT INDEX IS " << right_idx << std::endl;
+  std::function<td::Status(size_t, size_t, td::RefInt256*)> build_right_under_mu =
+      [&](size_t right_idx, size_t left_idx, td::RefInt256* sum_diff_out) -> td::Status {
+    if (nodes[right_idx].not_null()) {
+      if (left_idx != std::numeric_limits<size_t>::max() && sum_diff_out) {
+        vm::CellSlice cs_left(NoVm(), nodes[left_idx]);
+        vm::CellSlice cs_right(NoVm(), nodes[right_idx]);
+        td::RefInt256 vertex_diff = process_shard_accounts_vertex(cs_left, cs_right);
+        if (vertex_diff.not_null()) {
+          *sum_diff_out += vertex_diff;
+        }
+      }
+      return td::Status::OK();
     }
     // Build children first
     td::RefInt256 sum_child_diff = td::make_refint(0);
@@ -748,25 +758,24 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
       size_t left_child = (left_idx != std::numeric_limits<size_t>::max() && j < cell_refs_cnt[left_idx])
                               ? boc_graph[left_idx][j]
                               : std::numeric_limits<size_t>::max();
-      build_right_under_mu(right_child, left_child, &sum_child_diff);
+      TRY_STATUS(build_right_under_mu(right_child, left_child, &sum_child_diff));
     }
-
     // If this vertex was depth-balance-compressed, reconstruct its data from left + children sum
     if (is_depth_balance[right_idx] && left_idx != std::numeric_limits<size_t>::max()) {
       // Extract left grams
       vm::CellSlice cs_left(NoVm(), nodes[left_idx]);
+      if (!skip_hashmap_label(cs_left, 256)) {
+        return td::Status::Error("BOC decompression failed: failed to skip hashmap label");
+      }
       td::RefInt256 left_grams = extract_balance_from_depth_balance_info(cs_left);
-      if (left_grams.not_null()) {
-        td::RefInt256 expected_right_grams = left_grams;
-        expected_right_grams += sum_child_diff;
-        // Write reconstructed augmentation into builder
-        if (!write_depth_balance_grams(cell_builders[right_idx], expected_right_grams)) {
-          std::cerr << "BOC decompression failed: failed to write depth-balance grams" << std::endl;
-          exit(1);
-        }
-      } else {
-        std::cerr << "BOC decompression failed: depth-balance left vertex has no grams" << std::endl;
-        exit(1);
+      if (left_grams.is_null()) {
+        return td::Status::Error("BOC decompression failed: depth-balance left vertex has no grams");
+      }
+      td::RefInt256 expected_right_grams = left_grams;
+      expected_right_grams += sum_child_diff;
+      // Write reconstructed augmentation into builder
+      if (!write_depth_balance_grams(cell_builders[right_idx], expected_right_grams)) {
+        return td::Status::Error("BOC decompression failed: failed to write depth-balance grams");
       }
     }
 
@@ -778,12 +787,10 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
       try {
         nodes[right_idx] = cell_builders[right_idx].finalize(is_special[right_idx]);
       } catch (vm::CellBuilder::CellWriteError& e) {
-        std::cerr << "BOC decompression failed: failed to finalize right vertex" << std::endl;
-        exit(228);
+        return td::Status::Error("BOC decompression failed: failed to finalize right vertex (CellWriteError)");
      }
    } catch (vm::VmError& e) {
-      std::cerr << "BOC decompression failed: failed to finalize right vertex" << std::endl;
-      exit(228);
+      return td::Status::Error("BOC decompression failed: failed to finalize right vertex (VmError)");
    }
@@ -795,12 +802,13 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
         *sum_diff_out += vertex_diff;
       }
     }
+    return td::Status::OK();
   };
 
   // General recursive build that handles MerkleUpdate by pairing left/right subtrees
-  std::function<void(size_t)> build_node = [&](size_t idx) {
+  std::function<td::Status(size_t)> build_node = [&](size_t idx) -> td::Status {
     if (nodes[idx].not_null()) {
-      return;
+      return td::Status::OK();
     }
@@ -808,14 +816,11 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
     // If this node is a MerkleUpdate (special and first byte == 0x04),
     // build left subtree normally first, then right subtree paired with left
     if (cell_refs_cnt[idx] >= 2) {
       size_t left_idx = boc_graph[idx][0];
       size_t right_idx = boc_graph[idx][1];
       if (is_merkle_update_node(idx)) {
-        std::cerr << "FOUND MU NODE" << std::endl;
         // Build left subtree
-        build_node(left_idx);
-        std::cerr << "BUILT LEFT SUBTREE" << std::endl;
+        TRY_STATUS(build_node(left_idx));
         // Build right subtree under MU pairing with left
         td::RefInt256 right_sum_diff = td::make_refint(0);
-        build_right_under_mu(right_idx, left_idx, &right_sum_diff);
-        std::cerr << "BUILT RIGHT SUBTREE" << std::endl;
+        TRY_STATUS(build_right_under_mu(right_idx, left_idx, &right_sum_diff));
         // Finalize current MU node with built children
         try {
           for (int j = 0; j < cell_refs_cnt[idx]; ++j) {
@@ -824,18 +829,17 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
           try {
             nodes[idx] = cell_builders[idx].finalize(is_special[idx]);
           } catch (vm::CellBuilder::CellWriteError& e) {
-            // propagate by leaving null; caller will error later if needed
-            return;
+            return td::Status::Error("BOC decompression failed: failed to finalize MU node (CellWriteError)");
           }
         } catch (vm::VmError& e) {
-          return;
+          return td::Status::Error("BOC decompression failed: failed to finalize MU node (VmError)");
         }
-        return;
+        return td::Status::OK();
       }
     }
     // Default: build children normally then finalize
     for (int j = 0; j < cell_refs_cnt[idx]; ++j) {
-      build_node(boc_graph[idx][j]);
+      TRY_STATUS(build_node(boc_graph[idx][j]));
     }
     try {
       for (int j = 0; j < cell_refs_cnt[idx]; ++j) {
@@ -844,17 +848,17 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
       try {
         nodes[idx] = cell_builders[idx].finalize(is_special[idx]);
       } catch (vm::CellBuilder::CellWriteError& e) {
-        return;
+        return td::Status::Error("BOC decompression failed: failed to finalize node (CellWriteError)");
       }
     } catch (vm::VmError& e) {
-      return;
+      return td::Status::Error("BOC decompression failed: failed to finalize node (VmError)");
     }
+    return td::Status::OK();
   };
 
-  std::cerr << "Building nodes from roots" << std::endl;
   // Build from roots using DFS
   for (size_t index : root_indexes) {
-    build_node(index);
+    TRY_STATUS(build_node(index));
   }
 
   std::vector<td::Ref<vm::Cell>> root_nodes;
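Note (editorial, not part of the patch): the substance of PATCH 2 is replacing the debug-era std::cerr + exit() paths with td::Status propagation, so malformed input surfaces as an error at the boc_decompress() call site instead of killing the validator process. A stripped-down sketch of the pattern, assuming TON's td/utils/Status.h (function names here are illustrative):

    #include <vector>
    #include "td/utils/Status.h"

    td::Status check_refs_count(size_t refs_cnt) {
      if (refs_cnt > 4) {
        return td::Status::Error("BOC decompression failed: invalid cell refs count");
      }
      return td::Status::OK();
    }

    td::Status build_all(const std::vector<size_t>& refs) {
      for (size_t r : refs) {
        TRY_STATUS(check_refs_count(r));  // early-returns the error to the caller
      }
      return td::Status::OK();
    }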
From 88a23bc358cfb383d5da6b27c4f0ef9c3841ceb2 Mon Sep 17 00:00:00 2001
From: olegvallas
Date: Thu, 20 Nov 2025 13:17:39 +0100
Subject: [PATCH 3/5] [compression-depth-balance] Refactor balance parsing,
 change CMakeLists to avoid cyclic dependencies with block-auto.h

---
 crypto/CMakeLists.txt         |  2 +-
 crypto/vm/boc-compression.cpp | 78 ++++++-----------------------------
 2 files changed, 14 insertions(+), 66 deletions(-)

diff --git a/crypto/CMakeLists.txt b/crypto/CMakeLists.txt
index 1051f0d18..fb42810f4 100644
--- a/crypto/CMakeLists.txt
+++ b/crypto/CMakeLists.txt
@@ -10,7 +10,6 @@ set(TON_CRYPTO_CORE_SOURCE
   openssl/residue.cpp
   openssl/rand.cpp
   vm/boc.cpp
-  vm/boc-compression.cpp
   vm/large-boc-serializer.cpp
 
   tl/tlblib.cpp
@@ -82,6 +81,7 @@ set(TON_CRYPTO_CORE_SOURCE
   ellcurve/p256.cpp)
 
 set(TON_CRYPTO_SOURCE
+  vm/boc-compression.cpp
   vm/stack.cpp
   vm/atom.cpp
   vm/continuation.cpp
diff --git a/crypto/vm/boc-compression.cpp b/crypto/vm/boc-compression.cpp
index 19bd30d91..65b08b671 100644
--- a/crypto/vm/boc-compression.cpp
+++ b/crypto/vm/boc-compression.cpp
@@ -83,6 +83,11 @@ inline td::Result read_uint(td::BitSlice& bs, int bits) {
 
 // Decode DepthBalanceInfo and extract grams using TLB methods
 td::RefInt256 extract_balance_from_depth_balance_info(vm::CellSlice& cs) {
+  // Check hashmap label is empty ('00')
+  if (cs.size() < 2 || cs.fetch_ulong(2) != 0) {
+    return td::RefInt256{};
+  }
+
   int split_depth;
   Ref<vm::CellSlice> balance_cs_ref;
   if (!block::gen::t_DepthBalanceInfo.unpack_depth_balance(cs, split_depth, balance_cs_ref)) {
@@ -96,71 +101,24 @@ td::RefInt256 extract_balance_from_depth_balance_info(vm::CellSlice& cs) {
   }
   auto balance_cs = balance_cs_ref.write();
   auto res = block::tlb::t_Grams.as_integer_skip(balance_cs);
-  if (balance_cs.size() != 1) {
-    return td::RefInt256{};
-  }
-  int last_bit = balance_cs.fetch_ulong(1);
-  if (last_bit != 0) {
+  if (balance_cs.size() != 1 || balance_cs.fetch_ulong(1) != 0) {
     return td::RefInt256{};
   }
   return res;
 }
 
-// Skip Hashmap label for ShardAccounts (expecting '00' for empty label at root)
-bool skip_hashmap_label(vm::CellSlice& cs, int /*max_bits*/) {
-  int k = cs.fetch_ulong(2);
-  if (k != 0) {
-    return false;
-  }
-  return true;
-}
-
 // Process ShardAccounts vertex and compute balance difference (right - left)
 td::RefInt256 process_shard_accounts_vertex(vm::CellSlice& cs_left, vm::CellSlice& cs_right) {
-  if (skip_hashmap_label(cs_left, 256) && skip_hashmap_label(cs_right, 256)) {
-    auto balance_left = extract_balance_from_depth_balance_info(cs_left);
-    auto balance_right = extract_balance_from_depth_balance_info(cs_right);
-    if (balance_left.not_null() && balance_right.not_null()) {
-      td::RefInt256 diff = balance_right;
-      diff -= balance_left;
-      return diff;
-    }
+  auto balance_left = extract_balance_from_depth_balance_info(cs_left);
+  auto balance_right = extract_balance_from_depth_balance_info(cs_right);
+  if (balance_left.not_null() && balance_right.not_null()) {
+    td::RefInt256 diff = balance_right;
+    diff -= balance_left;
+    return diff;
   }
   return td::RefInt256{};
 }
 
-// Rebuild the ShardAccounts vertex from left + children diffs and compare with right
-bool reconstruct_shard_vertex_and_compare(vm::CellSlice cs_left, vm::CellSlice cs_right, const td::RefInt256& sum_child_diff) {
-  if (!skip_hashmap_label(cs_left, 256)) {
-    return false;
-  }
-  auto left_grams = extract_balance_from_depth_balance_info(cs_left);
-  if (left_grams.is_null()) {
-    return false;
-  }
-  td::RefInt256 expected_right_grams = left_grams;
-  expected_right_grams += sum_child_diff;
-
-  vm::CellBuilder cb;
-  if (!cb.store_zeroes_bool(2)) {
-    return false;
-  }
-  if (!cb.store_zeroes_bool(5)) {
-    return false;
-  }
-  if (!block::tlb::t_CurrencyCollection.pack_special(cb, expected_right_grams, td::Ref<vm::Cell>())) {
-    return false;
-  }
-  td::Ref<vm::Cell> built_cell;
-  try {
-    built_cell = cb.finalize(false);
-  } catch (vm::CellBuilder::CellWriteError&) {
-    return false;
-  }
-  vm::CellSlice built_cs(NoVm(), built_cell);
-  return built_cs.as_bitslice().to_hex() == cs_right.as_bitslice().to_hex();
-}
-
 td::Result<td::BufferSlice> boc_compress_improved_structure_lz4(const std::vector<td::Ref<vm::Cell>>& boc_roots) {
   const bool kMURemoveSubtreeSums = true;
   // Input validation
@@ -234,18 +192,11 @@ td::Result<td::BufferSlice> boc_compress_improved_structure_lz4(const std::vecto
       TRY_RESULT(child_left_id, self(self, cell_slice.prefetch_ref(0)));
       boc_graph[current_cell_id][0] = child_left_id;
       // Right branch: traverse paired with left and compute diffs inline
-      td::RefInt256 right_sum_diff = td::make_refint(0);
       TRY_RESULT(child_right_id, self(self,
                                       cell_slice.prefetch_ref(1),
                                       cell_slice.prefetch_ref(0),
-                                      true,
-                                      &right_sum_diff));
+                                      true));
       boc_graph[current_cell_id][1] = child_right_id;
-      // Any extra refs if present
-      for (int i = 2; i < cell_slice.size_refs(); ++i) {
-        TRY_RESULT(child_id, self(self, cell_slice.prefetch_ref(i)));
-        boc_graph[current_cell_id][i] = child_id;
-      }
     } else if (under_mu_right && left_cell.not_null()) {
       // Inline computation for RIGHT subtree nodes under MerkleUpdate
       vm::CellSlice cs_left(NoVm(), left_cell);
@@ -764,9 +715,6 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
     if (is_depth_balance[right_idx] && left_idx != std::numeric_limits<size_t>::max()) {
       // Extract left grams
       vm::CellSlice cs_left(NoVm(), nodes[left_idx]);
-      if (!skip_hashmap_label(cs_left, 256)) {
-        return td::Status::Error("BOC decompression failed: failed to skip hashmap label");
-      }
       td::RefInt256 left_grams = extract_balance_from_depth_balance_info(cs_left);
       if (left_grams.is_null()) {
         return td::Status::Error("BOC decompression failed: depth-balance left vertex has no grams");
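Note (editorial, not part of the patch): the '00' check folded into extract_balance_from_depth_balance_info works because a ShardAccounts fork vertex carries an empty HashmapLabel, encoded as hml_short$0 with unary length 0, i.e. exactly the two bits '00'; anything else means a labelled or otherwise non-conforming vertex, which the function now rejects itself by returning a null RefInt256, making the standalone skip_hashmap_label helper redundant. The CMakeLists change exists because this TLB-based parsing pulls in the generated block-auto code, so boc-compression.cpp moves from TON_CRYPTO_CORE_SOURCE to TON_CRYPTO_SOURCE to break the dependency cycle. A sketch of the label check, assuming TON's vm::CellSlice (the helper name is illustrative):

    #include "vm/cellslice.h"

    // True iff the slice starts with an empty HmLabel (hml_short$0, len 0, bits '00').
    // fetch_ulong(2) consumes the two label bits, mirroring the check at the top of
    // extract_balance_from_depth_balance_info.
    bool has_empty_hashmap_label(vm::CellSlice& cs) {
      return cs.size() >= 2 && cs.fetch_ulong(2) == 0;
    }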
From 32189f154d7b50a853b7ee4de6b9a6626ced4517 Mon Sep 17 00:00:00 2001
From: olegvallas
Date: Thu, 20 Nov 2025 13:54:45 +0100
Subject: [PATCH 4/5] [compression-depth-balance] Optimize decompression

---
 crypto/vm/boc-compression.cpp | 263 +++++++++++++++------------------
 1 file changed, 119 insertions(+), 144 deletions(-)

diff --git a/crypto/vm/boc-compression.cpp b/crypto/vm/boc-compression.cpp
index 65b08b671..5ebe27cfc 100644
--- a/crypto/vm/boc-compression.cpp
+++ b/crypto/vm/boc-compression.cpp
@@ -656,168 +656,143 @@ td::Result<std::vector<td::Ref<vm::Cell>>> boc_decompress_improved_structure_lz4
     bit_reader.advance(remaining_data_bits);
   }
 
-  // Build cell tree
-  std::vector<td::Ref<vm::Cell>> nodes(node_count);
+  // Build cell tree
+  std::vector<td::Ref<vm::Cell>> nodes(node_count);
 
-  // Helper: write ShardAccounts augmentation (DepthBalanceInfo with grams) into builder
-  auto write_depth_balance_grams = [&](vm::CellBuilder& cb, const td::RefInt256& grams) -> bool {
-    if (!cb.store_zeroes_bool(2)) {  // empty HmLabel
-      return false;
-    }
-    if (!cb.store_zeroes_bool(5)) {  // split_depth = 0 (<= 30)
-      return false;
-    }
-    if (!block::tlb::t_CurrencyCollection.pack_special(cb, grams, td::Ref<vm::Cell>())) {
-      return false;
-    }
-    return true;
-  };
-
-  // Helper: detect MerkleUpdate (is_special AND first byte == 0x04) without finalizing
-  auto is_merkle_update_node = [&](size_t idx) -> bool {
-    if (!is_special[idx]) {
-      return false;
-    }
-    // Need at least one full byte in data to read the tag
-    if (cell_builders[idx].get_bits() < 8) {
-      return false;
-    }
-    unsigned first_byte = cell_builders[idx].get_data()[0];
-    return first_byte == 0x04;
-  };
-
-  // Recursively build RIGHT subtree under MerkleUpdate, pairing with LEFT subtree, computing sum diffs.
-  // Note: left_idx can be SIZE_MAX (as -1 sentinel) when the left subtree has no corresponding child.
-  // Sum is accumulated into sum_diff_out (if non-null), similar to compression flow.
-  std::function<td::Status(size_t, size_t, td::RefInt256*)> build_right_under_mu =
-      [&](size_t right_idx, size_t left_idx, td::RefInt256* sum_diff_out) -> td::Status {
-    if (nodes[right_idx].not_null()) {
-      if (left_idx != std::numeric_limits<size_t>::max() && sum_diff_out) {
-        vm::CellSlice cs_left(NoVm(), nodes[left_idx]);
-        vm::CellSlice cs_right(NoVm(), nodes[right_idx]);
-        td::RefInt256 vertex_diff = process_shard_accounts_vertex(cs_left, cs_right);
-        if (vertex_diff.not_null()) {
-          *sum_diff_out += vertex_diff;
+  // Helper: write ShardAccounts augmentation (DepthBalanceInfo with grams) into builder
+  auto write_depth_balance_grams = [&](vm::CellBuilder& cb, const td::RefInt256& grams) -> bool {
+    if (!cb.store_zeroes_bool(7)) {  // empty HmLabel and split_depth
+      return false;
+    }
+    if (!block::tlb::t_CurrencyCollection.pack_special(cb, grams, td::Ref<vm::Cell>())) {
+      return false;
+    }
+    return true;
+  };
+
+  // Helper: detect MerkleUpdate (is_special AND first byte == 0x04) without finalizing
+  auto is_merkle_update_node = [&](size_t idx) -> bool {
+    if (!is_special[idx]) {
+      return false;
+    }
+    // Need at least one full byte in data to read the tag
+    if (cell_builders[idx].get_bits() < 8) {
+      return false;
+    }
+    unsigned first_byte = cell_builders[idx].get_data()[0];
+    return first_byte == 0x04;
+  };
+
+  // Helper: finalize a node by storing refs and finalizing the builder
+  auto finalize_node = [&](size_t idx) -> td::Status {
+    try {
+      for (int j = 0; j < cell_refs_cnt[idx]; ++j) {
+        cell_builders[idx].store_ref(nodes[boc_graph[idx][j]]);
       }
+      try {
+        nodes[idx] = cell_builders[idx].finalize(is_special[idx]);
+      } catch (vm::CellBuilder::CellWriteError& e) {
+        return td::Status::Error(PSTRING() << "BOC decompression failed: failed to finalize node (CellWriteError)");
+      }
+    } catch (vm::VmError& e) {
+      return td::Status::Error(PSTRING() << "BOC decompression failed: failed to finalize node (VmError)");
     }
     return td::Status::OK();
-    }
-    // Build children first
-    td::RefInt256 sum_child_diff = td::make_refint(0);
-    for (int j = 0; j < cell_refs_cnt[right_idx]; ++j) {
-      size_t right_child = boc_graph[right_idx][j];
-      size_t left_child = (left_idx != std::numeric_limits<size_t>::max() && j < cell_refs_cnt[left_idx])
-                              ? boc_graph[left_idx][j]
-                              : std::numeric_limits<size_t>::max();
-      TRY_STATUS(build_right_under_mu(right_child, left_child, &sum_child_diff));
-    }
-    // If this vertex was depth-balance-compressed, reconstruct its data from left + children sum
-    if (is_depth_balance[right_idx] && left_idx != std::numeric_limits<size_t>::max()) {
-      // Extract left grams
-      vm::CellSlice cs_left(NoVm(), nodes[left_idx]);
-      td::RefInt256 left_grams = extract_balance_from_depth_balance_info(cs_left);
-      if (left_grams.is_null()) {
-        return td::Status::Error("BOC decompression failed: depth-balance left vertex has no grams");
-      }
-      td::RefInt256 expected_right_grams = left_grams;
-      expected_right_grams += sum_child_diff;
-      // Write reconstructed augmentation into builder
-      if (!write_depth_balance_grams(cell_builders[right_idx], expected_right_grams)) {
-        return td::Status::Error("BOC decompression failed: failed to write depth-balance grams");
-      }
-    }
-
-    // Store children refs and finalize this right node
-    try {
+  };
+
+  // Recursively build right subtree under MerkleUpdate, pairing with left subtree, computing sum diffs.
+  // Sum is accumulated into sum_diff_out (if non-null), similar to compression flow.
+  std::function<td::Status(size_t, size_t, td::RefInt256*)> build_right_under_mu =
+      [&](size_t right_idx, size_t left_idx, td::RefInt256* sum_diff_out) -> td::Status {
+    if (nodes[right_idx].not_null()) {
+      if (left_idx != std::numeric_limits<size_t>::max() && sum_diff_out) {
+        vm::CellSlice cs_left(NoVm(), nodes[left_idx]);
+        vm::CellSlice cs_right(NoVm(), nodes[right_idx]);
+        td::RefInt256 vertex_diff = process_shard_accounts_vertex(cs_left, cs_right);
+        if (vertex_diff.not_null()) {
+          *sum_diff_out += vertex_diff;
+        }
+      }
+      return td::Status::OK();
    }
+    td::RefInt256 cur_right_left_diff;
+    // Build children first
+    td::RefInt256 sum_child_diff = td::make_refint(0);
     for (int j = 0; j < cell_refs_cnt[right_idx]; ++j) {
-      cell_builders[right_idx].store_ref(nodes[boc_graph[right_idx][j]]);
+      size_t right_child = boc_graph[right_idx][j];
+      size_t left_child = (left_idx != std::numeric_limits<size_t>::max() && j < cell_refs_cnt[left_idx])
+                              ? boc_graph[left_idx][j]
+                              : std::numeric_limits<size_t>::max();
+      TRY_STATUS(build_right_under_mu(right_child, left_child, &sum_child_diff));
     }
-    try {
-      nodes[right_idx] = cell_builders[right_idx].finalize(is_special[right_idx]);
-    } catch (vm::CellBuilder::CellWriteError& e) {
-      return td::Status::Error("BOC decompression failed: failed to finalize right vertex (CellWriteError)");
+    // If this vertex was depth-balance-compressed, reconstruct its data from left + children sum
+    if (is_depth_balance[right_idx]) {
+      vm::CellSlice cs_left(NoVm(), nodes[left_idx]);
+      td::RefInt256 left_grams = extract_balance_from_depth_balance_info(cs_left);
+      if (left_grams.is_null()) {
+        return td::Status::Error("BOC decompression failed: depth-balance left vertex has no grams");
+      }
+      td::RefInt256 expected_right_grams = left_grams;
+      expected_right_grams += sum_child_diff;
+      if (!write_depth_balance_grams(cell_builders[right_idx], expected_right_grams)) {
+        return td::Status::Error("BOC decompression failed: failed to write depth-balance grams");
+      }
+      cur_right_left_diff = sum_child_diff;
     }
-  } catch (vm::VmError& e) {
-    return td::Status::Error("BOC decompression failed: failed to finalize right vertex (VmError)");
-  }
-
-    // Compute this vertex diff (right - left) to propagate upward
-    if (left_idx != std::numeric_limits<size_t>::max()) {
-      vm::CellSlice cs_left(NoVm(), nodes[left_idx]);
-      vm::CellSlice cs_right(NoVm(), nodes[right_idx]);
-      td::RefInt256 vertex_diff = process_shard_accounts_vertex(cs_left, cs_right);
-      if (sum_diff_out && vertex_diff.not_null()) {
-        *sum_diff_out += vertex_diff;
+
+    // Store children refs and finalize this right node
+    TRY_STATUS(finalize_node(right_idx));
+
+    // Compute this vertex diff (right - left) to propagate upward
+    if (cur_right_left_diff.is_null() && left_idx != std::numeric_limits<size_t>::max()) {
+      vm::CellSlice cs_left(NoVm(), nodes[left_idx]);
+      vm::CellSlice cs_right(NoVm(), nodes[right_idx]);
+      cur_right_left_diff = process_shard_accounts_vertex(cs_left, cs_right);
+    }
+    if (sum_diff_out && cur_right_left_diff.not_null()) {
+      *sum_diff_out += cur_right_left_diff;
     }
-    }
-    return td::Status::OK();
-  };
-
-  // General recursive build that handles MerkleUpdate by pairing left/right subtrees
-  std::function<td::Status(size_t)> build_node = [&](size_t idx) -> td::Status {
-    if (nodes[idx].not_null()) {
     return td::Status::OK();
-    }
-    // If this node is a MerkleUpdate (special and first byte == 0x04),
-    // build left subtree normally first, then right subtree paired with left
-    if (cell_refs_cnt[idx] >= 2) {
-      size_t left_idx = boc_graph[idx][0];
-      size_t right_idx = boc_graph[idx][1];
+  };
+
+  // General recursive build that handles MerkleUpdate by pairing left/right subtrees
+  std::function<td::Status(size_t)> build_node = [&](size_t idx) -> td::Status {
+    if (nodes[idx].not_null()) {
+      return td::Status::OK();
+    }
+    // If this node is a MerkleUpdate, build left subtree normally first, then right subtree paired with left
     if (is_merkle_update_node(idx)) {
-      // Build left subtree
+      size_t left_idx = boc_graph[idx][0];
+      size_t right_idx = boc_graph[idx][1];
       TRY_STATUS(build_node(left_idx));
-      // Build right subtree under MU pairing with left
-      td::RefInt256 right_sum_diff = td::make_refint(0);
-      TRY_STATUS(build_right_under_mu(right_idx, left_idx, &right_sum_diff));
-      // Finalize current MU node with built children
-      try {
-        for (int j = 0; j < cell_refs_cnt[idx]; ++j) {
-          cell_builders[idx].store_ref(nodes[boc_graph[idx][j]]);
-        }
-        try {
-          nodes[idx] = cell_builders[idx].finalize(is_special[idx]);
-        } catch (vm::CellBuilder::CellWriteError& e) {
-          return td::Status::Error("BOC decompression failed: failed to finalize MU node (CellWriteError)");
-        }
-      } catch (vm::VmError& e) {
-        return td::Status::Error("BOC decompression failed: failed to finalize MU node (VmError)");
-      }
+      TRY_STATUS(build_right_under_mu(right_idx, left_idx, nullptr));
+      TRY_STATUS(finalize_node(idx));
       return td::Status::OK();
+    } else {
+      // Default: build children normally then finalize
+      for (int j = 0; j < cell_refs_cnt[idx]; ++j) {
+        TRY_STATUS(build_node(boc_graph[idx][j]));
+      }
     }
+
+    TRY_STATUS(finalize_node(idx));
+    return td::Status::OK();
+  };
+
+  // Build from roots using DFS
+  for (size_t index : root_indexes) {
+    TRY_STATUS(build_node(index));
   }
-    // Default: build children normally then finalize
-    for (int j = 0; j < cell_refs_cnt[idx]; ++j) {
-      TRY_STATUS(build_node(boc_graph[idx][j]));
-    }
-    try {
-      for (int j = 0; j < cell_refs_cnt[idx]; ++j) {
-        cell_builders[idx].store_ref(nodes[boc_graph[idx][j]]);
-      }
-      try {
-        nodes[idx] = cell_builders[idx].finalize(is_special[idx]);
-      } catch (vm::CellBuilder::CellWriteError& e) {
-        return td::Status::Error("BOC decompression failed: failed to finalize node (CellWriteError)");
-      }
-    } catch (vm::VmError& e) {
-      return td::Status::Error("BOC decompression failed: failed to finalize node (VmError)");
    }
-    return td::Status::OK();
-  };
-
-  // Build from roots using DFS
-  for (size_t index : root_indexes) {
-    TRY_STATUS(build_node(index));
+
+  std::vector<td::Ref<vm::Cell>> root_nodes;
+  root_nodes.reserve(root_count);
+  for (size_t index : root_indexes) {
+    root_nodes.push_back(nodes[index]);
   }
+
+  return root_nodes;
+}
 
-  std::vector<td::Ref<vm::Cell>> root_nodes;
-  root_nodes.reserve(root_count);
-  for (size_t index : root_indexes) {
-    root_nodes.push_back(nodes[index]);
-  }
-
-  return root_nodes;
-}
-
 td::Result<td::BufferSlice> boc_compress(const std::vector<td::Ref<vm::Cell>>& boc_roots, CompressionAlgorithm algo) {
   // Check for empty input
   if (boc_roots.empty()) {
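Note (editorial, not part of the patch): two facts drive the restructuring in PATCH 4. Cells in a BOC form a DAG, so build_right_under_mu can reach an already-finalized shared subtree, where it only needs to re-derive the (right - left) diff; and for a depth-balance-reconstructed vertex that diff equals sum_child_diff by construction, so caching it in cur_right_left_diff avoids re-parsing the freshly built cell. A minimal sketch of the memoized DAG walk, with illustrative names only:

    #include <cstddef>
    #include <vector>

    struct DagWalk {
      std::vector<std::vector<size_t>> children;  // ref graph between cells
      std::vector<bool> built;                    // memo: finalized nodes

      void build(size_t idx) {
        if (built[idx]) {
          return;  // shared subtree already finalized, reuse it
        }
        for (size_t child : children[idx]) {
          build(child);  // children must exist before store_ref()
        }
        built[idx] = true;  // the real code finalizes the CellBuilder here
      }
    };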
deletions(-) diff --git a/validator-session/candidate-serializer.cpp b/validator-session/candidate-serializer.cpp index 94dfe2048..56a5797b7 100644 --- a/validator-session/candidate-serializer.cpp +++ b/validator-session/candidate-serializer.cpp @@ -27,10 +27,16 @@ namespace ton::validatorsession { td::Result serialize_candidate(const tl_object_ptr& block, bool compression_enabled) { if (!compression_enabled) { - return serialize_tl_object(block, true); + LOG(INFO) << "COMPR_BENCHMARK serialize_candidate START_COMPRESS block_id=" << block->root_hash_.to_hex(); + auto res = serialize_tl_object(block, true); + LOG(INFO) << "COMPR_BENCHMARK serialize_candidate END_COMPRESS block_id=" << block->root_hash_.to_hex() + << " compression_enabled=" << compression_enabled + << " data_size_bytes=" << block->data_.size() + block->collated_data_.size() + << " res_size=" << block->data_.size() + block->collated_data_.size(); + return res; } size_t decompressed_size; - TRY_RESULT(compressed, compress_candidate_data(block->data_, block->collated_data_, decompressed_size)) + TRY_RESULT(compressed, compress_candidate_data(block->data_, block->collated_data_, decompressed_size, block->root_hash_.to_hex())) return create_serialize_tl_object( 0, block->src_, block->round_, block->root_hash_, (int)decompressed_size, std::move(compressed)); } @@ -40,7 +46,12 @@ td::Result> deserialize_candi int max_decompressed_data_size, int proto_version) { if (!compression_enabled) { - return fetch_tl_object(data, true); + TRY_RESULT(res, fetch_tl_object(data, true)); + LOG(INFO) << "COMPR_BENCHMARK deserialize_candidate START_DECOMPRESS block_id=" << res->root_hash_.to_hex(); + LOG(INFO) << "COMPR_BENCHMARK deserialize_candidate END_DECOMPRESS block_id=" << res->root_hash_.to_hex() + << " compression_enabled=" << compression_enabled + << " received_size=" << res->data_.size() + res->collated_data_.size(); + return std::move(res); } TRY_RESULT(f, fetch_tl_object(data, true)); td::Result> res; @@ -55,7 +66,7 @@ td::Result> deserialize_candi return td::Status::Error("decompressed size is too big"); } TRY_RESULT(p, decompress_candidate_data(c.data_, false, c.decompressed_size_, - max_decompressed_data_size, proto_version)); + max_decompressed_data_size, proto_version, c.root_hash_.to_hex())); return create_tl_object(c.src_, c.round_, c.root_hash_, std::move(p.first), std::move(p.second)); }(); @@ -65,7 +76,7 @@ td::Result> deserialize_candi if (c.data_.size() > max_decompressed_data_size) { return td::Status::Error("Compressed data is too big"); } - TRY_RESULT(p, decompress_candidate_data(c.data_, true, 0, max_decompressed_data_size, proto_version)); + TRY_RESULT(p, decompress_candidate_data(c.data_, true, 0, max_decompressed_data_size, proto_version, c.root_hash_.to_hex())); return create_tl_object(c.src_, c.round_, c.root_hash_, std::move(p.first), std::move(p.second)); }(); @@ -74,7 +85,7 @@ td::Result> deserialize_candi } td::Result compress_candidate_data(td::Slice block, td::Slice collated_data, - size_t& decompressed_size) { + size_t& decompressed_size, std::string root_hash) { vm::BagOfCells boc1, boc2; TRY_STATUS(boc1.deserialize(block)); if (boc1.get_root_count() != 1) { @@ -85,10 +96,15 @@ td::Result compress_candidate_data(td::Slice block, td::Slice c for (int i = 0; i < boc2.get_root_count(); ++i) { roots.push_back(boc2.get_root_cell(i)); } + LOG(INFO) << "COMPR_BENCHMARK compress_candidate_data START_COMPRESS block_id=" << root_hash; TRY_RESULT(data, vm::std_boc_serialize_multi(std::move(roots), 2)); 
decompressed_size = data.size(); td::BufferSlice compressed = td::lz4_compress(data); LOG(DEBUG) << "Compressing block candidate: " << block.size() + collated_data.size() << " -> " << compressed.size(); + LOG(INFO) << "COMPR_BENCHMARK compress_candidate_data END_COMPRESS block_id=" << root_hash + << " compression_enabled=" << true + << " data_size_bytes=" << block.size() + collated_data.size() + << " res_size=" << compressed.size(); return compressed; } @@ -96,14 +112,19 @@ td::Result> decompress_candidate_dat bool improved_compression, int decompressed_size, int max_decompressed_size, - int proto_version) { + int proto_version, + std::string root_hash) { std::vector> roots; if (!improved_compression) { + LOG(INFO) << "COMPR_BENCHMARK decompress_candidate_data START_DECOMPRESS block_id=" << root_hash; TRY_RESULT(decompressed, td::lz4_decompress(compressed, decompressed_size)); if (decompressed.size() != (size_t)decompressed_size) { return td::Status::Error("decompressed size mismatch"); } TRY_RESULT_ASSIGN(roots, vm::std_boc_deserialize_multi(decompressed)); + LOG(INFO) << "COMPR_BENCHMARK decompress_candidate_data END_DECOMPRESS block_id=" << root_hash + << " compression_enabled=" << true + << " received_size=" << compressed.size(); } else { TRY_RESULT_ASSIGN(roots, vm::boc_decompress(compressed, max_decompressed_size)); } diff --git a/validator-session/candidate-serializer.h b/validator-session/candidate-serializer.h index cf688a692..31640cba1 100644 --- a/validator-session/candidate-serializer.h +++ b/validator-session/candidate-serializer.h @@ -28,11 +28,12 @@ td::Result> deserialize_candi int proto_version); td::Result compress_candidate_data(td::Slice block, td::Slice collated_data, - size_t& decompressed_size); + size_t& decompressed_size, std::string root_hash); td::Result> decompress_candidate_data(td::Slice compressed, bool improved_compression, int decompressed_size, int max_decompressed_size, - int proto_version); + int proto_version, + std::string root_hash); } // namespace ton::validatorsession diff --git a/validator/collator-node/utils.cpp b/validator/collator-node/utils.cpp index 41b6dc432..1fe2d3bef 100644 --- a/validator/collator-node/utils.cpp +++ b/validator/collator-node/utils.cpp @@ -31,7 +31,7 @@ tl_object_ptr serialize_candidate(const BlockCa } size_t decompressed_size; td::BufferSlice compressed = - validatorsession::compress_candidate_data(block.data, block.collated_data, decompressed_size).move_as_ok(); + validatorsession::compress_candidate_data(block.data, block.collated_data, decompressed_size, block.id.to_str()).move_as_ok(); return create_tl_object( 0, PublicKey{pubkeys::Ed25519{block.pubkey.as_bits256()}}.tl(), create_tl_block_id(block.id), (int)decompressed_size, std::move(compressed)); @@ -63,7 +63,8 @@ td::Result deserialize_candidate(tl_object_ptr deserialize_candidate(tl_object_ptr td::Result { TRY_RESULT(p, validatorsession::decompress_candidate_data(c.data_, true, 0, - max_decompressed_data_size, proto_version)); + max_decompressed_data_size, proto_version, + create_block_id(c.id_).to_str())); auto collated_data_hash = td::sha256_bits256(p.second); auto key = PublicKey{c.source_}; if (!key.is_ed25519()) { diff --git a/validator/full-node-fast-sync-overlays.cpp b/validator/full-node-fast-sync-overlays.cpp index e25f63623..069e2fd8f 100644 --- a/validator/full-node-fast-sync-overlays.cpp +++ b/validator/full-node-fast-sync-overlays.cpp @@ -40,7 +40,7 @@ void FullNodeFastSyncOverlay::process_broadcast(PublicKeyHash src, ton_api::tonN } void 
FullNodeFastSyncOverlay::process_block_broadcast(PublicKeyHash src, ton_api::tonNode_Broadcast &query) { - auto B = deserialize_block_broadcast(query, overlay::Overlays::max_fec_broadcast_size()); + auto B = deserialize_block_broadcast(query, overlay::Overlays::max_fec_broadcast_size(), "fast-sync"); if (B.is_error()) { LOG(DEBUG) << "dropped broadcast: " << B.move_as_error(); return; @@ -109,7 +109,7 @@ void FullNodeFastSyncOverlay::process_block_candidate_broadcast(PublicKeyHash sr td::uint32 validator_set_hash; td::BufferSlice data; auto S = deserialize_block_candidate_broadcast(query, block_id, cc_seqno, validator_set_hash, data, - overlay::Overlays::max_fec_broadcast_size()); + overlay::Overlays::max_fec_broadcast_size(), "fast-sync"); if (S.is_error()) { LOG(DEBUG) << "dropped broadcast: " << S; return; @@ -191,7 +191,7 @@ void FullNodeFastSyncOverlay::send_broadcast(BlockBroadcast broadcast) { } VLOG(FULL_NODE_DEBUG) << "Sending block broadcast in fast sync overlay (with compression): " << broadcast.block_id.to_str(); - auto B = serialize_block_broadcast(broadcast, true); // compression_enabled = true + auto B = serialize_block_broadcast(broadcast, true, "fast-sync"); // compression_enabled = true if (B.is_error()) { VLOG(FULL_NODE_WARNING) << "failed to serialize block broadcast: " << B.move_as_error(); return; @@ -206,7 +206,7 @@ void FullNodeFastSyncOverlay::send_block_candidate(BlockIdExt block_id, Catchain return; } auto B = - serialize_block_candidate_broadcast(block_id, cc_seqno, validator_set_hash, data, true); // compression enabled + serialize_block_candidate_broadcast(block_id, cc_seqno, validator_set_hash, data, true, "fast-sync"); // compression enabled if (B.is_error()) { VLOG(FULL_NODE_WARNING) << "failed to serialize block candidate broadcast: " << B.move_as_error(); return; diff --git a/validator/full-node-private-overlay.cpp b/validator/full-node-private-overlay.cpp index b62a7c190..ad1700a93 100644 --- a/validator/full-node-private-overlay.cpp +++ b/validator/full-node-private-overlay.cpp @@ -41,7 +41,7 @@ void FullNodePrivateBlockOverlay::process_broadcast(PublicKeyHash src, } void FullNodePrivateBlockOverlay::process_block_broadcast(PublicKeyHash src, ton_api::tonNode_Broadcast &query) { - auto B = deserialize_block_broadcast(query, overlay::Overlays::max_fec_broadcast_size()); + auto B = deserialize_block_broadcast(query, overlay::Overlays::max_fec_broadcast_size(), "private"); if (B.is_error()) { LOG(DEBUG) << "dropped broadcast: " << B.move_as_error(); return; @@ -81,7 +81,7 @@ void FullNodePrivateBlockOverlay::process_block_candidate_broadcast(PublicKeyHas td::uint32 validator_set_hash; td::BufferSlice data; auto S = deserialize_block_candidate_broadcast(query, block_id, cc_seqno, validator_set_hash, data, - overlay::Overlays::max_fec_broadcast_size()); + overlay::Overlays::max_fec_broadcast_size(), "private"); if (S.is_error()) { LOG(DEBUG) << "dropped broadcast: " << S; return; @@ -166,7 +166,7 @@ void FullNodePrivateBlockOverlay::send_block_candidate(BlockIdExt block_id, Catc return; } auto B = - serialize_block_candidate_broadcast(block_id, cc_seqno, validator_set_hash, data, true); // compression enabled + serialize_block_candidate_broadcast(block_id, cc_seqno, validator_set_hash, data, true, "private"); // compression enabled if (B.is_error()) { VLOG(FULL_NODE_WARNING) << "failed to serialize block candidate broadcast: " << B.move_as_error(); return; @@ -182,7 +182,7 @@ void FullNodePrivateBlockOverlay::send_broadcast(BlockBroadcast broadcast) { } 
VLOG(FULL_NODE_DEBUG) << "Sending block broadcast in private overlay" << (enable_compression_ ? " (with compression)" : "") << ": " << broadcast.block_id.to_str(); - auto B = serialize_block_broadcast(broadcast, enable_compression_); + auto B = serialize_block_broadcast(broadcast, enable_compression_, "private"); if (B.is_error()) { VLOG(FULL_NODE_WARNING) << "failed to serialize block broadcast: " << B.move_as_error(); return; @@ -323,7 +323,7 @@ void FullNodeCustomOverlay::process_block_broadcast(PublicKeyHash src, ton_api:: << src; return; } - auto B = deserialize_block_broadcast(query, overlay::Overlays::max_fec_broadcast_size()); + auto B = deserialize_block_broadcast(query, overlay::Overlays::max_fec_broadcast_size(), "custom"); if (B.is_error()) { LOG(DEBUG) << "dropped broadcast: " << B.move_as_error(); return; @@ -371,7 +371,7 @@ void FullNodeCustomOverlay::process_block_candidate_broadcast(PublicKeyHash src, td::uint32 validator_set_hash; td::BufferSlice data; auto S = deserialize_block_candidate_broadcast(query, block_id, cc_seqno, validator_set_hash, data, - overlay::Overlays::max_fec_broadcast_size()); + overlay::Overlays::max_fec_broadcast_size(), "custom"); if (S.is_error()) { LOG(DEBUG) << "dropped broadcast: " << S; return; @@ -423,7 +423,7 @@ void FullNodeCustomOverlay::send_broadcast(BlockBroadcast broadcast) { } VLOG(FULL_NODE_DEBUG) << "Sending block broadcast to custom overlay \"" << name_ << "\": " << broadcast.block_id.to_str(); - auto B = serialize_block_broadcast(broadcast, true); // compression_enabled = true + auto B = serialize_block_broadcast(broadcast, true, "custom"); // compression_enabled = true if (B.is_error()) { VLOG(FULL_NODE_WARNING) << "failed to serialize block broadcast: " << B.move_as_error(); return; @@ -438,7 +438,7 @@ void FullNodeCustomOverlay::send_block_candidate(BlockIdExt block_id, CatchainSe return; } auto B = - serialize_block_candidate_broadcast(block_id, cc_seqno, validator_set_hash, data, true); // compression enabled + serialize_block_candidate_broadcast(block_id, cc_seqno, validator_set_hash, data, true, "custom"); // compression enabled if (B.is_error()) { VLOG(FULL_NODE_WARNING) << "failed to serialize block candidate broadcast: " << B.move_as_error(); return; diff --git a/validator/full-node-serializer.cpp b/validator/full-node-serializer.cpp index adce0fe22..4a5a0caee 100644 --- a/validator/full-node-serializer.cpp +++ b/validator/full-node-serializer.cpp @@ -28,19 +28,30 @@ namespace ton::validator::fullnode { -td::Result serialize_block_broadcast(const BlockBroadcast& broadcast, bool compression_enabled) { +td::Result serialize_block_broadcast(const BlockBroadcast& broadcast, bool compression_enabled, + std::string overlay) { std::vector> sigs; for (auto& sig : broadcast.signatures) { sigs.emplace_back(create_tl_object(sig.node, sig.signature.clone())); } if (!compression_enabled) { - return create_serialize_tl_object( + LOG(INFO) << "COMPR_BENCHMARK serialize_block_broadcast START_COMPRESS block_id=" << broadcast.block_id.to_str(); + + auto res = create_serialize_tl_object( create_tl_block_id(broadcast.block_id), broadcast.catchain_seqno, broadcast.validator_set_hash, std::move(sigs), broadcast.proof.clone(), broadcast.data.clone()); + LOG(INFO) << "COMPR_BENCHMARK serialize_block_broadcast END_COMPRESS block_id=" << broadcast.block_id.to_str() + << " overlay=" << overlay + << " compression_enabled=" << compression_enabled + << " data_size_bytes=" << broadcast.data.size() + broadcast.proof.size() + << " res_size=" << 
broadcast.data.size() + broadcast.proof.size(); + return res; } TRY_RESULT(proof_root, vm::std_boc_deserialize(broadcast.proof)); TRY_RESULT(data_root, vm::std_boc_deserialize(broadcast.data)); + + LOG(INFO) << "COMPR_BENCHMARK serialize_block_broadcast START_COMPRESS block_id=" << broadcast.block_id.to_str(); TRY_RESULT(boc, vm::std_boc_serialize_multi({proof_root, data_root}, 2)); td::BufferSlice data = create_serialize_tl_object(std::move(sigs), std::move(boc)); @@ -48,26 +59,43 @@ td::Result serialize_block_broadcast(const BlockBroadcast& broa VLOG(FULL_NODE_DEBUG) << "Compressing block broadcast: " << broadcast.data.size() + broadcast.proof.size() + broadcast.signatures.size() * 96 << " -> " << compressed.size(); - return create_serialize_tl_object( + auto res = create_serialize_tl_object( create_tl_block_id(broadcast.block_id), broadcast.catchain_seqno, broadcast.validator_set_hash, 0, std::move(compressed)); + LOG(INFO) << "COMPR_BENCHMARK serialize_block_broadcast END_COMPRESS block_id=" << broadcast.block_id.to_str() + << " overlay=" << overlay + << " compression_enabled=" << compression_enabled + << " data_size_bytes=" << broadcast.data.size() + broadcast.proof.size() + broadcast.signatures.size() * 96 + << " res_size=" << compressed.size(); + return res; } -static td::Result deserialize_block_broadcast(ton_api::tonNode_blockBroadcast& f) { +static td::Result deserialize_block_broadcast(ton_api::tonNode_blockBroadcast& f, std::string overlay) { + auto block_id = create_block_id(f.id_); + LOG(INFO) << "COMPR_BENCHMARK deserialize_block_broadcast START_DECOMPRESS block_id=" << block_id.to_str(); + std::vector signatures; for (auto& sig : f.signatures_) { signatures.emplace_back(BlockSignature{sig->who_, std::move(sig->signature_)}); } - return BlockBroadcast{create_block_id(f.id_), - std::move(signatures), - static_cast(f.catchain_seqno_), - static_cast(f.validator_set_hash_), - std::move(f.data_), - std::move(f.proof_)}; + auto result = BlockBroadcast{create_block_id(f.id_), + std::move(signatures), + static_cast(f.catchain_seqno_), + static_cast(f.validator_set_hash_), + std::move(f.data_), + std::move(f.proof_)}; + LOG(INFO) << "COMPR_BENCHMARK deserialize_block_broadcast END_DECOMPRESS block_id=" << block_id.to_str() + << " overlay=" << overlay + << " compression_enabled=" << false + << " received_size=" << f.data_.size() + f.proof_.size(); + return result; } static td::Result deserialize_block_broadcast(ton_api::tonNode_blockBroadcastCompressed& f, - int max_decompressed_size) { + int max_decompressed_size, std::string overlay) { + auto block_id = create_block_id(f.id_); + LOG(INFO) << "COMPR_BENCHMARK deserialize_block_broadcast START_DECOMPRESS block_id=" << block_id.to_str(); + TRY_RESULT(decompressed, td::lz4_decompress(f.compressed_, max_decompressed_size)); TRY_RESULT(f2, fetch_tl_object(decompressed, true)); std::vector signatures; @@ -78,6 +106,10 @@ static td::Result deserialize_block_broadcast(ton_api::tonNode_b if (roots.size() != 2) { return td::Status::Error("expected 2 roots in boc"); } + LOG(INFO) << "COMPR_BENCHMARK deserialize_block_broadcast END_DECOMPRESS block_id=" << block_id.to_str() + << " overlay=" << overlay + << " compression_enabled=" << true + << " received_size=" << f.compressed_.size(); TRY_RESULT(proof, vm::std_boc_serialize(roots[0], 0)); TRY_RESULT(data, vm::std_boc_serialize(roots[1], 31)); VLOG(FULL_NODE_DEBUG) << "Decompressing block broadcast: " << f.compressed_.size() << " -> " @@ -113,12 +145,12 @@ static td::Result 
+  LOG(INFO) << "COMPR_BENCHMARK deserialize_block_broadcast END_DECOMPRESS block_id=" << block_id.to_str()
+            << " overlay=" << overlay
+            << " compression_enabled=" << true
+            << " received_size=" << f.compressed_.size();
   TRY_RESULT(proof, vm::std_boc_serialize(roots[0], 0));
   TRY_RESULT(data, vm::std_boc_serialize(roots[1], 31));
   VLOG(FULL_NODE_DEBUG) << "Decompressing block broadcast: " << f.compressed_.size() << " -> "
@@ -113,12 +145,12 @@ static td::Result<BlockBroadcast> deserialize_block_broadcast(ton_api::tonNode_b
 }
 
 td::Result<BlockBroadcast> deserialize_block_broadcast(ton_api::tonNode_Broadcast& obj,
-                                                       int max_decompressed_data_size) {
+                                                       int max_decompressed_data_size, std::string overlay) {
   td::Result<BlockBroadcast> B;
   ton_api::downcast_call(obj,
-                         td::overloaded([&](ton_api::tonNode_blockBroadcast& f) { B = deserialize_block_broadcast(f); },
+                         td::overloaded([&](ton_api::tonNode_blockBroadcast& f) { B = deserialize_block_broadcast(f, overlay); },
                                         [&](ton_api::tonNode_blockBroadcastCompressed& f) {
-                                          B = deserialize_block_broadcast(f, max_decompressed_data_size);
+                                          B = deserialize_block_broadcast(f, max_decompressed_data_size, overlay);
                                         },
                                         [&](ton_api::tonNode_blockBroadcastCompressedV2& f) {
                                           B = deserialize_block_broadcast(f, max_decompressed_data_size);
@@ -130,8 +162,14 @@ td::Result<BlockBroadcast> deserialize_block_broadcast(ton_api::tonNode_Broadcas
 
 td::Result<td::BufferSlice> serialize_block_full(const BlockIdExt& id, td::Slice proof, td::Slice data,
                                                  bool is_proof_link, bool compression_enabled) {
   if (!compression_enabled) {
-    return create_serialize_tl_object<ton_api::tonNode_dataFull>(create_tl_block_id(id), td::BufferSlice(proof),
-                                                                 td::BufferSlice(data), is_proof_link);
+    LOG(INFO) << "COMPR_BENCHMARK serialize_block_full START_COMPRESS block_id=" << id.to_str();
+    auto res = create_serialize_tl_object<ton_api::tonNode_dataFull>(create_tl_block_id(id), td::BufferSlice(proof),
+                                                                     td::BufferSlice(data), is_proof_link);
+    LOG(INFO) << "COMPR_BENCHMARK serialize_block_full END_COMPRESS block_id=" << id.to_str()
+              << " compression_enabled=" << compression_enabled
+              << " data_size_bytes=" << data.size() + proof.size()
+              << " res_size=" << res.size();
+    return res;
   }
   TRY_RESULT(proof_root, vm::std_boc_deserialize(proof));
   TRY_RESULT(data_root, vm::std_boc_deserialize(data));
@@ -145,9 +183,13 @@ td::Result<td::BufferSlice> serialize_block_full(const BlockIdExt& id, td::Slice
 static td::Status deserialize_block_full(ton_api::tonNode_dataFull& f, BlockIdExt& id, td::BufferSlice& proof,
                                          td::BufferSlice& data, bool& is_proof_link) {
   id = create_block_id(f.id_);
+  LOG(INFO) << "COMPR_BENCHMARK deserialize_block_full START_DECOMPRESS block_id=" << id.to_str();
   proof = std::move(f.proof_);
   data = std::move(f.block_);
   is_proof_link = f.is_link_;
+  LOG(INFO) << "COMPR_BENCHMARK deserialize_block_full END_DECOMPRESS block_id=" << id.to_str()
+            << " compression_enabled=" << false
+            << " received_size=" << proof.size() + data.size();
   return td::Status::OK();
 }
 
@@ -200,19 +242,27 @@ td::Status deserialize_block_full(ton_api::tonNode_DataFull& obj, BlockIdExt& id
 
 td::Result<td::BufferSlice> serialize_block_candidate_broadcast(BlockIdExt block_id, CatchainSeqno cc_seqno,
                                                                 td::uint32 validator_set_hash, td::Slice data,
-                                                                bool compression_enabled) {
+                                                                bool compression_enabled, std::string overlay) {
   if (!compression_enabled) {
     return create_serialize_tl_object<ton_api::tonNode_newBlockCandidateBroadcast>(
         create_tl_block_id(block_id), cc_seqno, validator_set_hash,
         create_tl_object(Bits256::zero(), td::BufferSlice()), td::BufferSlice(data));
   }
   TRY_RESULT(root, vm::std_boc_deserialize(data));
+  LOG(INFO) << "COMPR_BENCHMARK serialize_block_candidate_broadcast START_COMPRESS block_id=" << block_id.to_str();
   TRY_RESULT(data_new, vm::std_boc_serialize(root, 2));
   td::BufferSlice compressed = td::lz4_compress(data_new);
-  VLOG(FULL_NODE_DEBUG) << "Compressing block candidate broadcast: " << data.size() << " -> " << compressed.size();
-  return create_serialize_tl_object<ton_api::tonNode_newBlockCandidateBroadcastCompressed>(
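+  // The compressed size is captured before std::move() hands the buffer to the TL
+  // object below; a moved-from td::BufferSlice reports size 0.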
+  auto compressed_size = compressed.size();
+  VLOG(FULL_NODE_DEBUG) << "Compressing block candidate broadcast: " << data.size() << " -> " << compressed_size;
+  auto res = create_serialize_tl_object<ton_api::tonNode_newBlockCandidateBroadcastCompressed>(
       create_tl_block_id(block_id), cc_seqno, validator_set_hash,
       create_tl_object(Bits256::zero(), td::BufferSlice()), 0, std::move(compressed));
+  LOG(INFO) << "COMPR_BENCHMARK serialize_block_candidate_broadcast END_COMPRESS block_id=" << block_id.to_str()
+            << " overlay=" << overlay
+            << " compression_enabled=" << compression_enabled
+            << " data_size_bytes=" << data.size()
+            << " res_size=" << compressed_size;
+  return res;
 }
 
 static td::Status deserialize_block_candidate_broadcast(ton_api::tonNode_newBlockCandidateBroadcast& obj,
@@ -228,12 +278,17 @@ static td::Status deserialize_block_candidate_broadcast(ton_api::tonNode_newBloc
 static td::Status deserialize_block_candidate_broadcast(ton_api::tonNode_newBlockCandidateBroadcastCompressed& obj,
                                                         BlockIdExt& block_id, CatchainSeqno& cc_seqno,
                                                         td::uint32& validator_set_hash, td::BufferSlice& data,
-                                                        int max_decompressed_data_size) {
+                                                        int max_decompressed_data_size, std::string overlay) {
   block_id = create_block_id(obj.id_);
   cc_seqno = obj.catchain_seqno_;
   validator_set_hash = obj.validator_set_hash_;
+  LOG(INFO) << "COMPR_BENCHMARK deserialize_block_candidate_broadcast START_DECOMPRESS block_id=" << block_id.to_str();
   TRY_RESULT(decompressed, td::lz4_decompress(obj.compressed_, max_decompressed_data_size));
   TRY_RESULT(root, vm::std_boc_deserialize(decompressed));
+  LOG(INFO) << "COMPR_BENCHMARK deserialize_block_candidate_broadcast END_DECOMPRESS block_id=" << block_id.to_str()
+            << " overlay=" << overlay
+            << " compression_enabled=" << true
+            << " received_size=" << obj.compressed_.size();
   TRY_RESULT_ASSIGN(data, vm::std_boc_serialize(root, 31));
   VLOG(FULL_NODE_DEBUG) << "Decompressing block candidate broadcast: " << obj.compressed_.size() << " -> "
                         << data.size();
@@ -260,7 +315,8 @@ static td::Status deserialize_block_candidate_broadcast(ton_api::tonNode_newBloc
 
 td::Status deserialize_block_candidate_broadcast(ton_api::tonNode_Broadcast& obj, BlockIdExt& block_id,
                                                  CatchainSeqno& cc_seqno, td::uint32& validator_set_hash,
-                                                 td::BufferSlice& data, int max_decompressed_data_size) {
+                                                 td::BufferSlice& data, int max_decompressed_data_size,
+                                                 std::string overlay) {
   td::Status S;
   ton_api::downcast_call(obj, td::overloaded(
                                   [&](ton_api::tonNode_newBlockCandidateBroadcast& f) {
@@ -269,7 +325,7 @@ td::Status deserialize_block_candidate_broadcast(ton_api::tonNode_Broadcast& obj
                                   },
                                   [&](ton_api::tonNode_newBlockCandidateBroadcastCompressed& f) {
                                     S = deserialize_block_candidate_broadcast(f, block_id, cc_seqno, validator_set_hash,
-                                                                              data, max_decompressed_data_size);
+                                                                              data, max_decompressed_data_size, overlay);
                                   },
                                   [&](ton_api::tonNode_newBlockCandidateBroadcastCompressedV2& f) {
                                     S = deserialize_block_candidate_broadcast(f, block_id, cc_seqno, validator_set_hash,
diff --git a/validator/full-node-serializer.hpp b/validator/full-node-serializer.hpp
index d28495f69..dc63676a4 100644
--- a/validator/full-node-serializer.hpp
+++ b/validator/full-node-serializer.hpp
@@ -20,8 +20,10 @@
 
 namespace ton::validator::fullnode {
 
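+// Note: the 'overlay' parameter added below is a human-readable label used only in
+// COMPR_BENCHMARK log lines; current call sites pass "public", "private" or "custom".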
-td::Result<td::BufferSlice> serialize_block_broadcast(const BlockBroadcast& broadcast, bool compression_enabled);
-td::Result<BlockBroadcast> deserialize_block_broadcast(ton_api::tonNode_Broadcast& obj, int max_decompressed_data_size);
+td::Result<td::BufferSlice> serialize_block_broadcast(const BlockBroadcast& broadcast, bool compression_enabled,
+                                                      std::string overlay);
+td::Result<BlockBroadcast> deserialize_block_broadcast(ton_api::tonNode_Broadcast& obj, int max_decompressed_data_size,
+                                                       std::string overlay);
 td::Result<td::BufferSlice> serialize_block_full(const BlockIdExt& id, td::Slice proof, td::Slice data,
                                                  bool is_proof_link, bool compression_enabled);
@@ -30,9 +32,10 @@ td::Status deserialize_block_full(ton_api::tonNode_DataFull& obj, BlockIdExt& id
 
 td::Result<td::BufferSlice> serialize_block_candidate_broadcast(BlockIdExt block_id, CatchainSeqno cc_seqno,
                                                                 td::uint32 validator_set_hash, td::Slice data,
-                                                                bool compression_enabled);
+                                                                bool compression_enabled, std::string overlay);
 td::Status deserialize_block_candidate_broadcast(ton_api::tonNode_Broadcast& obj, BlockIdExt& block_id,
                                                  CatchainSeqno& cc_seqno, td::uint32& validator_set_hash,
-                                                 td::BufferSlice& data, int max_decompressed_data_size);
+                                                 td::BufferSlice& data, int max_decompressed_data_size,
+                                                 std::string overlay);
 
 }  // namespace ton::validator::fullnode
diff --git a/validator/full-node-shard.cpp b/validator/full-node-shard.cpp
index 98fc39344..503b4767c 100644
--- a/validator/full-node-shard.cpp
+++ b/validator/full-node-shard.cpp
@@ -810,7 +810,7 @@ void FullNodeShardImpl::process_block_candidate_broadcast(PublicKeyHash src, ton
   td::uint32 validator_set_hash;
   td::BufferSlice data;
   auto S = deserialize_block_candidate_broadcast(query, block_id, cc_seqno, validator_set_hash, data,
-                                                 overlay::Overlays::max_fec_broadcast_size());
+                                                 overlay::Overlays::max_fec_broadcast_size(), "public");
   if (data.size() > FullNode::max_block_size()) {
     VLOG(FULL_NODE_WARNING) << "received block candidate with too big size from " << src;
     return;
@@ -837,7 +837,7 @@ void FullNodeShardImpl::process_broadcast(PublicKeyHash src, ton_api::tonNode_bl
 }
 
 void FullNodeShardImpl::process_block_broadcast(PublicKeyHash src, ton_api::tonNode_Broadcast &query) {
-  auto B = deserialize_block_broadcast(query, overlay::Overlays::max_fec_broadcast_size());
+  auto B = deserialize_block_broadcast(query, overlay::Overlays::max_fec_broadcast_size(), "public");
   if (B.is_error()) {
     LOG(DEBUG) << "dropped broadcast: " << B.move_as_error();
     return;
@@ -935,7 +935,7 @@ void FullNodeShardImpl::send_block_candidate(BlockIdExt block_id, CatchainSeqno
     return;
   }
   auto B =
-      serialize_block_candidate_broadcast(block_id, cc_seqno, validator_set_hash, data, true);  // compression enabled
+      serialize_block_candidate_broadcast(block_id, cc_seqno, validator_set_hash, data, true, "public");  // compression enabled
   if (B.is_error()) {
     VLOG(FULL_NODE_WARNING) << "failed to serialize block candidate broadcast: " << B.move_as_error();
     return;
@@ -951,7 +951,7 @@ void FullNodeShardImpl::send_broadcast(BlockBroadcast broadcast) {
     return;
   }
   VLOG(FULL_NODE_DEBUG) << "Sending block broadcast in private overlay: " << broadcast.block_id.to_str();
-  auto B = serialize_block_broadcast(broadcast, false);  // compression_enabled = false
+  auto B = serialize_block_broadcast(broadcast, false, "public");  // compression_enabled = false
   if (B.is_error()) {
     VLOG(FULL_NODE_WARNING) << "failed to serialize block broadcast: " << B.move_as_error();
     return;