#pragma once

#include "srsran/adt/detail/byte_buffer_range_helpers.h"
-#include "srsran/adt/detail/byte_buffer_segment_pool.h"
#include "fmt/format.h"
-#include <vector>

namespace srsran {

+/// \brief Sets the size of the default byte_buffer segment pool.
+///
+/// This function asserts if the pool has already been initialized.
+/// \param nof_segments Number of byte_buffer_segments stored in the pool.
+/// \param memory_block_size Size, in bytes, of each memory block, on which a byte_buffer_segment header and payload
+/// will be stored.
+void init_byte_buffer_segment_pool(std::size_t nof_segments, std::size_t memory_block_size = 1024);
+
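As a usage note (not part of this commit), the pool would typically be sized once at application start-up, before any byte_buffer is created. The values below are placeholders, not recommended settings:

  // Illustrative start-up call; segment count and block size are placeholder values.
  srsran::init_byte_buffer_segment_pool(/* nof_segments = */ 1U << 14, /* memory_block_size = */ 2048);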
/// \brief Non-owning view to a byte sequence.
/// The underlying byte sequence is not contiguous in memory. Instead, it is represented as an intrusive linked list of
/// byte buffer segments, where each segment contains a span of bytes.
@@ -134,101 +140,9 @@ class byte_buffer
    /// One of the segments shares the same memory block with the byte_buffer control block.
    node_t* segment_in_cb_memory_block = nullptr;

-    void destroy_node(node_t* node)
-    {
-      node->~node_t();
-      if (node != segment_in_cb_memory_block) {
-        detail::byte_buffer_segment_pool::get_instance().deallocate_node(node);
-      }
-    }
-
-    ~control_block()
-    {
-      // Destroy and return all segments back to the segment memory pool.
-      for (node_t* node = segments.head; node != nullptr; node = node->next) {
-        destroy_node(node);
-      }
-    }
-  };
-
-  /// \brief Linear allocator for memory_block obtained from byte_buffer_segment_pool.
-  struct memory_arena_linear_allocator {
-    /// Pointer to the memory block obtained from byte_buffer_segment_pool.
-    void* mem_block = nullptr;
-    /// Offset in bytes from the beginning of the memory block, determining where the next allocation will be made.
-    size_t offset = 0;
-
-    memory_arena_linear_allocator() noexcept :
-      mem_block([]() {
-        static auto& pool = detail::get_default_byte_buffer_segment_pool();
-        return pool.allocate_node(pool.memory_block_size());
-      }())
-    {
-    }
-
-    void* allocate(size_t sz, size_t al) noexcept
-    {
-      void* p = align_next(static_cast<char*>(mem_block) + offset, al);
-      offset = (static_cast<char*>(p) - static_cast<char*>(mem_block)) + sz;
-      return p;
-    }
-
-    bool empty() const { return mem_block == nullptr; }
-
-    size_t space_left() const { return detail::get_default_byte_buffer_segment_pool().memory_block_size() - offset; }
-  };
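The helper removed above is a classic bump (linear) allocator over a single fixed-size memory block: each allocation rounds the current offset up to the requested alignment and advances it. A self-contained sketch of the same pattern, independent of the srsRAN pool (all names below are illustrative and assume power-of-two alignments):

  #include <cstddef>

  // Minimal bump allocator over a caller-provided block; illustrative only.
  struct bump_arena {
    std::byte*  block;
    std::size_t capacity;
    std::size_t offset = 0;

    void* allocate(std::size_t sz, std::size_t align) noexcept
    {
      std::size_t aligned = (offset + align - 1) & ~(align - 1); // round offset up; align must be a power of two.
      if (aligned + sz > capacity) {
        return nullptr; // not enough space left in the block.
      }
      offset = aligned + sz;
      return block + aligned;
    }

    std::size_t space_left() const noexcept { return capacity - offset; }
  };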
-
-  /// Allocator for byte_buffer control_block that will leverage the \c memory_arena_linear_allocator.
-  template <typename T>
-  struct control_block_allocator {
-  public:
-    using value_type = T;
-
-    template <typename U>
-    struct rebind {
-      typedef control_block_allocator<U> other;
-    };
-
-    control_block_allocator(memory_arena_linear_allocator& arena_) noexcept : arena(&arena_) {}
-
-    control_block_allocator(const control_block_allocator<T>& other) noexcept = default;
-
-    template <typename U, std::enable_if_t<not std::is_same<U, T>::value, int> = 0>
-    control_block_allocator(const control_block_allocator<U>& other) noexcept : arena(other.arena)
-    {
-    }
-
-    control_block_allocator& operator=(const control_block_allocator<T>& other) noexcept = default;
-
-    value_type* allocate(size_t n) noexcept
-    {
-      srsran_sanity_check(n == 1, "control_block_allocator can only allocate one control block at a time.");
-      srsran_sanity_check(not arena->empty(), "Memory arena is empty");
-      srsran_assert(arena->space_left() >= sizeof(value_type),
-                    "control_block_allocator memory block size is too small.");
-
-      return static_cast<value_type*>(arena->allocate(sizeof(value_type), alignof(std::max_align_t)));
-    }
-
-    void deallocate(value_type* p, size_t n) noexcept
-    {
-      // Note: at this stage the arena ptr is probably dangling. Do not touch it.
-
-      static auto& pool = detail::get_default_byte_buffer_segment_pool();
-
-      srsran_assert(n == 1, "control_block_allocator can only deallocate one control block at a time.");
+    void destroy_node(node_t* node) const;

-      pool.deallocate_node(static_cast<void*>(p));
-    }
-
-    bool operator==(const control_block_allocator& other) const { return arena == other.arena; }
-    bool operator!=(const control_block_allocator& other) const { return !(*this == other); }
-
-  private:
-    template <typename U>
-    friend struct control_block_allocator;
-
-    memory_arena_linear_allocator* arena;
+    ~control_block();
  };

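Since only the declarations of destroy_node() and ~control_block() remain in the header, their bodies presumably move to a source file. A sketch of what those out-of-line definitions could look like, assuming they mirror the removed inline bodies above (the destructor here reads the next pointer before destroying a node, which the removed loop did not do):

  // Sketch only: assumes the definitions reproduce the removed inline bodies.
  void byte_buffer::control_block::destroy_node(node_t* node) const
  {
    node->~node_t();
    if (node != segment_in_cb_memory_block) {
      detail::byte_buffer_segment_pool::get_instance().deallocate_node(node);
    }
  }

  byte_buffer::control_block::~control_block()
  {
    // Destroy and return all segments back to the segment memory pool.
    for (node_t* node = segments.head; node != nullptr;) {
      node_t* next = node->next; // save the next pointer before the node is destroyed.
      destroy_node(node);
      node = next;
    }
  }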
  /// Headroom given to the first segment of the byte_buffer.
@@ -430,116 +344,19 @@ class byte_buffer
  bool append(const byte_buffer_slice& view);

  /// Prepends bytes to byte_buffer. This function may allocate new segments.
-  bool prepend(span<const uint8_t> bytes)
-  {
-    if (empty()) {
-      // the byte buffer is empty. Prepending is the same as appending.
-      return append(bytes);
-    }
-    for (size_t count = 0; count < bytes.size();) {
-      if (ctrl_blk_ptr->segments.head->headroom() == 0) {
-        if (not prepend_segment(bytes.size() - count)) {
-          return false;
-        }
-      }
-      size_t              to_write = std::min(ctrl_blk_ptr->segments.head->headroom(), bytes.size() - count);
-      span<const uint8_t> subspan  = bytes.subspan(bytes.size() - to_write - count, to_write);
-      ctrl_blk_ptr->segments.head->prepend(subspan);
-      ctrl_blk_ptr->pkt_len += to_write;
-      count += to_write;
-    }
-    return true;
-  }
+  bool prepend(span<const uint8_t> bytes);

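As a usage illustration (not part of this commit), prepend() is what lets a layer place its header in front of an already-built payload. The identifiers below are made up for the example:

  byte_buffer pdu;
  if (not pdu.append(sdu_bytes)) {     // sdu_bytes: hypothetical span<const uint8_t> payload.
    // Handle segment allocation failure.
  }
  if (not pdu.prepend(header_bytes)) { // header_bytes: hypothetical span<const uint8_t> header.
    // Handle segment allocation failure.
  }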
  /// \brief Prepend data of byte buffer to this byte buffer.
-  bool prepend(const byte_buffer& other)
-  {
-    srsran_assert(&other != this, "Self-append not supported");
-    if (other.empty()) {
-      return true;
-    }
-    if (empty()) {
-      // the byte buffer is empty. Prepending is the same as appending.
-      return append(other);
-    }
-    for (span<const uint8_t> seg : other.segments()) {
-      node_t* node = create_segment(0);
-      if (node == nullptr) {
-        return false;
-      }
-      node->append(seg);
-      node->next                  = ctrl_blk_ptr->segments.head;
-      ctrl_blk_ptr->segments.head = node;
-      ctrl_blk_ptr->pkt_len += seg.size();
-    }
-    return true;
-  }
+  bool prepend(const byte_buffer& other);

  /// \brief Prepend data of r-value byte buffer to this byte buffer. The segments of the provided byte buffer can get
  /// "stolen" if the byte buffer is the last reference to the segments.
-  bool prepend(byte_buffer&& other)
-  {
-    srsran_assert(&other != this, "Self-append not supported");
-    if (other.empty()) {
-      return true;
-    }
-    if (empty()) {
-      // the byte buffer is empty. Prepending is the same as appending.
-      append(std::move(other));
-      return true;
-    }
-    if (not other.ctrl_blk_ptr.unique()) {
-      // Deep copy of segments.
-      prepend(other);
-      return true;
-    }
-
-    // This is the last reference to "other". Shallow copy, except control segment.
-    node_t* node = create_segment(0);
-    if (node == nullptr) {
-      return false;
-    }
-    node->append(span<uint8_t>{other.ctrl_blk_ptr->segment_in_cb_memory_block->data(),
-                               other.ctrl_blk_ptr->segment_in_cb_memory_block->length()});
-    ctrl_blk_ptr->pkt_len += other.ctrl_blk_ptr->pkt_len;
-    other.ctrl_blk_ptr->segments.tail->next = ctrl_blk_ptr->segments.head;
-    node->next                              = other.ctrl_blk_ptr->segment_in_cb_memory_block->next;
-    if (other.ctrl_blk_ptr->segment_in_cb_memory_block == other.ctrl_blk_ptr->segments.head) {
-      ctrl_blk_ptr->segments.head = node;
-    } else {
-      for (node_t* seg = other.ctrl_blk_ptr->segments.head; seg->next != nullptr; seg = seg->next) {
-        if (seg->next == other.ctrl_blk_ptr->segment_in_cb_memory_block) {
-          seg->next = node;
-          break;
-        }
-      }
-    }
-    other.ctrl_blk_ptr->segments.head       = other.ctrl_blk_ptr->segment_in_cb_memory_block;
-    other.ctrl_blk_ptr->segments.tail       = other.ctrl_blk_ptr->segment_in_cb_memory_block;
-    other.ctrl_blk_ptr->segments.head->next = nullptr;
-    other.ctrl_blk_ptr.reset();
-    return true;
-  }
+  bool prepend(byte_buffer&& other);

  /// Prepends space in byte_buffer. This function may allocate new segments.
  /// \param nof_bytes Number of bytes to reserve at header.
  /// \return range of bytes that were reserved.
-  byte_buffer_view reserve_prepend(size_t nof_bytes)
-  {
-    size_t rem_bytes = nof_bytes;
-    while (rem_bytes > 0) {
-      if (empty() or ctrl_blk_ptr->segments.head->headroom() == 0) {
-        if (not prepend_segment(rem_bytes)) {
-          return {};
-        }
-      }
-      size_t to_reserve = std::min(ctrl_blk_ptr->segments.head->headroom(), rem_bytes);
-      ctrl_blk_ptr->segments.head->reserve_prepend(to_reserve);
-      rem_bytes -= to_reserve;
-    }
-    ctrl_blk_ptr->pkt_len += nof_bytes;
-    return byte_buffer_view{begin(), begin() + nof_bytes};
-  }
+  byte_buffer_view reserve_prepend(size_t nof_bytes);

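For illustration, reserve_prepend() is typically used when the header size is known but its content is not yet available: reserve the space up front, then write into the returned view later. The names and values below are illustrative, and error handling is omitted:

  byte_buffer      pdu;
  byte_buffer_view hdr = pdu.reserve_prepend(2); // reserve two bytes of header space (size is illustrative).
  pdu.append(payload_bytes);                     // payload_bytes: hypothetical span<const uint8_t>.
  hdr[0] = 0x80;                                 // fill the reserved header bytes once their content is known.
  hdr[1] = 0x01;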
  /// Clear byte buffer.
  void clear() { ctrl_blk_ptr.reset(); }
@@ -712,79 +529,13 @@ class byte_buffer
  }

private:
-  node_t* create_head_segment(size_t headroom)
-  {
-    static auto&        pool       = detail::get_default_byte_buffer_segment_pool();
-    static const size_t block_size = pool.memory_block_size();
-
-    // Create control block using allocator.
-    memory_arena_linear_allocator arena;
-    if (arena.empty()) {
-      byte_buffer::warn_alloc_failure();
-      return nullptr;
-    }
-    ctrl_blk_ptr = std::allocate_shared<control_block>(control_block_allocator<control_block>{arena});
-    if (ctrl_blk_ptr == nullptr) {
-      return nullptr;
-    }
-
-    // For first segment of byte_buffer, add a headroom.
-    void* segment_start = arena.allocate(sizeof(node_t), alignof(node_t));
-    srsran_assert(block_size > arena.offset, "The memory block provided by the pool is too small");
-    size_t  segment_size  = block_size - arena.offset;
-    void*   payload_start = arena.allocate(segment_size, 1);
-    node_t* node          = new (segment_start)
-        node_t(span<uint8_t>{static_cast<uint8_t*>(payload_start), segment_size}, std::min(headroom, segment_size));
-
-    // Register segment as sharing the same memory block with control block.
-    ctrl_blk_ptr->segment_in_cb_memory_block = node;
-
-    return node;
-  }
-
-  node_t* create_segment(size_t headroom)
-  {
-    static auto&        pool       = detail::get_default_byte_buffer_segment_pool();
-    static const size_t block_size = pool.memory_block_size();
-
-    // Allocate memory block.
-    memory_arena_linear_allocator arena;
-    if (arena.empty()) {
-      byte_buffer::warn_alloc_failure();
-      return nullptr;
-    }
-    void* segment_start = arena.allocate(sizeof(node_t), alignof(node_t));
-    srsran_assert(block_size > arena.offset, "The memory block provided by the pool is too small");
-    size_t segment_size  = block_size - arena.offset;
-    void*  payload_start = arena.allocate(segment_size, 1);
-    return new (segment_start)
-        node_t(span<uint8_t>{static_cast<uint8_t*>(payload_start), segment_size}, std::min(headroom, segment_size));
-  }
+  node_t* create_head_segment(size_t headroom);

-  bool append_segment(size_t headroom_suggestion)
-  {
-    node_t* segment = empty() ? create_head_segment(headroom_suggestion) : create_segment(headroom_suggestion);
-    if (segment == nullptr) {
-      return false;
-    }
-
-    // Append new segment to linked list.
-    ctrl_blk_ptr->segments.push_back(*segment);
-    return true;
-  }
+  static node_t* create_segment(size_t headroom);

-  bool prepend_segment(size_t headroom_suggestion)
-  {
-    // Note: Add HEADROOM for first segment.
-    node_t* segment = empty() ? create_head_segment(headroom_suggestion) : create_segment(headroom_suggestion);
-    if (segment == nullptr) {
-      return false;
-    }
+  bool append_segment(size_t headroom_suggestion);

-    // Prepend new segment to linked list.
-    ctrl_blk_ptr->segments.push_front(*segment);
-    return true;
-  }
+  bool prepend_segment(size_t headroom_suggestion);

  /// \brief Removes last segment of the byte_buffer.
  /// Note: This operation is O(N), as it requires recomputing the tail.
@@ -805,11 +556,7 @@ class byte_buffer
    ctrl_blk_ptr->destroy_node(tail);
  }

-  static void warn_alloc_failure()
-  {
-    static srslog::basic_logger& logger = srslog::fetch_basic_logger("ALL");
-    logger.warning("POOL: Failure to allocate byte buffer segment");
-  }
+  static void warn_alloc_failure();

  // TODO: Optimize. shared_ptr<> has a lot of boilerplate we don't need. It is also hard to determine the size
  // of the shared_ptr control block allocation and how much we need to discount in the segment.