38 changes: 19 additions & 19 deletions src/hotspot/share/gc/shared/freeListAllocator.cpp
@@ -41,26 +41,26 @@ FreeListAllocator::PendingList::PendingList() :

 size_t FreeListAllocator::PendingList::add(FreeNode* node) {
   assert(node->next() == nullptr, "precondition");
-  FreeNode* old_head = AtomicAccess::xchg(&_head, node);
+  FreeNode* old_head = _head.exchange(node);
   if (old_head != nullptr) {
     node->set_next(old_head);
   } else {
     assert(_tail == nullptr, "invariant");
     _tail = node;
   }
-  return AtomicAccess::add(&_count, size_t(1));
+  return _count.add_then_fetch(1u);
 }

 typename FreeListAllocator::NodeList FreeListAllocator::PendingList::take_all() {
-  NodeList result{AtomicAccess::load(&_head), _tail, AtomicAccess::load(&_count)};
-  AtomicAccess::store(&_head, (FreeNode*)nullptr);
+  NodeList result{_head.load_relaxed(), _tail, _count.load_relaxed()};
+  _head.store_relaxed(nullptr);
   _tail = nullptr;
-  AtomicAccess::store(&_count, size_t(0));
+  _count.store_relaxed(0u);
   return result;
 }

 size_t FreeListAllocator::PendingList::count() const {
-  return AtomicAccess::load(&_count);
+  return _count.load_relaxed();
 }

 FreeListAllocator::FreeListAllocator(const char* name, FreeListConfig* config) :
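For readers outside HotSpot: this patch replaces the free-function `AtomicAccess::` calls with member calls on the `Atomic<T>` wrapper from `runtime/atomic.hpp`. Below is a minimal standalone approximation of the wrapper shape these hunks rely on, sketched over `std::atomic`; the member names mirror the calls in the diff, but the `std::memory_order` mapping and the conservative-fencing assumption for the read-modify-write operations are guesses, not HotSpot's actual implementation:

```cpp
#include <atomic>

// Illustrative stand-in for HotSpot's Atomic<T> (runtime/atomic.hpp).
template <typename T>
class Atomic {
  std::atomic<T> _value;

public:
  constexpr Atomic(T v = T()) : _value(v) {}

  // Relaxed accessors: atomicity only, no ordering.
  T load_relaxed() const { return _value.load(std::memory_order_relaxed); }
  void store_relaxed(T v) { _value.store(v, std::memory_order_relaxed); }

  // Acquire/release pair, used for _active_pending_list below.
  T load_acquire() const { return _value.load(std::memory_order_acquire); }
  void release_store(T v) { _value.store(v, std::memory_order_release); }

  // Read-modify-write operations; assumed conservatively fenced.
  T exchange(T v) { return _value.exchange(v); }
  T add_then_fetch(T v) { return _value.fetch_add(v) + v; }  // returns new value
  T sub_then_fetch(T v) { return _value.fetch_sub(v) - v; }  // returns new value

  // Returns the old value; the swap succeeded iff the result == expected.
  T compare_exchange(T expected, T desired) {
    _value.compare_exchange_strong(expected, desired);
    return expected;
  }
};
```

Note the `*_then_fetch` return convention: `_count.add_then_fetch(1u)` above yields the new count, just as the old `AtomicAccess::add` did, so `PendingList::add` still reports the post-increment size to its caller.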
@@ -85,26 +85,26 @@ void FreeListAllocator::delete_list(FreeNode* list) {
 }

 FreeListAllocator::~FreeListAllocator() {
-  uint index = AtomicAccess::load(&_active_pending_list);
+  uint index = _active_pending_list.load_relaxed();
   NodeList pending_list = _pending_lists[index].take_all();
   delete_list(pending_list._head);
   delete_list(_free_list.pop_all());
 }

 // Drop existing nodes and reset all counters
 void FreeListAllocator::reset() {
-  uint index = AtomicAccess::load(&_active_pending_list);
+  uint index = _active_pending_list.load_relaxed();
   _pending_lists[index].take_all();
   _free_list.pop_all();
-  _free_count = 0;
+  _free_count.store_relaxed(0u);
 }

 size_t FreeListAllocator::free_count() const {
-  return AtomicAccess::load(&_free_count);
+  return _free_count.load_relaxed();
 }

 size_t FreeListAllocator::pending_count() const {
-  uint index = AtomicAccess::load(&_active_pending_list);
+  uint index = _active_pending_list.load_relaxed();
   return _pending_lists[index].count();
 }

@@ -124,7 +124,7 @@ void* FreeListAllocator::allocate() {
     // Decrement count after getting buffer from free list. This, along
     // with incrementing count before adding to free list, ensures count
     // never underflows.
-    size_t count = AtomicAccess::sub(&_free_count, 1u);
+    size_t count = _free_count.sub_then_fetch(1u);
     assert((count + 1) != 0, "_free_count underflow");
     return node;
   } else {
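The three comment lines in this hunk carry the whole correctness argument for `_free_count`: the count grows before nodes are published and shrinks only after a node has been taken, so it always bounds the free-list length from above and the subtraction cannot wrap. A compilable toy reduction of that ordering (illustrative only; `free_count` and the stub functions are hypothetical, not allocator code):

```cpp
#include <atomic>
#include <cassert>
#include <cstddef>

std::atomic<std::size_t> free_count{0};

// Producer side (mirrors try_transfer_pending): grow the count *before*
// the nodes become poppable, so a consumer never holds more nodes than
// the count has admitted.
void publish_nodes(std::size_t n) {
  free_count.fetch_add(n);      // 1: count covers the new nodes first
  // _free_list.prepend(...);   // 2: only now do they become visible
}

// Consumer side (mirrors allocate): take a node *before* decrementing,
// so every fetch_sub is backed by a prior fetch_add.
void take_node() {
  // FreeNode* node = _free_list.pop();        // 1: pop succeeded (assumed)
  std::size_t old = free_count.fetch_sub(1);   // 2: then shrink the count
  assert(old != 0 && "underflow impossible given the ordering above");
}
```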
@@ -149,7 +149,7 @@ void FreeListAllocator::release(void* free_node) {
   // we're done with what might be the pending list to be transferred.
   {
     GlobalCounter::CriticalSection cs(Thread::current());
-    uint index = AtomicAccess::load_acquire(&_active_pending_list);
+    uint index = _active_pending_list.load_acquire();
     size_t count = _pending_lists[index].add(node);
     if (count <= _config->transfer_threshold()) return;
   }
@@ -164,17 +164,17 @@ void FreeListAllocator::release(void* free_node) {
 // in-progress transfer.
 bool FreeListAllocator::try_transfer_pending() {
   // Attempt to claim the lock.
-  if (AtomicAccess::load(&_transfer_lock) || // Skip CAS if likely to fail.
-      AtomicAccess::cmpxchg(&_transfer_lock, false, true)) {
+  if (_transfer_lock.load_relaxed() || // Skip CAS if likely to fail.
+      _transfer_lock.compare_exchange(false, true)) {
     return false;
   }
   // Have the lock; perform the transfer.

   // Change which pending list is active. Don't need an atomic RMW since
   // we have the lock and we're the only writer.
-  uint index = AtomicAccess::load(&_active_pending_list);
+  uint index = _active_pending_list.load_relaxed();
   uint new_active = (index + 1) % ARRAY_SIZE(_pending_lists);
-  AtomicAccess::release_store(&_active_pending_list, new_active);
+  _active_pending_list.release_store(new_active);

   // Wait for all critical sections in the buffer life-cycle to complete.
   // This includes _free_list pops and adding to the now inactive pending
@@ -186,11 +186,11 @@ bool FreeListAllocator::try_transfer_pending() {
   size_t count = transfer_list._entry_count;
   if (count > 0) {
     // Update count first so no underflow in allocate().
-    AtomicAccess::add(&_free_count, count);
+    _free_count.add_then_fetch(count);
     _free_list.prepend(*transfer_list._head, *transfer_list._tail);
     log_trace(gc, freelist)
       ("Transferred %s pending to free: %zu", name(), count);
   }
-  AtomicAccess::release_store(&_transfer_lock, false);
+  _transfer_lock.release_store(false);
   return true;
 }
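Two idioms in `try_transfer_pending()` deserve a gloss: the load-before-CAS claim of `_transfer_lock` (test and test-and-set, which avoids a futile read-modify-write on a contended cache line), and the plain load plus release-store rotation of `_active_pending_list`, which needs no atomic RMW because the lock holder is the only writer. A standalone sketch of both over `std::atomic`, with hypothetical names:

```cpp
#include <atomic>

std::atomic<bool> transfer_lock{false};
std::atomic<unsigned> active_pending_list{0};
constexpr unsigned NUM_PENDING_LISTS = 2;

bool try_claim_transfer_lock() {
  // Cheap load first: skip the CAS when it would likely fail.
  if (transfer_lock.load(std::memory_order_relaxed)) return false;
  bool expected = false;
  return transfer_lock.compare_exchange_strong(expected, true);
}

void rotate_active_list() {
  // Single writer under the lock: plain load, then publish with release.
  unsigned index = active_pending_list.load(std::memory_order_relaxed);
  active_pending_list.store((index + 1) % NUM_PENDING_LISTS,
                            std::memory_order_release);
}
```

The release store pairs with the `load_acquire` in `release()` above: a releasing thread that observes the new index also observes everything the transferring thread wrote before rotating.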
24 changes: 12 additions & 12 deletions src/hotspot/share/gc/shared/freeListAllocator.hpp
@@ -27,7 +27,7 @@

#include "memory/allocation.hpp"
#include "memory/padded.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/lockFreeStack.hpp"

@@ -62,15 +62,15 @@ class FreeListConfig {
 // to the free list making them available for re-allocation.
 class FreeListAllocator {
   struct FreeNode {
-    FreeNode* volatile _next;
+    Atomic<FreeNode*> _next;

     FreeNode() : _next (nullptr) { }

-    FreeNode* next() { return AtomicAccess::load(&_next); }
+    FreeNode* next() { return _next.load_relaxed(); }

-    FreeNode* volatile* next_addr() { return &_next; }
+    Atomic<FreeNode*>* next_addr() { return &_next; }

-    void set_next(FreeNode* next) { AtomicAccess::store(&_next, next); }
+    void set_next(FreeNode* next) { _next.store_relaxed(next); }
   };

   struct NodeList {
Expand All @@ -85,8 +85,8 @@ class FreeListAllocator {

   class PendingList {
     FreeNode* _tail;
-    FreeNode* volatile _head;
-    volatile size_t _count;
+    Atomic<FreeNode*> _head;
+    Atomic<size_t> _count;

     NONCOPYABLE(PendingList);

@@ -105,20 +105,20 @@
     NodeList take_all();
   };

-  static FreeNode* volatile* next_ptr(FreeNode& node) { return node.next_addr(); }
-  typedef LockFreeStack<FreeNode, &next_ptr> Stack;
+  static Atomic<FreeNode*>* next_ptr(FreeNode& node) { return node.next_addr(); }
+  using Stack = LockFreeStack<FreeNode, &next_ptr>;

   FreeListConfig* _config;
   char _name[DEFAULT_PADDING_SIZE - sizeof(FreeListConfig*)]; // Use name as padding.

 #define DECLARE_PADDED_MEMBER(Id, Type, Name) \
   Type Name; DEFINE_PAD_MINUS_SIZE(Id, DEFAULT_PADDING_SIZE, sizeof(Type))
-  DECLARE_PADDED_MEMBER(1, volatile size_t, _free_count);
+  DECLARE_PADDED_MEMBER(1, Atomic<size_t>, _free_count);
   DECLARE_PADDED_MEMBER(2, Stack, _free_list);
-  DECLARE_PADDED_MEMBER(3, volatile bool, _transfer_lock);
+  DECLARE_PADDED_MEMBER(3, Atomic<bool>, _transfer_lock);
 #undef DECLARE_PADDED_MEMBER

-  volatile uint _active_pending_list;
+  Atomic<uint> _active_pending_list;
   PendingList _pending_lists[2];

   void delete_list(FreeNode* list);
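On the padded members: `DECLARE_PADDED_MEMBER` keeps each concurrently-written field on its own `DEFAULT_PADDING_SIZE` region so `_free_count`, `_free_list`, and `_transfer_lock` do not false-share a cache line (and `_name` doubles as the padding after `_config`). Assuming `DEFINE_PAD_MINUS_SIZE` from `memory/padded.hpp` has its usual char-array shape, the first declaration expands to roughly:

```cpp
// Approximate expansion of DECLARE_PADDED_MEMBER(1, Atomic<size_t>, _free_count);
// illustrative only -- consult memory/padded.hpp for the real macro.
Atomic<size_t> _free_count;
char _pad_buf1[DEFAULT_PADDING_SIZE - sizeof(Atomic<size_t>)];
```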