namespace memory_manager_detail {
      skip_list_(other.skip_list_),
      nodes_before_skip_list_(other.nodes_before_skip_list_),
  if (head_ != nullptr) {
  } else if (size_ != 0) {
  assert(head != nullptr);
  auto* old_head = head_;
  if (old_head == nullptr) {
  std::lock_guard<Spinlock> guard(lock_);
  assert(head != nullptr);
  assert(tail != nullptr);
  if (head_ == nullptr) {
  std::lock_guard<Spinlock> guard(lock_);
  assert(head_ != nullptr);
  assert(tail_ != nullptr);
  std::lock_guard<Spinlock> guard(lock_);
  *size = size_n_pages;
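// The constructor below caps how many free nodes a single thread may
// cache before memory is returned to the central list. Worked example
// with illustrative (not default) values: size_n_pages = 32768,
// kMetadataSize = 32, size = 64, max_mem_per_thread_factor = 1:
//   num_elements_per_n_pages_ = (32768 - 32) / 64 = 511
//   max_nodes_per_thread_     = 511 * 1           = 511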
                                     uint64_t size_n_pages, real_t growth_rate,
                                     uint64_t max_mem_per_thread_factor)
    : size_n_pages_(size_n_pages),
      growth_rate_(growth_rate),
      max_nodes_per_thread_((size_n_pages_ - kMetadataSize) / size *
                            max_mem_per_thread_factor),
      num_elements_per_n_pages_((size_n_pages_ - kMetadataSize) / size),
      central_(num_elements_per_n_pages_) {
  uint64_t size = block.end_pointer_ - block.start_pointer_;
  assert(static_cast<uint64_t>(tid) < free_lists_.size());
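  // Allocation tries the cheapest source first: the calling thread's
  // private free list. Only when it is empty does the code below refill
  // it in bulk, from the central list or from a newly allocated block.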
  if (!tl_list.Empty()) {
    auto* ret = tl_list.PopFront();
    assert(ret != nullptr);
  Node *head = nullptr, *tail = nullptr;
  if (head == nullptr) {
    tl_list.PushBackN(head, tail);
    auto* ret = tl_list.PopFront();
    assert(ret != nullptr);
  auto* ret = tl_list.PopFront();
  assert(ret != nullptr);
  auto* node = new (p) Node();
  tl_list.PushFront(node);
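  // On deallocation the freed object's own bytes are reused as a list
  // Node via placement new and pushed onto the thread-local free list;
  // memory migrates back to the shared central list only once that list
  // outgrows max_nodes_per_thread_ (see PopBackN below).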
    Node* head = nullptr;
    Node* tail = nullptr;
    tl_list.PopBackN(&head, &tail);
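    // PopBackN detaches a whole [head, tail] chain in one call, so the
    // hand-back (presumably to the central list) pays one lock
    // acquisition per batch instead of one per node.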
238 "Size must be a multiple of MemoryManager::kSizeNPages");
  if (block == nullptr) {
    Log::Fatal("NumaPoolAllocator::AllocNewMemoryBlock", "Allocation failed");
  }
  // Round the block's start address up to the next N-page boundary and
  // record the block's extent for later size queries.
  auto n_pages_aligned =
      RoundUpTo(reinterpret_cast<uint64_t>(block), size_n_pages_);
  auto* start = reinterpret_cast<char*>(block);
  char* end = start + size;
  memory_blocks_.push_back(
      {start, end, reinterpret_cast<char*>(n_pages_aligned)});
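// Illustrative example of the bookkeeping above: if the block starts at
// 0x7f0000001000 and size_n_pages_ = 0x8000, then n_pages_aligned =
// RoundUpTo(0x7f0000001000, 0x8000) = 0x7f0000008000, the first N-page
// boundary inside the block.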
                                         uint64_t mem_block_size) {
  assert((reinterpret_cast<uint64_t>(block) & (size_n_pages_ - 1)) == 0 &&
         "block is not N page aligned");
  auto* start_pointer = static_cast<char*>(block + kMetadataSize);
  auto* pointer = start_pointer;
  if (tl_list->GetN() == num_elements) {
    auto* head = new (pointer) Node();
    assert(head->next == nullptr);
    for (uint64_t i = 1; i < num_elements; ++i) {
      assert(pointer >= static_cast<char*>(block));
      assert(pointer <= start_pointer + mem_block_size - size_);
      __builtin_prefetch(pointer + size_);
      auto* node = new (pointer) Node();
    for (uint64_t i = 0; i < num_elements; ++i) {
      assert(pointer >= static_cast<char*>(block));
      assert(pointer <= start_pointer + mem_block_size - size_);
      __builtin_prefetch(pointer + size_);
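    // Both loops carve the block into size_-byte slots and construct a
    // free-list Node in each slot with placement new; prefetching the
    // next slot hides the cache miss its placement new would otherwise
    // incur.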
  assert((multiple & (multiple - 1)) == 0 && multiple &&
         "multiple must be a power of two and non-zero");
  return (number + multiple - 1) &
         (std::numeric_limits<uint64_t>::max() - (multiple - 1));
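// Worked example of the bit trick above: for a power-of-two multiple,
// std::numeric_limits<uint64_t>::max() - (multiple - 1) equals
// ~(multiple - 1), a mask clearing the low log2(multiple) bits, so
// RoundUpTo(4097, 4096) = (4097 + 4095) & ~4095 = 8192, the smallest
// multiple of 4096 that is >= 4097.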
                             uint64_t max_mem_per_thread_factor)
    : size_(size), tinfo_(ThreadInfo::GetInstance()) {
        size, nid, size_n_pages, growth_rate, max_mem_per_thread_factor));
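// PoolAllocator keeps one NumaPoolAllocator per NUMA node (nid), each
// serving this allocator's single object size; New() below asserts
// exactly that single-size invariant.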
    : size_(other.size_),
      tinfo_(other.tinfo_),
      numa_allocators_(std::move(other.numa_allocators_)) {}
    el->~NumaPoolAllocator();
  assert(size_ == size &&
         "Requested size does not match this PoolAllocator");
                             uint64_t max_mem_per_thread_factor)
    : growth_rate_(growth_rate),
      max_mem_per_thread_factor_(max_mem_per_thread_factor),
      page_size_(sysconf(_SC_PAGESIZE)),
      page_shift_(static_cast<uint64_t>(std::log2(page_size_))),
      num_threads_(ThreadInfo::GetInstance()->GetMaxThreads()) {
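  // page_shift_ is log2 of the page size, e.g. 4096-byte pages give
  // page_shift_ = 12; Delete() uses it to map a pointer back to the
  // start of its page.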
345 "The parameter mem_mgr_max_mem_per_thread_factor must be "
    return it->second->New(size);
  std::lock_guard<Spinlock> guard(lock_);
  std::lock_guard<Spinlock> guard(lock_);
    return it->second->New(size);
  auto addr = reinterpret_cast<uint64_t>(p);
  // Clear the low page_shift_ bits to obtain the start of the page
  // that contains p.
  auto* page_addr =
      reinterpret_cast<char*>((addr >> page_shift_) << page_shift_);
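  // e.g. with page_shift_ = 12, p = 0x7f00000012ab maps to
  // page_addr = 0x7f0000001000, the start of the page that owns p.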