BioDynaMo  v1.05.119-a4ff3934
resource_manager.cc
// -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & University of Surrey for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------

#include "core/resource_manager.h"
#include <cmath>
#ifndef NDEBUG
#include <set>
#endif  // NDEBUG
#include "core/algorithm.h"
#include "core/container/shared_data.h"
#include "core/environment/environment.h"
#include "core/simulation.h"
#include "core/util/partition.h"
#include "core/util/plot_memory_layout.h"
#include "core/util/timing.h"

namespace bdm {

ResourceManager::ResourceManager() {
  // Must be called prior to any other call into libnuma
  if (auto ret = numa_available(); ret == -1) {
    Log::Fatal("ResourceManager",
               "Call to numa_available failed with return code: ", ret);
  }
  agents_.resize(numa_num_configured_nodes());
  agents_lb_.resize(numa_num_configured_nodes());

  auto* param = Simulation::GetActive()->GetParam();
  if (param->export_visualization || param->insitu_visualization) {
    type_index_ = new TypeIndex();
  }
}
ResourceManager::~ResourceManager() {
  for (auto& el : continuum_models_) {
    delete el.second;
  }
  for (auto& numa_agents : agents_) {
    for (auto* agent : numa_agents) {
      delete agent;
    }
  }
  if (type_index_) {
    delete type_index_;
  }
}

void ResourceManager::ForEachAgentParallel(
    Functor<void, Agent*, AgentHandle>& function,
    Functor<bool, Agent*>* filter) {
#pragma omp parallel
  {
    auto tid = omp_get_thread_num();
    auto nid = thread_info_->GetNumaNode(tid);
    auto threads_in_numa = thread_info_->GetThreadsInNumaNode(nid);
    auto& numa_agents = agents_[nid];
    assert(thread_info_->GetNumaNode(tid) == numa_node_of_cpu(sched_getcpu()));

    // use static scheduling for now
    auto correction = numa_agents.size() % threads_in_numa == 0 ? 0 : 1;
    auto chunk = numa_agents.size() / threads_in_numa + correction;
    auto start = thread_info_->GetNumaThreadId(tid) * chunk;
    auto end = std::min(numa_agents.size(), start + chunk);

    for (uint64_t i = start; i < end; ++i) {
      auto* a = numa_agents[i];
      if (!filter || (*filter)(a)) {
        function(a, AgentHandle(nid, i));
      }
    }
  }
}

template <typename TFunctor>
struct ForEachAgentParallelFunctor : public Functor<void, Agent*, AgentHandle> {
  TFunctor& functor_;
  explicit ForEachAgentParallelFunctor(TFunctor& f) : functor_(f) {}
  void operator()(Agent* agent, AgentHandle) override { functor_(agent); }
};

void ResourceManager::ForEachAgentParallel(Functor<void, Agent*>& function,
                                           Functor<bool, Agent*>* filter) {
  ForEachAgentParallelFunctor<Functor<void, Agent*>> functor(function);
  ForEachAgentParallel(functor, filter);
}

void ResourceManager::ForEachAgentParallel(Operation& op,
                                           Functor<bool, Agent*>* filter) {
  ForEachAgentParallelFunctor<Operation> functor(op);
  ForEachAgentParallel(functor, filter);
}

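// Usage sketch: caller code typically drives these overloads with a lambda
// wrapped by L2F, analogous to the delete_functor in LoadBalance() below.
// Output from a parallel loop like this may interleave across threads.
//
//   auto* rm = Simulation::GetActive()->GetResourceManager();
//   auto print_uids = L2F([](Agent* a) { std::cout << a->GetUid() << "\n"; });
//   rm->ForEachAgentParallel(print_uids);
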
void ResourceManager::ForEachAgentParallel(
    uint64_t chunk, Functor<void, Agent*, AgentHandle>& function,
    Functor<bool, Agent*>* filter) {
  // adapt chunk size
  auto num_agents = GetNumAgents();
  uint64_t factor = (num_agents / thread_info_->GetMaxThreads()) / chunk;
  chunk = (num_agents / thread_info_->GetMaxThreads()) / (factor + 1);
  chunk = chunk >= 1 ? chunk : 1;
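  // Worked example: with 1000 agents, 4 threads, and a requested chunk of 64,
  // the per-thread share is 1000 / 4 = 250, factor = 250 / 64 = 3, and the
  // adapted chunk becomes 250 / (3 + 1) = 62, i.e. the largest size that still
  // fits factor + 1 chunks into one thread's share (never larger than the
  // requested chunk, never smaller than 1).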

  // use dynamic scheduling
  // Unfortunately, OpenMP's built-in dynamic scheduling can't be used, since
  // threads belong to different NUMA domains and thus operate on
  // different containers
  auto numa_nodes = thread_info_->GetNumaNodes();
  auto max_threads = omp_get_max_threads();
  std::vector<uint64_t> num_chunks_per_numa(numa_nodes);
  for (int n = 0; n < numa_nodes; n++) {
    auto correction = agents_[n].size() % chunk == 0 ? 0 : 1;
    num_chunks_per_numa[n] = agents_[n].size() / chunk + correction;
  }

  std::vector<std::atomic<uint64_t>*> counters(max_threads, nullptr);
  std::vector<uint64_t> max_counters(max_threads);
  for (int thread_cnt = 0; thread_cnt < max_threads; thread_cnt++) {
    uint64_t current_nid = thread_info_->GetNumaNode(thread_cnt);

    auto correction =
        num_chunks_per_numa[current_nid] %
                    thread_info_->GetThreadsInNumaNode(current_nid) ==
                0
            ? 0
            : 1;
    uint64_t num_chunks_per_thread =
        num_chunks_per_numa[current_nid] /
            thread_info_->GetThreadsInNumaNode(current_nid) +
        correction;
    auto start =
        num_chunks_per_thread * thread_info_->GetNumaThreadId(thread_cnt);
    auto end = std::min(num_chunks_per_numa[current_nid],
                        start + num_chunks_per_thread);

    counters[thread_cnt] = new std::atomic<uint64_t>(start);
    max_counters[thread_cnt] = end;
  }

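  // Each thread owns an atomic counter that ranges over [start, end) of the
  // chunks belonging to its NUMA node. Threads fetch chunks by incrementing
  // their own counter first; once it is exhausted, they increment the
  // counters of other threads (preferring threads on their own NUMA node) in
  // the work-stealing loop below.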
#pragma omp parallel
  {
    auto tid = omp_get_thread_num();
    auto nid = thread_info_->GetNumaNode(tid);

    // thread-private variables (firstprivate(chunk, numa_node_) causes a
    // compilation error with some OpenMP versions)
    auto p_numa_nodes = thread_info_->GetNumaNodes();
    auto p_max_threads = omp_get_max_threads();
    auto p_chunk = chunk;
    assert(thread_info_->GetNumaNode(tid) == numa_node_of_cpu(sched_getcpu()));

    // dynamic scheduling
    uint64_t start = 0;
    uint64_t end = 0;

    // This loop implements work stealing from other NUMA nodes if there
    // are imbalances. Each thread starts with its own NUMA domain. Once it
    // is finished, it looks for work on the other domains.
    for (int n = 0; n < p_numa_nodes; n++) {
      int current_nid = (nid + n) % p_numa_nodes;
      for (int thread_cnt = 0; thread_cnt < p_max_threads; thread_cnt++) {
        uint64_t current_tid = (tid + thread_cnt) % p_max_threads;
        if (current_nid != thread_info_->GetNumaNode(current_tid)) {
          continue;
        }

        auto& numa_agents = agents_[current_nid];
        uint64_t old_count = (*(counters[current_tid]))++;
        while (old_count < max_counters[current_tid]) {
          start = old_count * p_chunk;
          end = std::min(static_cast<uint64_t>(numa_agents.size()),
                         start + p_chunk);

          for (uint64_t i = start; i < end; ++i) {
            auto* a = numa_agents[i];
            if (!filter || (*filter)(a)) {
              function(a, AgentHandle(current_nid, i));
            }
          }

          old_count = (*(counters[current_tid]))++;
        }
      }  // work stealing loop over threads
    }    // work stealing loop over NUMA nodes
  }

  for (auto* counter : counters) {
    delete counter;
  }
}

struct LoadBalanceFunctor : public Functor<void, Iterator<AgentHandle>*> {
  bool minimize_memory;
  uint64_t offset;
  uint64_t nid;
  std::vector<std::vector<Agent*>>& agents;
  std::vector<Agent*>& dest;
  AgentUidMap<AgentHandle>& uid_ah_map;
  TypeIndex* type_index;

  LoadBalanceFunctor(bool minimize_memory, uint64_t offset, uint64_t nid,
                     decltype(agents) agents, decltype(dest) dest,
                     decltype(uid_ah_map) uid_ah_map, TypeIndex* type_index)
      : minimize_memory(minimize_memory),
        offset(offset),
        nid(nid),
        agents(agents),
        dest(dest),
        uid_ah_map(uid_ah_map),
        type_index(type_index) {}

  void operator()(Iterator<AgentHandle>* it) override {
    while (it->HasNext()) {
      auto handle = it->Next();
      auto* agent = agents[handle.GetNumaNode()][handle.GetElementIdx()];
      auto* copy = agent->NewCopy();
      auto el_idx = offset++;
      dest[el_idx] = copy;
      uid_ah_map.Insert(copy->GetUid(), AgentHandle(nid, el_idx));
      if (type_index) {
        type_index->Update(copy);
      }
      if (minimize_memory) {
        delete agent;
      }
    }
  }
};
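
// The functor above copies each visited agent into its new NUMA-local slot in
// `dest`, records the new AgentHandle in the uid -> handle map, updates the
// type index if present, and optionally deletes the original right away to
// minimize peak memory usage during rebalancing.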

void ResourceManager::LoadBalance() {
  // Load balancing destroys the synchronization between the simulation and
  // the environment. We mark the environment as OutOfSync so that it is
  // updated before it is accessed again.
  MarkEnvironmentOutOfSync();
  auto* param = Simulation::GetActive()->GetParam();
  if (param->plot_memory_layout) {
    PlotNeighborMemoryHistogram(true);
  }

  // balance agents per numa node according to the number of
  // threads associated with each numa domain
  auto numa_nodes = thread_info_->GetNumaNodes();
  std::vector<uint64_t> agent_per_numa(numa_nodes);
  std::vector<uint64_t> agent_per_numa_cumm(numa_nodes);
  uint64_t cumulative = 0;
  auto max_threads = thread_info_->GetMaxThreads();
  for (int n = 1; n < numa_nodes; ++n) {
    auto threads_in_numa = thread_info_->GetThreadsInNumaNode(n);
    uint64_t num_agents = GetNumAgents() * threads_in_numa / max_threads;
    agent_per_numa[n] = num_agents;
    cumulative += num_agents;
  }
  agent_per_numa[0] = GetNumAgents() - cumulative;
  agent_per_numa_cumm[0] = 0;
  for (int n = 1; n < numa_nodes; ++n) {
    agent_per_numa_cumm[n] = agent_per_numa_cumm[n - 1] + agent_per_numa[n - 1];
  }
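  // Example: with 1600 agents and two NUMA domains that own 6 and 10 of the
  // 16 threads, node 1 receives 1600 * 10 / 16 = 1000 agents, node 0 the
  // remaining 600, and agent_per_numa_cumm becomes {0, 600}.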

  // using the first-touch policy - a page is allocated on the numa domain of
  // the thread that accesses it first.
  // Alternatively, use numa_alloc_onnode.
  int ret = numa_run_on_node(0);
  if (ret != 0) {
    Log::Fatal("ResourceManager",
               "Run on numa node failed. Return code: ", ret);
  }
  auto* env = Simulation::GetActive()->GetEnvironment();
  auto lbi = env->GetLoadBalanceInfo();

  const bool minimize_memory = param->minimize_memory_while_rebalancing;

// create new agents
#pragma omp parallel
  {
    auto tid = thread_info_->GetMyThreadId();
    auto nid = thread_info_->GetNumaNode(tid);

    auto& dest = agents_lb_[nid];
    if (thread_info_->GetNumaThreadId(tid) == 0) {
      if (dest.capacity() < agent_per_numa[nid]) {
        dest.reserve(agent_per_numa[nid] * 1.5);
      }
      dest.resize(agent_per_numa[nid]);
    }

#pragma omp barrier

    auto threads_in_numa = thread_info_->GetThreadsInNumaNode(nid);
    assert(thread_info_->GetNumaNode(tid) == numa_node_of_cpu(sched_getcpu()));

    // use static scheduling
    auto correction = agent_per_numa[nid] % threads_in_numa == 0 ? 0 : 1;
    auto chunk = agent_per_numa[nid] / threads_in_numa + correction;
    auto start =
        thread_info_->GetNumaThreadId(tid) * chunk + agent_per_numa_cumm[nid];
    auto end =
        std::min(agent_per_numa_cumm[nid] + agent_per_numa[nid], start + chunk);

    LoadBalanceFunctor f(minimize_memory, start - agent_per_numa_cumm[nid], nid,
                         agents_, dest, uid_ah_map_, type_index_);
    lbi->CallHandleIteratorConsumer(start, end, f);
  }

  // delete old objects. This approach makes it likely that a thread on the
  // right numa node deletes the object, thus minimizing thread
  // synchronization overheads. The bdm memory allocator does not have this
  // issue.
  if (!minimize_memory) {
    auto delete_functor = L2F([](Agent* agent) { delete agent; });
    ForEachAgentParallel(delete_functor);
  }

  for (int n = 0; n < numa_nodes; n++) {
    agents_[n].swap(agents_lb_[n]);
    if (param->plot_memory_layout) {
      PlotMemoryLayout(agents_[n], n);
      PlotMemoryHistogram(agents_[n], n);
    }
  }
  if (param->plot_memory_layout) {
    PlotNeighborMemoryHistogram(false);
  }

  if (Simulation::GetActive()->GetParam()->debug_numa) {
    std::cout << *this << std::endl;
  }
}

// -----------------------------------------------------------------------------
void ResourceManager::RemoveAgents(
    const std::vector<std::vector<AgentUid>*>& uids) {
  // initialization
  auto numa_nodes = thread_info_->GetNumaNodes();
  // cumulative number of agents to be removed
  // dimensions: numa_nodes x num_threads
  std::vector<std::vector<uint64_t>> tbr_cum(numa_nodes);
  for (auto& el : tbr_cum) {
    el.resize(uids.size() + 1);
  }

  std::vector<uint64_t> remove(numa_nodes);
  std::vector<uint64_t> lowest(numa_nodes);
  parallel_remove_.to_right.resize(numa_nodes);
  parallel_remove_.not_to_left.resize(numa_nodes);
  // thread offsets into to_right and not_to_left
  std::vector<SharedData<uint64_t>> start(numa_nodes);
  // number of swaps in each block
  // add one more element to have enough space for the exclusive prefix sum
  std::vector<SharedData<uint64_t>> swaps_to_right(numa_nodes);
  std::vector<SharedData<uint64_t>> swaps_to_left(numa_nodes);

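  // Removal strategy: lowest[nid] is the container size after removal. Agents
  // flagged for removal that currently sit below lowest[nid] are collected in
  // to_right; slots at or above lowest[nid] that hold an agent to be removed
  // are marked in not_to_left. Surviving agents above lowest[nid] are then
  // swapped into the to_right positions, so that afterwards every index
  // >= lowest[nid] holds an agent to delete and the vector can simply be
  // resized.
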
#ifndef NDEBUG
  std::set<AgentUid> toberemoved;
#endif  // NDEBUG

  // determine how many agents will be removed in each numa domain
#pragma omp parallel for schedule(static, 1)
  for (uint64_t i = 0; i < uids.size(); ++i) {
    for (auto& uid : *uids[i]) {
      auto ah = uid_ah_map_[uid];
      tbr_cum[ah.GetNumaNode()][i]++;
    }
  }

#pragma omp parallel
  {
    auto nid = thread_info_->GetMyNumaNode();
    auto ntid = thread_info_->GetMyNumaThreadId();
    if (thread_info_->GetMyNumaThreadId() == 0) {
      ExclusivePrefixSum(&tbr_cum[nid], tbr_cum[nid].size() - 1);
      remove[nid] = tbr_cum[nid].back();
      lowest[nid] = agents_[nid].size() - remove[nid];

      if (remove[nid] != 0) {
        if (parallel_remove_.to_right[nid].capacity() < remove[nid]) {
          parallel_remove_.to_right[nid].reserve(remove[nid] * 1.5);
        }
        if (parallel_remove_.not_to_left[nid].capacity() < remove[nid]) {
          parallel_remove_.not_to_left[nid].reserve(remove[nid] * 1.5);
        }
      }
    }
#pragma omp barrier
    auto threads_in_numa = thread_info_->GetThreadsInNumaNode(nid);
    uint64_t start_init = 0;
    uint64_t end_init = 0;
    Partition(remove[nid], threads_in_numa, ntid, &start_init, &end_init);
    for (uint64_t i = start_init; i < end_init; ++i) {
      parallel_remove_.to_right[nid][i] = std::numeric_limits<uint64_t>::max();
      parallel_remove_.not_to_left[nid][i] = 0;
    }
  }

  // find agents that must be swapped
#pragma omp parallel for schedule(static, 1)
  for (uint64_t i = 0; i < uids.size(); ++i) {
    std::vector<uint64_t> counts(numa_nodes);
    for (auto& uid : *uids[i]) {
      assert(ContainsAgent(uid));
      auto ah = uid_ah_map_[uid];
      auto nid = ah.GetNumaNode();
      auto eidx = ah.GetElementIdx();
#ifndef NDEBUG
#pragma omp critical
      toberemoved.insert(uid);
#endif  // NDEBUG

      if (eidx < lowest[nid]) {
        parallel_remove_.to_right[nid][tbr_cum[nid][i] + counts[nid]] = eidx;
      } else {
        parallel_remove_.not_to_left[nid][eidx - lowest[nid]] = 1;
      }
      counts[nid]++;
    }
  }

  // reorder
#pragma omp parallel
  {
    auto nid = thread_info_->GetMyNumaNode();
    auto ntid = thread_info_->GetMyNumaThreadId();
    auto threads_in_numa = thread_info_->GetThreadsInNumaNode(nid);

    if (remove[nid] != 0) {
      if (ntid == 0) {
        start[nid].resize(threads_in_numa);
        swaps_to_left[nid].resize(threads_in_numa + 1);
        swaps_to_right[nid].resize(threads_in_numa + 1);
      }
    }
#pragma omp barrier
    if (remove[nid] != 0) {
      uint64_t end = 0;
      Partition(remove[nid], threads_in_numa, ntid, &start[nid][ntid], &end);

      for (uint64_t i = start[nid][ntid]; i < end; ++i) {
        if (parallel_remove_.to_right[nid][i] !=
            std::numeric_limits<uint64_t>::max()) {
          parallel_remove_
              .to_right[nid][start[nid][ntid] + swaps_to_right[nid][ntid]++] =
              parallel_remove_.to_right[nid][i];
        }
        if (!parallel_remove_.not_to_left[nid][i]) {
          // here the interpretation of not_to_left changes to to_left,
          // just to reuse memory
          parallel_remove_
              .not_to_left[nid][start[nid][ntid] + swaps_to_left[nid][ntid]++] =
              i;
        }
      }
    }
#pragma omp barrier
    if (remove[nid] != 0) {
      // calculate the exclusive prefix sum of the number of swaps in each block
      if (ntid == 0) {
        ExclusivePrefixSum(&swaps_to_right[nid],
                           swaps_to_right[nid].size() - 1);
        ExclusivePrefixSum(&swaps_to_left[nid], swaps_to_left[nid].size() - 1);
      }
    }
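    // After the prefix sums, swaps_to_right[nid][t] (and analogously
    // swaps_to_left[nid][t]) is the index of the first swap contributed by
    // thread block t, and the last element holds the total number of swaps
    // for this NUMA node.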
#pragma omp barrier
    if (remove[nid] != 0) {
      uint64_t num_swaps = swaps_to_right[nid][threads_in_numa];
      if (num_swaps != 0) {
        // perform swaps
        uint64_t swap_start = 0;
        uint64_t swap_end = 0;
        Partition(num_swaps, threads_in_numa, ntid, &swap_start, &swap_end);

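        // Each thread processes the global swap indices [swap_start, swap_end).
        // BinarySearch on the prefix-summed arrays locates the thread block in
        // which a global swap index falls; tr_block_idx / tl_block_idx are the
        // offsets within those blocks.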
        if (swap_start < swap_end) {
          auto tr_block = BinarySearch(swap_start, swaps_to_right[nid], 0,
                                       swaps_to_right[nid].size() - 1);
          auto tl_block = BinarySearch(swap_start, swaps_to_left[nid], 0,
                                       swaps_to_left[nid].size() - 1);

          auto tr_block_swaps =
              swaps_to_right[nid][tr_block + 1] - swaps_to_right[nid][tr_block];
          auto tl_block_swaps =
              swaps_to_left[nid][tl_block + 1] - swaps_to_left[nid][tl_block];

          // number of elements to discard at the beginning
          auto tr_block_idx = swap_start - swaps_to_right[nid][tr_block];
          auto tl_block_idx = swap_start - swaps_to_left[nid][tl_block];

          for (uint64_t s = swap_start; s < swap_end; ++s) {
            // calculate the element indices that should be swapped
            auto tr_idx = start[nid][tr_block] + tr_block_idx;
            auto tl_idx = start[nid][tl_block] + tl_block_idx;
            auto tr_eidx = parallel_remove_.to_right[nid][tr_idx];
            auto tl_eidx =
                parallel_remove_.not_to_left[nid][tl_idx] + lowest[nid];

            // swap
            assert(tl_eidx < agents_[nid].size());
            assert(tr_eidx < agents_[nid].size());
            auto* reordered = agents_[nid][tl_eidx];
#ifndef NDEBUG
            assert(toberemoved.find(agents_[nid][tl_eidx]->GetUid()) ==
                   toberemoved.end());
            assert(toberemoved.find(agents_[nid][tr_eidx]->GetUid()) !=
                   toberemoved.end());
#endif  // NDEBUG
            agents_[nid][tl_eidx] = agents_[nid][tr_eidx];
            agents_[nid][tr_eidx] = reordered;
            uid_ah_map_.Insert(reordered->GetUid(), AgentHandle(nid, tr_eidx));

            // find next pair
            if (swap_end - s > 1) {
              // right
              tr_block_idx++;
              if (tr_block_idx >= tr_block_swaps) {
                tr_block_idx = 0;
                tr_block_swaps = 0;
                while (!tr_block_swaps) {
                  tr_block++;
                  tr_block_swaps = swaps_to_right[nid][tr_block + 1] -
                                   swaps_to_right[nid][tr_block];
                }
              }
              // left
              tl_block_idx++;
              if (tl_block_idx >= tl_block_swaps) {
                tl_block_idx = 0;
                tl_block_swaps = 0;
                while (!tl_block_swaps) {
                  tl_block++;
                  tl_block_swaps = swaps_to_left[nid][tl_block + 1] -
                                   swaps_to_left[nid][tl_block];
                }
              }
            }
          }
        }
      }
    }
#pragma omp barrier
    if (remove[nid] != 0) {
      // delete agents
      uint64_t start_del = 0;
      uint64_t end_del = 0;
      Partition(remove[nid], threads_in_numa, ntid, &start_del, &end_del);

      start_del += lowest[nid];
      end_del += lowest[nid];

      auto* uid_generator = Simulation::GetActive()->GetAgentUidGenerator();
      for (uint64_t i = start_del; i < end_del; ++i) {
        Agent* agent = agents_[nid][i];
        auto uid = agent->GetUid();
        assert(toberemoved.find(uid) != toberemoved.end());
        uid_ah_map_.Remove(uid);
        uid_generator->ReuseAgentUid(uid);
        if (type_index_) {
          // TODO parallelize type_index removal
#pragma omp critical
          type_index_->Remove(agent);
        }
        delete agent;
      }
    }
  }
  // shrink container
  for (uint64_t n = 0; n < agents_.size(); ++n) {
    agents_[n].resize(lowest[n]);
  }
}

// -----------------------------------------------------------------------------
size_t ResourceManager::GetAgentVectorCapacity(int numa_node) {
  return agents_[numa_node].capacity();
}

// -----------------------------------------------------------------------------
void ResourceManager::SwapAgents(std::vector<std::vector<Agent*>>* agents) {
  agents_.swap(*agents);
}

void ResourceManager::MarkEnvironmentOutOfSync() const {
  auto* env = Simulation::GetActive()->GetEnvironment();
  env->MarkAsOutOfSync();
}

}  // namespace bdm