Lumiera  0.pre.03
»edit your freedom«
incidence-count.hpp
/*
  INCIDENCE-COUNT.hpp  -  instrumentation helper to watch possibly multithreaded task activations

  Copyright (C)         Lumiera.org
    2024,               Hermann Vosseler <Ichthyostega@web.de>

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of
  the License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#ifndef LIB_INCIDENCE_COUNT_H
#define LIB_INCIDENCE_COUNT_H


#include "lib/nocopy.hpp"
#include "lib/iter-explorer.hpp"

#include <cstdint>
#include <atomic>
#include <vector>
#include <chrono>
#include <limits>
#include <algorithm>


namespace lib {

  using std::vector;

  /**
   * A recorder for concurrent incidences.
   * Observations are logged per thread and later combined
   * into a unified timeline for statistics evaluation.
   */
  class IncidenceCount
    : util::NonCopyable
    {
      using TIMING_SCALE = std::micro;   // Results are in µ-sec
      using Clock = std::chrono::steady_clock;

      using Instance = decltype(Clock::now());
      using Dur = std::chrono::duration<double, TIMING_SCALE>;

      struct Inc
        {
          Instance when;
          uint8_t thread :8;
          uint8_t caseID :8;
          bool   isLeave :1;
        };

      using Sequence  = vector<Inc>;
      using Recording = vector<Sequence>;

      Recording rec_;

      std::atomic_uint8_t slotID_{0};

      /** threadsafe allocation of thread/slotID */
      uint8_t
      allocateNextSlot()
        { // Note: returning previous value before increment
          return slotID_.fetch_add(+1, std::memory_order_relaxed);
        }

      uint8_t
      getMySlot()
        {
          thread_local uint8_t threadID{allocateNextSlot()};
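          // Note: the thread_local is initialised only once per thread,
          //       so each thread claims a distinct slot on first use
          //       and retains it for the thread's whole lifetime.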
          ASSERT (threadID < std::numeric_limits<uint8_t>::max(), "WOW -- so many threads?");
          return threadID;
        }

      Sequence&
      getMySequence(uint8_t threadID)
        {
          if (threadID >= rec_.size())
            {
              rec_.reserve (threadID+1);
              for (size_t i = rec_.size(); i < threadID+1u; ++i)
                rec_.emplace_back();
            }
          return rec_[threadID];
        }

      void
      addEntry(uint8_t caseID, bool isLeave)
        {
          uint8_t threadID{getMySlot()};
          Sequence& seq = getMySequence(threadID);
          Inc& incidence = seq.emplace_back();
          incidence.when = Clock::now();
          incidence.thread = threadID;
          incidence.caseID = caseID;
          incidence.isLeave = isLeave;
        }


    public:
      IncidenceCount() = default;


      IncidenceCount&
      expectThreads(uint8_t cnt)
        {
          REQUIRE (cnt);
          rec_.resize (cnt);
          return *this;
        }

      IncidenceCount&
      expectIncidents(size_t cnt)
        {
          REQUIRE (cnt);
          for (Sequence& s : rec_)
            s.reserve (2*cnt);   // two entries per incident: enter and leave
          return *this;
        }



      /* ===== Measurement API ===== */

      void markEnter(uint8_t caseID =0) { addEntry(caseID, false); }
      void markLeave(uint8_t caseID =0) { addEntry(caseID, true);  }
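      // Note: each markEnter() should be balanced by a matching markLeave()
      //       with the same caseID; evaluate() asserts this pairing.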


      /* ===== Evaluations ===== */

      struct Statistic
        {
          size_t eventCnt{0};
          size_t activationCnt{0};
          double cumulatedTime{0};   ///< aggregated time over all cases
          double activeTime{0};      ///< compounded time of thread activity
          double coveredTime{0};     ///< overall timespan of observation
          double avgConcurrency{0};  ///< amortised concurrency in timespan

          vector<size_t> caseCntr{};  ///< counting activations per case
          vector<size_t> thrdCntr{};  ///< counting activations per thread
          vector<double> caseTime{};  ///< aggregated time per case
          vector<double> thrdTime{};  ///< time of activity per thread
          vector<double> concTime{};  ///< time spent at each concurrency level

          template<typename VAL>
          static VAL
          access (vector<VAL> const& data, size_t idx)
            {
              return idx < data.size()? data[idx]
                                      : VAL{};
            }
          size_t cntCase   (size_t id)  const { return access (caseCntr, id); }
          size_t cntThread (size_t id)  const { return access (thrdCntr, id); }
          double timeCase  (size_t id)  const { return access (caseTime, id); }
          double timeThread(size_t id)  const { return access (thrdTime, id); }
          double timeAtConc(size_t id)  const { return access (concTime, id); }
        };

      Statistic evaluate();

      double
      calcCumulatedTime()
        {
          return evaluate().cumulatedTime;
        }
    };


  /**
   * Visit all data captured thus far, construct a unified timeline
   * and then compute statistics evaluation.
   */
  inline IncidenceCount::Statistic
  IncidenceCount::evaluate()
  {
    Statistic stat;
    size_t numThreads = rec_.size();
    if (numThreads == 0) return stat;

    size_t numEvents = explore(rec_)
                          .transform([](Sequence& seq){ return seq.size(); })
                          .resultSum();
    if (numEvents == 0) return stat;
    Sequence timeline;
    timeline.reserve(numEvents);
    for (Sequence& seq : rec_)
      for (Inc& event : seq)
        timeline.emplace_back(event);
    std::stable_sort (timeline.begin(), timeline.end()
                     ,[](Inc const& l, Inc const& r) { return l.when < r.when; }
                     );

    int active{0};
    vector<int> active_case;
    vector<int> active_thrd(numThreads);
    stat.thrdCntr.resize (numThreads);
    stat.thrdTime.resize (numThreads);
    stat.concTime.resize (numThreads+1); // also accounting for idle times in range

    // Integrate over the timeline...
    // - book the preceding interval length into each affected partial sum
    // - adjust the current active count in accordance with the current event
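    // Worked example: if two threads are both active throughout a 5µs slice,
    // that slice adds 10µs (2 x 5µs) to cumulatedTime, 5µs to each thread's
    // thrdTime and 5µs to concTime[2], yet only 5µs to the covered timespan.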
    Instance prev = timeline.front().when;
    for (Inc& event : timeline)
      {
        if (event.caseID >= stat.caseCntr.size())
          {
            active_case  .resize (event.caseID+1);
            stat.caseCntr.resize (event.caseID+1);
            stat.caseTime.resize (event.caseID+1);
          }
        Dur timeSlice = event.when - prev;
        stat.cumulatedTime += active * timeSlice.count();
        for (uint i=0; i < stat.caseCntr.size(); ++i)
          stat.caseTime[i] += active_case[i] * timeSlice.count();
        for (uint i=0; i < numThreads; ++i)
          if (active_thrd[i]) // note: counting activity per thread, without overlapping cases
            stat.thrdTime[i] += timeSlice.count();
        size_t concurr = explore(active_thrd).filter([](int a){ return 0 < a; }).count();
        ENSURE (concurr <= numThreads);
        stat.avgConcurrency += concurr * timeSlice.count(); // contribution for weighted average
        stat.concTime[concurr] += timeSlice.count();
        if (event.isLeave)
          {
            ASSERT (0 < active);
            ASSERT (0 < active_case[event.caseID]);
            ASSERT (0 < active_thrd[event.thread]);
            --active;
            --active_case[event.caseID];
            --active_thrd[event.thread];
          }
        else
          {
            ++active;
            ++active_case[event.caseID];
            ++active_thrd[event.thread];
            ++stat.caseCntr[event.caseID];
            ++stat.thrdCntr[event.thread];
            ++stat.activationCnt;
          }
        prev = event.when;
      }
    Dur covered = timeline.back().when - timeline.front().when;
    stat.coveredTime = covered.count();
    stat.eventCnt = timeline.size();
    ENSURE (0 < stat.activationCnt);
    ENSURE (stat.eventCnt % 2 == 0);
    stat.avgConcurrency /= stat.coveredTime; // time used as weight sum
    stat.activeTime = explore(stat.thrdTime).resultSum();
    return stat;
  }


} // namespace lib
#endif /*LIB_INCIDENCE_COUNT_H*/
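
A minimal usage sketch (illustration only, not part of this header): everything
below besides the IncidenceCount API itself is made up for demonstration.
Worker threads bracket the observed activity with markEnter()/markLeave();
pre-sizing via expectThreads() and expectIncidents() keeps the recording
storage from growing while threads are writing, and evaluate() is invoked
single-threaded, after all workers have joined.

#include "lib/incidence-count.hpp"

#include <thread>
#include <chrono>
#include <vector>
#include <iostream>

int
main()
{
  lib::IncidenceCount watch;
  watch.expectThreads(4)
       .expectIncidents(100);

  std::vector<std::thread> workers;
  for (int i=0; i < 4; ++i)
    workers.emplace_back([&]{
              for (int n=0; n < 100; ++n)
                {
                  watch.markEnter(1);    // case-1 : simulated work package
                  std::this_thread::sleep_for (std::chrono::microseconds(50));
                  watch.markLeave(1);
                }
            });
  for (auto& t : workers)
    t.join();                            // recording complete before evaluation

  auto stat = watch.evaluate();
  std::cout << "activations: "       << stat.activationCnt
            << "  covered µs: "      << stat.coveredTime
            << "  avg concurrency: " << stat.avgConcurrency
            << std::endl;
}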