38 #ifndef LIB_INCIDENCE_COUNT_H 39 #define LIB_INCIDENCE_COUNT_H 68 using TIMING_SCALE = std::micro;
69 using Clock = std::chrono::steady_clock;
71 using Instance = decltype(Clock::now());
72 using Dur = std::chrono::duration<double, TIMING_SCALE>;
82 using Sequence = vector<Inc>;
83 using Recording = vector<Sequence>;
// NOTE(review): the extraction fused original source line numbers ("87","93","100")
// into the code text below — these number tokens are not valid C++ and must be stripped.
// Member: atomic counter handing out unique thread/slot IDs, starting at 0.
87 std::atomic_uint8_t slotID_{0};
// Fragment of the slot-allocation function (its signature lies outside this extract):
// yields the next free slotID; relaxed ordering presumably suffices since only
// uniqueness of the returned value matters — TODO confirm against full source.
93 return slotID_.fetch_add(+1, std::memory_order_relaxed);
// Fragment (enclosing function not visible): guards against wrap-around
// of the uint8_t ID space when too many threads request a slot.
100 ASSERT (threadID < std::numeric_limits<uint8_t>::max(),
"WOW -- so many threads?");
// getMySequence: yield the per-thread recording Sequence for the given threadID,
// growing the Recording (rec_) on demand.
// NOTE(review): return type line and the grow-loop body are missing from this
// extract (presumably `Sequence&` and an emplace of a fresh Sequence) — confirm
// against the full source before cleanup.
105 getMySequence(uint8_t threadID)
107 if (threadID >= rec_.size())
// grow storage to cover this threadID
109 rec_.reserve (threadID+1);
110 for (
size_t i = rec_.size(); i < threadID+1u; ++i)
113 return rec_[threadID];
117 addEntry(uint8_t caseID,
bool isLeave)
119 uint8_t threadID{getMySlot()};
120 Sequence& seq = getMySequence(threadID);
121 Inc& incidence = seq.emplace_back();
122 incidence.when = Clock::now();
123 incidence.thread = threadID;
124 incidence.caseID = caseID;
125 incidence.isLeave = isLeave;
// Pre-allocation hints, to avoid reallocations while a measurement is running.
// expectThreads: presumably reserves one Sequence per anticipated thread
// (body lines are missing from this extract — TODO confirm).
134 expectThreads(uint8_t cnt)
// expectIncidents: presumably pre-reserves capacity for `cnt` entries in each
// per-thread Sequence (the loop body is missing from this extract — TODO confirm).
142 expectIncidents(
size_t cnt)
145 for (Sequence& s : rec_)
154 void markEnter(uint8_t caseID =0) { addEntry(caseID,
false); }
155 void markLeave(uint8_t caseID =0) { addEntry(caseID,
true); }
163 size_t activationCnt{0};
164 double cumulatedTime{0};
165 double activeTime{0};
166 double coveredTime{0};
167 double avgConcurrency{0};
169 vector<size_t> caseCntr{};
170 vector<size_t> thrdCntr{};
171 vector<double> caseTime{};
172 vector<double> thrdTime{};
173 vector<double> concTime{};
// Bounds-safe subscript helper: returns data[idx] when idx is within range.
// NOTE(review): the fallback branch of the conditional (line after the `?` arm)
// is cut off in this extract — presumably a default-constructed VAL; confirm
// against the full source. Return type line is likewise not visible.
175 template<
typename VAL>
177 access (vector<VAL>
const& data,
size_t idx)
179 return idx < data.size()? data[idx]
182 size_t cntCase (
size_t id)
const {
return access (caseCntr,
id); }
183 size_t cntThread(
size_t id)
const {
return access (thrdCntr,
id); }
184 double timeCase (
size_t id)
const {
return access (caseTime,
id); }
185 double timeThread(
size_t id)
const {
return access (thrdTime,
id); }
186 double timeAtConc(
size_t id)
const {
return access (concTime,
id); }
// evaluate() fragment — function head and tail lie outside this extract.
// Algorithm: flatten all per-thread Sequences into one timeline, stable-sort it
// by timestamp, then sweep it once, integrating each time slice between
// consecutive events into the per-case / per-thread / per-concurrency counters.
// NOTE(review): the extraction fused original line numbers into the text and
// dropped several lines (e.g. the summation after .transform, the thread-active
// guard in the 249-loop, and the enter/leave branch headers around 257/266).
210 size_t numThreads = rec_.size();
211 if (numThreads == 0)
return stat;   // nothing recorded -> empty statistics
// total number of captured events over all sequences
// (the terminal aggregation of this pipeline is cut off here)
213 size_t numEvents =
explore(rec_)
214 .transform([](Sequence& seq){
return seq.size(); })
216 if (numEvents == 0)
return stat;
// flatten: copy every event of every thread into one timeline
218 timeline.reserve(numEvents);
219 for (Sequence& seq : rec_)
220 for (
Inc& event : seq)
221 timeline.emplace_back(event);
// stable sort retains insertion order for identical timestamps
222 std::stable_sort (timeline.begin(), timeline.end()
223 ,[](
Inc const& l,
Inc const& r) {
return l.when < r.when; }
// activity level per case and per thread during the sweep
227 vector<int> active_case;
228 vector<int> active_thrd(numThreads);
// one accounting slot for each possible concurrency level 0..numThreads
231 stat.concTime.resize (numThreads+1);
// sweep over the sorted timeline, integrating the slice since the previous event
236 Instance prev = timeline.front().when;
237 for (
Inc& event : timeline)
// grow per-case data structures on demand when a new caseID appears
239 if (event.caseID >= stat.
caseCntr.size())
241 active_case .resize (event.caseID+1);
242 stat.
caseCntr.resize (event.caseID+1);
243 stat.
caseTime.resize (event.caseID+1);
// duration since the preceding event
245 Dur timeSlice =
event.when - prev;
// attribute this slice to every currently active case (weighted by activity level)...
247 for (uint i=0; i < stat.
caseCntr.size(); ++i)
248 stat.
caseTime[i] += active_case[i] * timeSlice.count();
// ...and to each thread; NOTE(review): a guard line (presumably testing
// active_thrd[i]) appears to be missing between 249 and 251 — confirm.
249 for (uint i=0; i < numThreads; ++i)
251 stat.
thrdTime[i] += timeSlice.count();
// number of threads concurrently active within this slice
252 size_t concurr =
explore(active_thrd).filter([](
int a){
return 0 < a; }).count();
253 ENSURE (concurr <= numThreads);
255 stat.concTime[concurr] += timeSlice.count();
// leave-branch: balance check, then lower the activity levels
// (the surrounding if/else on event.isLeave is cut off in this extract)
259 ASSERT (0 < active_case[event.caseID]);
260 ASSERT (0 < active_thrd[event.thread]);
262 --active_case[
event.caseID];
263 --active_thrd[
event.thread];
// enter-branch: raise activity levels and count the activation
268 ++active_case[
event.caseID];
269 ++active_thrd[
event.thread];
272 ++stat.activationCnt;
// overall covered timespan and sanity checks:
// every enter must be paired with a leave, hence an even event count
276 Dur covered = timeline.back().when - timeline.front().when;
278 stat.eventCnt = timeline.size();
279 ENSURE (0 < stat.activationCnt);
280 ENSURE (stat.eventCnt % 2 == 0);
auto explore(IT &&srcSeq)
Start building an IterExplorer by suitably wrapping the given iterable source.
double cumulatedTime
aggregated time over all cases
Any copying and copy construction is prohibited.
Implementation namespace for support and library code.
double avgConcurrency
amortised concurrency in timespan
Mix-Ins to allow or prohibit various degrees of copying and cloning.
uint8_t allocateNextSlot()
threadsafe allocation of thread/slotID
double coveredTime
overall timespan of observation
vector< double > caseTime
aggregated time per case
vector< size_t > thrdCntr
counting activations per thread
A recorder for concurrent incidences.
double activeTime
compounded time of thread activity
vector< size_t > caseCntr
counting activations per case
vector< double > thrdTime
time of activity per thread
Statistic evaluate()
Visit all data captured thus far, construct a unified timeline, and then compute a statistics evaluation.
Building tree expanding and backtracking evaluations within hierarchical scopes.