block-flow-test.cpp
/*
  BlockFlow(Test)  -  verify scheduler memory management scheme

  Copyright (C)
    2023,  Hermann Vosseler <Ichthyostega@web.de>

  **Lumiera** is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the
  Free Software Foundation; either version 2 of the License, or (at your
  option) any later version. See the file COPYING for further details.

* *****************************************************************/

#include "lib/test/run.hpp"
#include "vault/gear/block-flow.hpp"     // subject under test
#include "lib/test/microbenchmark.hpp"   // timing helper benchmarkTime()
#include "lib/meta/function.hpp"
#include "lib/format-cout.hpp"
#include "lib/util.hpp"

#include <chrono>
#include <vector>
#include <tuple>

using test::Test;

using std::vector;
using std::pair;


namespace vault{
namespace gear {
namespace test {

  namespace { // shorthand for test parametrisation

    const size_t EXTENT_SIZ      = Extent::SIZ();
    const double TARGET_FILL     = Strategy{}.config().TARGET_FILL;
    const double ACTIVITIES_P_FR = Strategy{}.config().ACTIVITIES_PER_FRAME;
  }



  /*****************************************************************/
  /**
   * @test verify the scheduler memory management scheme (BlockFlow),
   *       which groups Activity allocations into time-bucketed Epochs.
   */
  class BlockFlow_test : public Test
    {

      virtual void
      run (Arg)
        {
          seedRand();
                            // invoke the individual test cases (defined below)
          simpleUsage();
          handleEpoch();
          placeActivity();
          adjustEpochs();
          announceLoad();
          storageFlow();
        }


      /** @test place a single Activity into the allocation scheme,
       *        then discard the Epoch holding it. */
      void
      simpleUsage()
        {
          BlockFlow bFlow;
          Time deadline = randTime();

          Activity& tick = bFlow.until(deadline).create();
          CHECK (tick.verb_ == Activity::TICK);
          CHECK (1 == watch(bFlow).cntElm());
          CHECK (1 == watch(bFlow).cntEpochs());
          CHECK (watch(bFlow).first() > deadline);
          CHECK (watch(bFlow).first() - deadline == bFlow.getEpochStep());
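          // note: the covering Epoch ends one epoch-step *after* the requested deadline,
          //       so the allocated Activity remains alive beyond the deadline it was created for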

          bFlow.discardBefore (deadline + Time{0,5});
          CHECK (0 == watch(bFlow).cntEpochs());
          CHECK (0 == watch(bFlow).cntElm());
        }



      /** @test layout of a single Epoch within a raw storage Extent:
       *        the EpochGate in slot 0, downward slot allocation and the fill factor. */
      void
      handleEpoch()
        {
          Allocator alloc;
          alloc.openNew();

          // the raw storage Extent is a compact block
          // providing uninitialised storage typed as `vault::gear::Activity`
          Extent& extent = *alloc.begin();
          CHECK (extent.size() == Extent::SIZ::value);
          CHECK (sizeof(extent) == extent.size() * sizeof(Activity));
          CHECK (showType<Extent::value_type>() == "vault::gear::Activity"_expect);

          // we can just access some slot and place data there
          extent[55].data_.feed.one = 555555555555555;

          // now establish an Epoch placed into this storage block:
          Epoch& epoch = Epoch::setup (alloc.begin(), Time{0,10});

          // the underlying storage is not touched yet...
          CHECK (epoch[55].data_.feed.one == 555555555555555);

          // but in the first slot, an »EpochGate« has been implanted
          Epoch::EpochGate& gate = epoch.gate();
          CHECK (isSameObject (gate, epoch[0]));
          CHECK (isSameObject (epoch[0], extent[0]));
          CHECK (Time{gate.deadline()} == Time(0,10));
          CHECK (Time{gate.deadline()} == Time{epoch[0].data_.condition.dead});
          CHECK (epoch[0].is (Activity::GATE));

          // the gate's `next`-pointer is (ab)used to manage the next allocation slot
          CHECK (isSameObject (*gate.next, epoch[extent.size()-1]));
          CHECK (0 == gate.filledSlots());
          CHECK (0 == epoch.getFillFactor());
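          // note: slots are handed out downwards, from the highest index towards slot 1,
          //       while slot 0 stays reserved for the EpochGate itself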

          // the storage there is not used yet....
          epoch[extent.size()-1].data_.timing.instant = Time{5,5};
          // ....but will be overwritten by the following ctor call

          // allocate a new Activity into the next free slot (using a faked AllocatorHandle)
          BlockFlow::AllocatorHandle allocHandle{alloc.begin(), nullptr};
          Activity& timeStart = allocHandle.create (Activity::WORKSTART);
          CHECK (isSameObject (timeStart, epoch[extent.size()-1]));

          // this Activity object is properly initialised (and memory was altered)
          CHECK (epoch[extent.size()-1].data_.timing.instant != Time(5,5));
          CHECK (epoch[extent.size()-1].data_.timing.instant == Time::NEVER);
          CHECK (timeStart.verb_ == Activity::WORKSTART);
          CHECK (timeStart.data_.timing.instant == Time::NEVER);
          CHECK (timeStart.data_.timing.quality == 0);

          // and the free-pointer was decremented to point to the next free slot
          CHECK (isSameObject (*gate.next, epoch[extent.size()-2]));

          // which also implies that there is still ample space left...
          CHECK (1 == gate.filledSlots());
          CHECK (gate.hasFreeSlot());

          CHECK (epoch.getFillFactor() == double(gate.filledSlots()) / (EXTENT_SIZ-1));
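          // worked example: slot 0 holds the EpochGate, leaving EXTENT_SIZ-1 payload slots;
          // after this single allocation the fill factor is thus 1/(EXTENT_SIZ-1)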

          // so let's eat this space up...
          for (uint i=extent.size()-2; i>1; --i)
            gate.claimNextSlot();

          // one final slot is left (beyond the EpochGate itself)
          CHECK (isSameObject (*gate.next, epoch[1]));
          CHECK (gate.filledSlots() == EXTENT_SIZ-2);
          CHECK (gate.hasFreeSlot());

          gate.claimNextSlot();
          // aaand the boat is full...
          CHECK (not gate.hasFreeSlot());
          CHECK (isSameObject (*gate.next, epoch[0]));
          CHECK (gate.filledSlots() == EXTENT_SIZ-1);
          CHECK (epoch.getFillFactor() == 1);

          // a given Epoch can be checked for relevance against a deadline
          CHECK (gate.deadline() == Time(0,10));

          CHECK (    gate.isAlive (Time(0,5)));
          CHECK (    gate.isAlive (Time(999,9)));
          CHECK (not gate.isAlive (Time(0,10)));
          CHECK (not gate.isAlive (Time(1,10)));
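          // i.e. an Epoch counts as "alive" strictly before its deadline
          // and as dead at (or after) that point in time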
        }



      /** @test placement of Activity allocations into the suitable Epoch,
       *        extension of the Epoch grid, overflow handling and clean-up. */
      void
      placeActivity()
        {
          BlockFlow bFlow;

          Time t1 = Time{  0,10};
          Time t2 = Time{500,10};
          Time t3 = Time{  0,11};

          // no Epoch established yet...
          auto& a1 = bFlow.until(t1).create();
          CHECK (watch(bFlow).allEpochs() == "10s200ms"_expect);
          CHECK (watch(bFlow).find(a1) == "10s200ms"_expect);

          // setup Epoch grid into the future
          auto& a3 = bFlow.until(t3).create();
          CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect);
          CHECK (watch(bFlow).find(a3) == "11s"_expect);

          // associate to existing Epoch
          auto& a2 = bFlow.until(t2).create();
          CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect);
          CHECK (watch(bFlow).find(a2) == "10s600ms"_expect);

          Time t0 = Time{0,5};
          // a late (past) Activity is placed into the oldest Epoch alive
          auto& a0 = bFlow.until(t0).create();
          CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s"_expect);
          CHECK (watch(bFlow).find(a0) == "10s200ms"_expect);

          // provoke Epoch overflow by exhausting all available storage slots
          BlockFlow::AllocatorHandle allocHandle = bFlow.until(Time{300,10});
          for (uint i=1; i<EXTENT_SIZ; ++i)
            allocHandle.create();

          CHECK (allocHandle.currDeadline() == Time(400,10));
          CHECK (not allocHandle.hasFreeSlot());

          // ...causing the next allocation to be shifted into the subsequent Epoch
          auto& a4 = allocHandle.create();
          CHECK (allocHandle.currDeadline() == Time(600,10));
          CHECK (allocHandle.hasFreeSlot());
          CHECK (watch(bFlow).find(a4) == "10s600ms"_expect);

          // fill up and exhaust this Epoch too....
          for (uint i=1; i<EXTENT_SIZ; ++i)
            allocHandle.create();

          // the handle has thus moved on to the Epoch after next
          CHECK (allocHandle.currDeadline() == Time(800,10));
          CHECK (allocHandle.hasFreeSlot());

          // even an allocation with a much earlier deadline is shifted here now
          auto& a5 = bFlow.until(Time{220,10}).create();
          CHECK (watch(bFlow).find(a5) == "10s800ms"_expect);

          // repeat the same pattern, but now extending into uncharted Epochs
          allocHandle = bFlow.until(Time{900,10});
          for (uint i=2; i<EXTENT_SIZ; ++i)
            allocHandle.create();

          CHECK (allocHandle.currDeadline() == Time(0,11));
          CHECK (not allocHandle.hasFreeSlot());
          auto& a6 = bFlow.until(Time{850,10}).create();
          // Note: four overflow events were encountered, decreasing the Epoch spacing used for new Epochs
          CHECK (watch(bFlow).find(a6) == "11s192ms"_expect);
          CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s|11s192ms"_expect);

          auto& a7 = bFlow.until(Time{500,11}).create();
          // this allocation does not count as overflow, but has to expand the Epoch grid, now using the reduced Epoch spacing
          CHECK (watch(bFlow).allEpochs() == "10s200ms|10s400ms|10s600ms|10s800ms|11s|11s192ms|11s384ms|11s576ms"_expect);
          CHECK (watch(bFlow).find(a7) == "11s576ms"_expect);

          // we created 8 elements (a0...a7) and caused three epochs to overflow...
          CHECK (watch(bFlow).cntElm() == 8 + EXTENT_SIZ-1 + EXTENT_SIZ-1 + EXTENT_SIZ-2);

          // on clean-up, the actual fill ratio is used to optimise the Epoch length for better storage usage
          CHECK (bFlow.getEpochStep() == "≺192ms≻"_expect);
          bFlow.discardBefore (Time{999,10});
          CHECK (bFlow.getEpochStep() == "≺218ms≻"_expect);
          CHECK (watch(bFlow).allEpochs() == "11s|11s192ms|11s384ms|11s576ms"_expect);
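          // the step grew from 192ms to 218ms, since some of the discarded Epochs were under-filled;
          // this counteracts the reduction triggered by the preceding overflow events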

          // placed into the oldest Epoch still alive
          auto& a8 = bFlow.until(Time{500,10}).create();
          CHECK (watch(bFlow).find(a8) == "11s192ms"_expect);
        }



      /** @test self-regulation of the Epoch step width: overflow events shrink the step,
       *        while clean-up uses the observed fill factor to steer it back towards an optimum. */
      void
      adjustEpochs()
        {
          BlockFlow bFlow;
          CHECK (bFlow.getEpochStep() == INITIAL_EPOCH_STEP);

          // whenever an Epoch overflow happens, capacity is boosted by reducing the Epoch duration
          bFlow.markEpochOverflow();
          CHECK (bFlow.getEpochStep() == INITIAL_EPOCH_STEP * BOOST_OVERFLOW);
          bFlow.markEpochOverflow();
          CHECK (bFlow.getEpochStep() == INITIAL_EPOCH_STEP * BOOST_OVERFLOW*BOOST_OVERFLOW);

          // To counteract this increase, the actual fill rate of the Extent, observed on clean-up,
          // serves to guess an optimal Epoch duration, which is then smoothed by an exponential moving average

          // Using arbitrary demo values for some fictional Epochs
          TimeVar dur1 = INITIAL_EPOCH_STEP;
          double  fac1 = 0.8;
          TimeVar dur2 = INITIAL_EPOCH_STEP * BOOST_OVERFLOW;
          double  fac2 = 0.3;

          double goal1 = double(_raw(dur1)) / (fac1/TARGET_FILL);
          double goal2 = double(_raw(dur2)) / (fac2/TARGET_FILL);
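          // i.e. goal ≡ duration · TARGET_FILL / observed-fill :
          // an under-filled Epoch suggests a longer step, an over-filled one a shorter step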

          auto movingAverage = [&](TimeValue old, double contribution)
                                  {
                                    auto N = AVERAGE_EPOCHS;
                                    auto averageTicks = double(_raw(old))*(N-1)/N + contribution/N;
                                    return TimeValue{raw_time_64 (floor (averageTicks))};
                                  };
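          // worked example (with a hypothetical N = 4): an old step of 200ms and a contribution
          // of 160ms yield 200ms·3/4 + 160ms/4 = 190ms — each new contribution enters with weight 1/N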

          TimeVar step = bFlow.getEpochStep();
          bFlow.markEpochUnderflow (dur1, fac1);
          CHECK (bFlow.getEpochStep() == movingAverage(step, goal1));

          step = bFlow.getEpochStep();
          bFlow.markEpochUnderflow (dur2, fac2);
          CHECK (bFlow.getEpochStep() == movingAverage(step, goal2));
        }



      /** @test the Epoch step is reduced in response to an announced additional frame flow. */
      void
      announceLoad()
        {
          BlockFlow bFlow;

          Duration initialStep{bFlow.getEpochStep()};
          size_t initialFPS = Strategy{}.initialFrameRate();

          // signal that the load will be doubled
          bFlow.announceAdditionalFlow (FrameRate(initialFPS));
          CHECK (bFlow.getEpochStep() * 2 == initialStep);

          // signal that the load will again be doubled
          bFlow.announceAdditionalFlow (FrameRate(2*initialFPS));
          CHECK (bFlow.getEpochStep() * 4 == initialStep);
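          // the Epoch step thus scales inversely with the overall announced frame flow:
          // at twice (four times) the flow, each Epoch spans half (a quarter of) the time,
          // so a storage Extent of fixed size still covers about the same number of Activities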
        }




      /** @test investigate the runtime cost of the BlockFlow allocation scheme under a simulated
       *        scheduler load, compared against pre-allocated storage, plain heap allocation
       *        and shared_ptr based management. */
      void
      storageFlow()
        {
          const size_t FPS        = 200;
          const size_t TICK_P_S   = FPS * ACTIVITIES_P_FR;     // Simulated throughput 200 frames per second
          const raw_time_64 STP   = Time::SCALE / TICK_P_S;    // Simulation stepping (here 2 steps per ms)
          const raw_time_64 RUN   = _raw(Time{0,0,3});         // nominal length of the simulation time axis
          Offset BASE_DEADLINE{FSecs{1,2}};                    // base pre-roll before deadline
          Offset SPREAD_DEAD{FSecs{2,100}};                    // random spread of deadline around base
          const uint INVOKE_LAG   = _raw(Time{250,0}) /STP;    // „invoke“ the Activity after simulated 250ms (≙ 500 steps)
          const uint CLEAN_UP     = _raw(Time{100,0}) /STP;    // perform clean-up every 200 steps
          const uint INSTANCES    = RUN /STP;                  // 120000 Activity records to send through the test subject
          const uint MAX_TIME     = INSTANCES
                                   +INVOKE_LAG+2*CLEAN_UP;     // overall count of Test steps to perform
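          // Simulation scheme: each step allocates one Activity with a deadline roughly
          // BASE_DEADLINE (± SPREAD_DEAD) ahead of its nominal time, while the Activity allocated
          // INVOKE_LAG steps earlier is »invoked« — read and disposed — by the test setups below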

          using TestData = vector<pair<TimeVar, size_t>>;
          using Subjects = vector<reference_wrapper<Activity>>;

          // pre-generate random test data
          TestData testData{INSTANCES};
          for (size_t i=0; i<INSTANCES; ++i)
            {
              const size_t SPREAD   = 2*_raw(SPREAD_DEAD);
              const size_t MIN_DEAD = _raw(BASE_DEADLINE) - _raw(SPREAD_DEAD);

              auto& [t,r] = testData[i];
              r = rani (SPREAD);
              t = TimeValue(i*STP + MIN_DEAD + r);
            }

          Activity dummy; // reserve memory for test subject index
          Subjects subject{INSTANCES, std::ref(dummy)};

          auto runTest = [&](auto allocate, auto invoke) -> size_t
            {
              // allocate Activity record for deadline and with given random payload
              ASSERT_VALID_SIGNATURE (decltype(allocate), Activity&(Time, size_t));

              // access the given Activity, read the payload, then trigger disposal
              ASSERT_VALID_SIGNATURE (decltype(invoke), size_t(Activity&));

              size_t checksum{0};
              for (size_t i=0; i<MAX_TIME; ++i)
                {
                  if (i < INSTANCES)
                    {
                      auto const& data = testData[i];
                      subject[i] = allocate(data.first, data.second);
                    }
                  if (INVOKE_LAG <= i and i-INVOKE_LAG < INSTANCES)
                    checksum += invoke(subject[i-INVOKE_LAG]);
                }
              return checksum;
            };
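          // note: all four setups below run through this same driver; the checksum accumulated
          //       over the invoked payloads must come out identical for each allocation scheme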

          auto benchmark = [INSTANCES](auto invokeTest)
            { // does the timing measurement with result in µ-seconds
              return lib::test::benchmarkTime(invokeTest, INSTANCES);
            };



          /* =========== Test-Setup-1: no individual allocations/deallocations ========== */
          size_t sum1{0};
          vector<Activity> storage{INSTANCES};
          auto noAlloc = [&]{ // use pre-allocated storage block
              auto allocate = [i=0, &storage](Time, size_t check) mutable -> Activity&
                {
                  return *new(&storage[i++]) Activity{check, size_t{55}};
                };
              auto invoke = [](Activity& feedActivity)
                {
                  return feedActivity.data_.feed.one;
                };

              sum1 = runTest (allocate, invoke);
            };
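          // Setup-1 thus establishes the baseline: placement-new into a pre-allocated block,
          // without any allocation, deallocation or clean-up overhead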


          /* =========== Test-Setup-2: individual heap allocations ========== */
          size_t sum2{0};
          auto heapAlloc = [&]{
              auto allocate = [](Time, size_t check) mutable -> Activity&
                {
                  return *new Activity{check, size_t{55}};
                };
              auto invoke = [](Activity& feedActivity)
                {
                  size_t check = feedActivity.data_.feed.one;
                  delete &feedActivity;
                  return check;
                };

              sum2 = runTest (allocate, invoke);
            };


          /* =========== Test-Setup-3: manage individually by ref-cnt ========== */
          size_t sum3{0};
          vector<std::shared_ptr<Activity>> manager{INSTANCES};
          auto sharedAlloc = [&]{
              auto allocate = [&, i=0](Time, size_t check) mutable -> Activity&
                {
                  Activity* a = new Activity{check, size_t{55}};
                  manager[i].reset(a);
                  ++i;
                  return *a;
                };
              auto invoke = [&, i=0](Activity& feedActivity) mutable
                {
                  size_t check = feedActivity.data_.feed.one;
                  manager[i].reset();
                  ++i;       // advance, so each managed Activity is released in allocation order
                  return check;
                };

              sum3 = runTest (allocate, invoke);
            };


          /* =========== Test-Setup-4: use BlockFlow allocation scheme ========== */

          size_t sum4{0};
          gear::BlockFlow<blockFlow::RenderConfig> blockFlow;   // test subject for this setup (exact config type assumed)
          // Note: using the RenderConfig, which uses larger blocks and more pre-allocation
          auto blockFlowAlloc = [&]{
              auto allocHandle = blockFlow.until(Time{BASE_DEADLINE});
              auto allocate = [&, j=0](Time t, size_t check) mutable -> Activity&
                {
                  if (++j >= 10)  // typically several Activities are allocated on the same deadline
                    {
                      allocHandle = blockFlow.until(t);
                      j = 0;
                    }
                  return allocHandle.create (check, size_t{55});
                };
              auto invoke = [&, i=0](Activity& feedActivity) mutable
                {
                  size_t check = feedActivity.data_.feed.one;
                  if (i % CLEAN_UP == 0)
                    blockFlow.discardBefore (Time{TimeValue{i*STP}});
                  ++i;
                  return check;
                };

              sum4 = runTest (allocate, invoke);
            };
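          // usage pattern of the BlockFlow scheme: an AllocatorHandle obtained via until(deadline)
          // is re-used for a batch of allocations with similar deadlines, while discardBefore(now)
          // periodically releases expired Epochs wholesale — no per-Activity deallocation happens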

          // INVOKE Setup-1
          auto time_noAlloc = benchmark(noAlloc);

          // INVOKE Setup-2
          auto time_heapAlloc = benchmark(heapAlloc);

          // INVOKE Setup-3
          auto time_sharedAlloc = benchmark(sharedAlloc);

          cout<<"\n\n■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■□■"<<endl;

          // INVOKE Setup-4
          auto time_blockFlow = benchmark(blockFlowAlloc);

          Duration expectStep{FSecs{blockFlow.framesPerEpoch(), FPS} * 9/10};

          cout<<"\n___Microbenchmark____"
              <<"\nnoAlloc : "<<time_noAlloc
              <<"\nheapAlloc : "<<time_heapAlloc
              <<"\nsharedAlloc : "<<time_sharedAlloc
              <<"\nblockFlow : "<<time_blockFlow
              <<"\n_____________________\n"
              <<"\ninstances.... "<<INSTANCES
              <<"\nfps.......... "<<FPS
              <<"\nActivities/s. "<<TICK_P_S
              <<"\nEpoch(expect) "<<expectStep
              <<"\nEpoch (real) "<<blockFlow.getEpochStep()
              <<"\ncnt Epochs... "<<watch(blockFlow).cntEpochs()
              <<"\nalloc pool... "<<watch(blockFlow).poolSize()
              <<endl;

          // all Activities have been read in all test cases,
          // yielding identical checksums
          CHECK (sum1 == sum2);
          CHECK (sum1 == sum3);
          CHECK (sum1 == sum4);

          // the Epoch spacing regulation must converge to within ±10ms
          CHECK (expectStep - blockFlow.getEpochStep() < Time(10,0));

          // after the initial overload is levelled out,
          // only a small number of Epochs should remain active
          CHECK (watch(blockFlow).cntEpochs() < 8);

          // Since the runtime depends on Debug vs. Release builds, we can only check against a very rough margin.
          // With -O3, this amortised allocation time should be way below time_sharedAlloc
          CHECK (time_blockFlow < 800);
        }
    };


  LAUNCHER (BlockFlow_test, "unit engine");


}}} // namespace vault::gear::test