Lumiera 0.pre.04
»edit your freedom«
Loading...
Searching...
No Matches
test-chain-load-test.cpp
Go to the documentation of this file.
1/*
2 TestChainLoad(Test) - verify diagnostic setup to watch scheduler activities
3
4 Copyright (C)
5 2023, Hermann Vosseler <Ichthyostega@web.de>
6
7  **Lumiera** is free software; you can redistribute it and/or modify it
8  under the terms of the GNU General Public License as published by the
9  Free Software Foundation; either version 2 of the License, or (at your
10  option) any later version. See the file COPYING for further details.
11
12* *****************************************************************/
13
19#include "lib/test/run.hpp"
21#include "lib/format-string.hpp"
22#include "test-chain-load.hpp"
23#include "vault/gear/job.h"
24#include "lib/util.hpp"
25
26#include <array>
27
28
29using util::_Fmt;
30using util::isnil;
32using std::array;
33
34
35namespace vault{
36namespace gear {
37namespace test {
38
39 namespace { // shorthands and parameters for test...
40
43 using Node = ChainLoad16::Node;
44 auto isStartNode = [](Node& n){ return isStart(n); };
45 auto isInnerNode = [](Node& n){ return isInner(n); };
46 auto isExitNode = [](Node& n){ return isExit(n); };
47
48 }//(End)test definitions
49
50
51
52
53 /*****************************************************************/
62 class TestChainLoad_test : public Test
63 {
64
65 virtual void
83
84
89 void
91 {
92 auto testLoad =
96
97 // while building the graph, node hashes are computed
98 CHECK (testLoad.getHash() == 0x554F5086DE5B0861);
99
100
101 BlockFlowAlloc bFlow;
103 Scheduler scheduler{bFlow, watch};
104
105 testLoad.setupSchedule(scheduler)
106 .launch_and_wait();
107
108 // invocation through Scheduler has reproduced all node hashes
109 CHECK (testLoad.getHash() == 0x554F5086DE5B0861);
110 }
111
112
113
116 void
118 {
119 Node n0; // Default-created empty Node
120 CHECK (n0.hash == 0);
121 CHECK (n0.level == 0);
122 CHECK (n0.weight == 0);
123 CHECK (n0.pred.size() == 0 );
124 CHECK (n0.succ.size() == 0 );
125 CHECK (n0.pred == Node::Tab{0});
126 CHECK (n0.succ == Node::Tab{0});
127
128 Node n1{23}, n2{55}; // further Nodes with initial seed hash
129 CHECK (n1.hash == 23);
130 CHECK (n2.hash == 55);
131
132 CHECK (0 == n0.calculate()); // hash calculation is NOP on unconnected Nodes
133 CHECK (0 == n0.hash);
134 CHECK (23 == n1.calculate());
135 CHECK (23 == n1.hash);
136 CHECK (55 == n2.calculate());
137 CHECK (55 == n2.hash);
138
139 n0.addPred(n1); // establish bidirectional link between Nodes
140 CHECK (isSameObject (*n0.pred[0], n1));
141 CHECK (isSameObject (*n1.succ[0], n0));
142 CHECK (not n0.pred[1]);
143 CHECK (not n1.succ[1]);
144 CHECK (n2.pred == Node::Tab{0});
145 CHECK (n2.succ == Node::Tab{0});
146
147 n2.addSucc(n0); // works likewise in the other direction
148 CHECK (isSameObject (*n0.pred[0], n1));
149 CHECK (isSameObject (*n0.pred[1], n2)); // next link added into next free slot
150 CHECK (isSameObject (*n2.succ[0], n0));
151 CHECK (not n0.pred[2]);
152 CHECK (not n2.succ[1]);
153
154 CHECK (n0.hash == 0);
155 n0.calculate(); // but now hash calculation combines predecessors
156 CHECK (n0.hash == 0x53F8F4753B85558A);
157
158 Node n00; // another Node...
159 n00.addPred(n2) // just adding the predecessors in reversed order
160 .addPred(n1);
161
162 CHECK (n00.hash == 0);
163 n00.calculate(); // ==> hash is different, since it depends on order
164 CHECK (n00.hash == 0xECA6BE804934CAF2);
165 CHECK (n0.hash == 0x53F8F4753B85558A);
166
167 CHECK (isSameObject (*n1.succ[0], n0));
168 CHECK (isSameObject (*n1.succ[1], n00));
169 CHECK (isSameObject (*n2.succ[0], n0));
170 CHECK (isSameObject (*n2.succ[1], n00));
171 CHECK (isSameObject (*n00.pred[0], n2));
172 CHECK (isSameObject (*n00.pred[1], n1));
173 CHECK (isSameObject (*n0.pred[0], n1));
174 CHECK (isSameObject (*n0.pred[1], n2));
175
176 CHECK (n00.hash == 0xECA6BE804934CAF2);
177 n00.calculate(); // calculation is NOT idempotent (inherently statefull)
178 CHECK (n00.hash == 0xB682F06D29B165C0);
179
180 CHECK (isnil (n0.succ)); // number of predecessors or successors properly accounted for
181 CHECK (isnil (n00.succ));
182 CHECK (n00.succ.empty());
183 CHECK (0 == n00.succ.size());
184 CHECK (2 == n00.pred.size());
185 CHECK (2 == n0.pred.size());
186 CHECK (2 == n1.succ.size());
187 CHECK (2 == n2.succ.size());
188 CHECK (isnil (n1.pred));
189 CHECK (isnil (n2.pred));
190 }
191
192
193
201 void
203 {
204 auto graph = ChainLoad16{32}
205 .buildTopology();
206
207 CHECK (graph.topLevel() == 31);
208 CHECK (graph.getSeed() == 0);
209 CHECK (graph.getHash() == 0xB3445F1240A1B05F);
210
211 auto* node = & *graph.allNodes();
212 CHECK (node->hash == graph.getSeed());
213 CHECK (node->succ.size() == 1);
214 CHECK (isSameObject(*node, *node->succ[0]->pred[0]));
215
216 size_t steps{0};
217 while (not isnil(node->succ))
218 {// verify node connectivity
219 ++steps;
220 node = node->succ[0];
221 CHECK (steps == node->level);
222 CHECK (1 == node->pred.size());
223 size_t exHash = node->hash;
224
225 // recompute the hash -> reproducible
226 node->hash = 0;
227 node->calculate();
228 CHECK (exHash == node->hash);
229
230 // explicitly compute the hash (formula taken from boost)
231 node->hash = 0;
232 lib::hash::combine (node->hash, node->pred[0]->hash);
233 CHECK (exHash == node->hash);
234 }
235 // got a complete chain using all allocated nodes
236 CHECK (steps == 31);
237 CHECK (steps == graph.topLevel());
238 CHECK (node->hash == 0x5CDF544B70E59866);
239
240 // Since this graph has only a single exit-node,
241 // the global hash of the graph is derived from this hash
242 size_t globalHash{0};
243 lib::hash::combine (globalHash, node->hash);
244 CHECK (globalHash == graph.getHash());
245 CHECK (globalHash == 0xB3445F1240A1B05F);
246 }
247
248
249
250
251
261 void
263 {
264 ChainLoad16 graph{32};
265
266 // moderate symmetrical expansion with 40% probability and maximal +2 links
267 graph.expansionRule(graph.rule().probability(0.4).maxVal(2))
268 .buildTopology()
269// .printTopologyDOT()
270// .printTopologyStatistics()
271 ;
272 CHECK (graph.getHash() == 0x6EDD7B92F12E9A37);
273
274 auto stat = graph.computeGraphStatistics();
275 CHECK (stat.indicators[STAT_NODE].cnt == 32); // the 32 Nodes...
276 CHECK (stat.levels == 11); // ... were organised into 11 levels
277 CHECK (stat.indicators[STAT_FORK].cnt == 4); // we got 4 »Fork« events
278 CHECK (stat.indicators[STAT_SEED].cnt == 1); // one start node
279 CHECK (stat.indicators[STAT_EXIT].cnt == 1); // and one exit node at end
280 CHECK (stat.indicators[STAT_NODE].pL == "2.9090909"_expect); // ∅ 3 Nodes / level
281 CHECK (stat.indicators[STAT_NODE].cL == "0.640625"_expect); // with Node density concentrated towards end
282
283
284 // with additional re-shuffling, probability acts independent in each branch
285 // leading to more chances to draw a »fork«, leading to a faster expanding graph
286 graph.expansionRule(graph.rule().probability(0.4).maxVal(2).shuffle(23))
287 .buildTopology()
288// .printTopologyDOT()
289// .printTopologyStatistics()
290 ;
291 CHECK (graph.getHash() == 0x710D010554FEA614);
292
293 stat = graph.computeGraphStatistics();
294 CHECK (stat.levels == 7); // expands faster, with only 7 levels
295 CHECK (stat.indicators[STAT_NODE].pL == "4.5714286"_expect); // this time ∅ 4.6 Nodes / level
296 CHECK (stat.indicators[STAT_FORK].cnt == 7); // 7 »Fork« events
297 CHECK (stat.indicators[STAT_EXIT].cnt == 10); // but 10 »Exit« nodes....
298 CHECK (stat.indicators[STAT_JOIN].cnt == 1); // and even one »Join« node....
299 CHECK (stat.indicators[STAT_EXIT].cL == 1); // which are totally concentrated towards end
300 CHECK (stat.indicators[STAT_JOIN].cL == 1); // when nodes are exhausted
301
302
303 // if the generation is allowed to run for longer,
304 // while more constrained in width...
305 TestChainLoad<8> gra_2{256};
306 gra_2.expansionRule(gra_2.rule().probability(0.4).maxVal(2).shuffle(23))
308// .printTopologyDOT()
309// .printTopologyStatistics()
310 ;
311 CHECK (gra_2.getHash() == 0x619491B22C3F8A6F);
312
313 stat = gra_2.computeGraphStatistics();
314 CHECK (stat.levels == 36); // much more levels, as can be expected
315 CHECK (stat.indicators[STAT_NODE].pL == "7.1111111"_expect); // ∅ 7 Nodes per level
316 CHECK (stat.indicators[STAT_JOIN].pL == "0.77777778"_expect); // but also almost one join per level to deal with the limitation
317 CHECK (stat.indicators[STAT_FORK].frac == "0.24609375"_expect); // 25% forks (there is just not enough room for more forks)
318 CHECK (stat.indicators[STAT_JOIN].frac == "0.109375"_expect); // and 10% joins
319 CHECK (stat.indicators[STAT_EXIT].cnt == 3); // ...leading to 3 »Exit« nodes
320 CHECK (stat.indicators[STAT_EXIT].cL == 1); // ....located at the very end
321 }
322
323
324
325
326
333 void
335 {
336 ChainLoad16 graph{32};
337
338 // expand immediately at start and then gradually reduce / join chains
339 graph.expansionRule(graph.rule_atStart(8))
340 .reductionRule(graph.rule().probability(0.2).maxVal(3).shuffle(555))
341 .buildTopology()
342// .printTopologyDOT()
343// .printTopologyStatistics()
344 ;
345 CHECK (graph.getHash() == 0x3E9BFAE5E686BEB4);
346
347 auto stat = graph.computeGraphStatistics();
348 CHECK (stat.levels == 8); // This connection pattern filled 8 levels
349 CHECK (stat.indicators[STAT_JOIN].cnt == 4); // we got 4 »Join« events (reductions=
350 CHECK (stat.indicators[STAT_FORK].cnt == 1); // and the single expansion/fork
351 CHECK (stat.indicators[STAT_FORK].cL == 0.0); // ...sitting right at the beginning
352 CHECK (stat.indicators[STAT_NODE].cL == "0.42857143"_expect); // Nodes are concentrated towards the beginning
353
354
355 // expansion and reduction can counterbalance each other
356 graph.expansionRule(graph.rule().probability(0.2).maxVal(3).shuffle(555))
357 .reductionRule(graph.rule().probability(0.2).maxVal(3).shuffle(555))
358 .buildTopology()
359// .printTopologyDOT()
360// .printTopologyStatistics()
361 ;
362 CHECK (graph.getHash() == 0xB0335595D34F1D8D);
363
364 stat = graph.computeGraphStatistics();
365 CHECK (stat.levels == 11); // This example runs a bit longer
366 CHECK (stat.indicators[STAT_NODE].pL == "2.9090909"_expect); // in the middle threading 3-5 Nodes per Level
367 CHECK (stat.indicators[STAT_FORK].cnt == 5); // with 5 expansions
368 CHECK (stat.indicators[STAT_JOIN].cnt == 3); // and 3 reductions
369 CHECK (stat.indicators[STAT_FORK].cL == 0.5); // forks dominating earlier
370 CHECK (stat.indicators[STAT_JOIN].cL == "0.73333333"_expect); // while joins need forks as prerequisite
371
372
373 // expansion bursts can be balanced with a heightened reduction intensity
374 graph.expansionRule(graph.rule().probability(0.3).maxVal(4).shuffle(555))
375 .reductionRule(graph.rule().probability(0.9).maxVal(2).shuffle(555))
376 .buildTopology()
377// .printTopologyDOT()
378// .printTopologyStatistics()
379 ;
380 CHECK (graph.getHash() == 0x220A2E81F65146FC);
381
382 stat = graph.computeGraphStatistics();
383 CHECK (stat.levels == 12); // This graph has a similar outline
384 CHECK (stat.indicators[STAT_NODE].pL == "2.6666667"_expect); // in the middle threading 3-5 Nodes per Level
385 CHECK (stat.indicators[STAT_FORK].cnt == 7); // ...yet with quite different internal structure
386 CHECK (stat.indicators[STAT_JOIN].cnt == 9); //
387 CHECK (stat.indicators[STAT_FORK].cL == "0.41558442"_expect);
388 CHECK (stat.indicators[STAT_JOIN].cL == "0.62626263"_expect);
389 CHECK (stat.indicators[STAT_FORK].pLW == "0.19583333"_expect); // while the densities of forks and joins almost match,
390 CHECK (stat.indicators[STAT_JOIN].pLW == "0.26527778"_expect); // a slightly higher reduction density leads to convergence eventually
391 }
392
393
394
395
396
402 void
404 {
405 ChainLoad16 graph{32};
406
407 // randomly start new chains, to be carried-on linearly
408 graph.seedingRule(graph.rule().probability(0.2).maxVal(3).shuffle())
409 .buildTopology()
410// .printTopologyDOT()
411// .printTopologyStatistics()
412 ;
413 CHECK (graph.getHash() == 0xBC35A96B3CE1F39F);
414
415 auto stat = graph.computeGraphStatistics();
416 CHECK (stat.levels == 7); // 7 Levels...
417 CHECK (stat.indicators[STAT_SEED].cnt == 12); // overall 12 »Seed« events generated several ongoing chains
418 CHECK (stat.indicators[STAT_FORK].cnt == 0); // yet no branching/expanding
419 CHECK (stat.indicators[STAT_LINK].cnt == 14); // thus more and more chains were just carried on
420 CHECK (stat.indicators[STAT_LINK].pL == 2); // on average 2-3 per level are continuations
421 CHECK (stat.indicators[STAT_NODE].pL == "4.5714286"_expect); // leading to ∅ 4.5 Nodes per level
422 CHECK (stat.indicators[STAT_NODE].cL == "0.734375"_expect); // with nodes amassing towards the end
423 CHECK (stat.indicators[STAT_LINK].cL == "0.64285714"_expect); // because there are increasingly more links to carry-on
424 CHECK (stat.indicators[STAT_JOIN].cL == 1); // while joining only happens at the very end
425
426
427 // combining random seed nodes with reduction leads to a processing pattern
428 // with side-chaines successively joined into a single common result
429 graph.seedingRule(graph.rule().probability(0.2).maxVal(3).shuffle())
430 .reductionRule(graph.rule().probability(0.9).maxVal(2))
431 .buildTopology()
432// .printTopologyDOT()
433// .printTopologyStatistics()
434 ;
435 CHECK (graph.getHash() == 0x3DFA720156540247);
436
437 stat = graph.computeGraphStatistics();
438 CHECK (stat.indicators[STAT_SEED].cnt == 11); // the same number of 11 »Seed« events
439 CHECK (stat.indicators[STAT_JOIN].cnt == 6); // but now 6 joining nodes
440 CHECK (stat.indicators[STAT_LINK].cnt == 15); // and less carry-on
441 CHECK (stat.indicators[STAT_FORK].cnt == 0); // no branching
442 CHECK (stat.indicators[STAT_NODE].pL == 3.2); // leading a slightly leaner graph with ∅ 3.2 Nodes per level
443 CHECK (stat.indicators[STAT_NODE].cL == "0.5625"_expect); // and also slightly more evenly spaced this time
444 CHECK (stat.indicators[STAT_LINK].cL == "0.55555556"_expect); // links are also more encountered in the middle
445 CHECK (stat.indicators[STAT_JOIN].cL == "0.72222222"_expect); // and also joins are happening underway
446 CHECK (stat.levels == 10); // mostly because a leaner graph takes longer to use 32 Nodes
447 }
448
449
450
451
452
459 void
461 {
462 ChainLoad16 graph{32};
463
464 // terminate chains randomly
465 graph.pruningRule(graph.rule().probability(0.2))
466 .buildTopology()
467// .printTopologyDOT()
468// .printTopologyStatistics()
469 ;
470 CHECK (graph.getHash() == 0x660BD1CD261A990);
471
472 auto stat = graph.computeGraphStatistics();
473 CHECK (stat.levels == 32); // only a single line of connections...
474 CHECK (stat.segments == 8); // albeit severed into 8 segments
475 CHECK (stat.indicators[STAT_NODE].pS == 4); // with always 4 Nodes per segment
476 CHECK (stat.indicators[STAT_NODE].pL == 1); // and only ever a single node per level
477 CHECK (stat.indicators[STAT_SEED].cnt == 8); // consequently we get 8 »Seed« nodes
478 CHECK (stat.indicators[STAT_EXIT].cnt == 8); // 8 »Exit« nodes
479 CHECK (stat.indicators[STAT_LINK].cnt == 16); // and 16 interconnecting links
480
481
482 // combined with expansion, several tree-shaped segments emerge
483 graph.pruningRule(graph.rule().probability(0.2))
484 .expansionRule(graph.rule().probability(0.6))
485 .setSeed(10101)
486 .buildTopology()
487// .printTopologyDOT()
488// .printTopologyStatistics()
489 ;
490 CHECK (graph.getHash() == 0x1D0A7C39647340AA);
491
492 stat = graph.computeGraphStatistics();
493 CHECK (stat.levels == 14); //
494 CHECK (stat.segments == 5); // this time the graph is segregated into 5 parts
495 CHECK (stat.indicators[STAT_NODE].pS == "6.4"_expect); // with 4 Nodes per segment
496 CHECK (stat.indicators[STAT_FORK].sL == "0"_expect); // where »Fork« is always placed at the beginning of each segment
497 CHECK (stat.indicators[STAT_EXIT].sL == "1"_expect); // and several »Exit« at the end
498 CHECK (stat.indicators[STAT_EXIT].pS == "3"_expect); // with always 3 exits per segment
499 CHECK (stat.indicators[STAT_SEED].cnt == 5); // so overall we get 5 »Seed« nodes
500 CHECK (stat.indicators[STAT_FORK].cnt == 5); // 5 »Fork« nodes
501 CHECK (stat.indicators[STAT_EXIT].cnt == 15); // 15 »Exit« nodes
502 CHECK (stat.indicators[STAT_LINK].cnt == 12); // and 12 interconnecting links
503 CHECK (stat.indicators[STAT_NODE].pL == "2.2857143"_expect); // leading to ∅ ~2 Nodes per level
504
505
506 // however, by chance, with more randomised pruning points...
507 graph.pruningRule(graph.rule().probability(0.2).shuffle(5))
508 .expansionRule(graph.rule().probability(0.6))
509 .setSeed(10101)
510 .buildTopology()
511// .printTopologyDOT()
512// .printTopologyStatistics()
513 ;
514 CHECK (graph.getHash() == 0x12BB22F76ECC5C1B);
515
516 stat = graph.computeGraphStatistics();
517 CHECK (stat.segments == 1); // ...the graph can evade severing altogether
518 CHECK (stat.indicators[STAT_FORK].cnt == 3); // with overall 3 »Fork«
519 CHECK (stat.indicators[STAT_EXIT].cnt == 10); // and 10 »Exit« nodes
520 CHECK (stat.indicators[STAT_EXIT].pL == "1.6666667"_expect); // ∅ 1.6 exits per level
521 CHECK (stat.indicators[STAT_NODE].pL == "5.3333333"_expect); // ∅ 5.3 nodes per level
522
523
524 graph.expansionRule(graph.rule()); // reset
525
526
527 // combined with a special seeding rule,
528 // which injects /another seed/ in the next level after each seed,
529 // an equilibrium of chain seeding and termination can be achieved...
530 graph.seedingRule(graph.rule_atStart(1))
531 .pruningRule(graph.rule().probability(0.2))
532 .setSeed(10101)
533 .buildTopology()
534// .printTopologyDOT()
535// .printTopologyStatistics()
536 ;
537 CHECK (graph.getHash() == 0xBFFA04FE8202C708);
538
539 // NOTE: this example produced 11 disjoint graph parts,
540 // which however start and end interleaved
541 stat = graph.computeGraphStatistics();
542 CHECK (stat.levels == 12); // Generation carries on for 12 levels
543 CHECK (stat.segments == 1); // NOTE: the detection of segments FAILS here (due to interleaved starts)
544 CHECK (stat.indicators[STAT_SEED].cnt == 12); // 12 »Seed« nodes
545 CHECK (stat.indicators[STAT_EXIT].cnt == 11); // 11 »Exit« nodes (including the isolated, last one)
546 CHECK (stat.indicators[STAT_LINK].cnt == 10); // 10 interconnecting links
547 CHECK (stat.indicators[STAT_JOIN].cnt == 1); // and one additional »Join«
548 CHECK (stat.indicators[STAT_JOIN].cL == "1"_expect); // ....appended at graph completion
549 CHECK (stat.indicators[STAT_NODE].pL == "2.6666667"_expect); // overall ∅ 2⅔ nodes per level (converging ⟶ 3)
550 CHECK (stat.indicators[STAT_NODE].cL == "0.52840909"_expect); // with generally levelled distribution
551 CHECK (stat.indicators[STAT_SEED].cL == "0.5"_expect); // also for the seeds
552 CHECK (stat.indicators[STAT_EXIT].cL == "0.62809917"_expect); // and the exits
553
554
555 // The next example is »interesting« insofar it shows self-similarity
556 // The generation is entirely repetitive and locally predictable,
557 // producing an ongoing sequence of small graph segments,
558 // partially overlapping with interwoven starts.
559 graph.seedingRule(graph.rule().fixedVal(1))
560 .pruningRule(graph.rule().probability(0.5))
561 .reductionRule(graph.rule().probability(0.8).maxVal(4))
562 .setSeed(10101)
563 .buildTopology()
564// .printTopologyDOT()
565// .printTopologyStatistics()
566 ;
567 CHECK (graph.getHash() == 0xFB0A0EA9B7072507);
568
569 stat = graph.computeGraphStatistics();
570 CHECK (stat.levels == 8); // Generation carries on for 13 levels
571 CHECK (stat.indicators[STAT_JOIN].pL == 1); // with one »Join« event per level on average
572 CHECK (stat.indicators[STAT_SEED].cnt == 22); // seeds are injected with /fixed rate/, meaning that
573 CHECK (stat.indicators[STAT_SEED].pL == 2.75); // there is one additional seed for every node in previous level
574 }
575
576
577
578
579
598 void
600 {
601 ChainLoad16 graph{256};
602
603 // This example creates a repetitive, non-expanding stable pattern
604 // comprised of four small graph segments, generated interleaved
605 // Explanation: rule_atLink() triggers when the preceding node is a »Link«
606 graph.seedingRule(graph.rule_atLink(1))
607 .pruningRule(graph.rule().probability(0.4))
608 .reductionRule(graph.rule().probability(0.6).maxVal(5).minVal(2))
609 .setSeed(23)
610 .buildTopology()
611// .printTopologyDOT()
612// .printTopologyStatistics()
613 ;
614 CHECK (graph.getHash() == 0x6B5D7BD3130044E2);
615
616 auto stat = graph.computeGraphStatistics();
617 CHECK (stat.indicators[STAT_NODE].cL == "0.50509511"_expect); // The resulting distribution of nodes is stable and balanced
618 CHECK (stat.levels == 93); // ...arranging the 256 nodes into 93 levels
619 CHECK (stat.indicators[STAT_NODE].pL == "2.7526882"_expect); // ...with ∅ 2.7 nodes per level
620 CHECK (stat.indicators[STAT_SEED].pL == "1.0537634"_expect); // comprised of ∅ 1 seed per level
621 CHECK (stat.indicators[STAT_JOIN].pL == "0.48387097"_expect); // ~ ∅ ½ join per level
622 CHECK (stat.indicators[STAT_EXIT].pL == "0.34408602"_expect); // ~ ∅ ⅓ exit per level
623 CHECK (stat.indicators[STAT_SEED].frac == "0.3828125"_expect); // overall, 38% nodes are seeds
624 CHECK (stat.indicators[STAT_EXIT].frac == "0.125"_expect); // and ⅛ are exit nodes
625 CHECK (stat.indicators[STAT_SEED].cLW == "0.49273514"_expect); // the density centre of all node kinds
626 CHECK (stat.indicators[STAT_LINK].cLW == "0.49588657"_expect); // ...is close to the middle
627 CHECK (stat.indicators[STAT_JOIN].cLW == "0.52481335"_expect);
628 CHECK (stat.indicators[STAT_EXIT].cLW == "0.55716297"_expect);
629
630
631
632 // with only a slight increase in pruning probability
633 // the graph goes into a stable repetition loop rather,
634 // repeating a single shape with 3 seeds, 3 links and one 3-fold join as exit
635 graph.seedingRule(graph.rule_atLink(1))
636 .pruningRule(graph.rule().probability(0.5))
637 .reductionRule(graph.rule().probability(0.6).maxVal(5).minVal(2))
638 .setSeed(23)
639 .buildTopology()
640// .printTopologyDOT()
641// .printTopologyStatistics()
642 ;
643 CHECK (graph.getHash() == 0x20122CF2A1F301D1);
644
645 stat = graph.computeGraphStatistics();
646 CHECK (stat.levels == 77); //
647 CHECK (stat.indicators[STAT_NODE].pL == "3.3246753"_expect); // ∅ 3.3 nodes per level
648 CHECK (stat.indicators[STAT_SEED].frac == "0.421875"_expect); // 42% seed
649 CHECK (stat.indicators[STAT_EXIT].frac == "0.14453125"_expect); // 14% exit
650
651
652
653 // The next example uses a different generation approach:
654 // Here, seeding happens randomly, while every join immediately
655 // forces a prune, so all joins become exit nodes.
656 // With a reduction probability slightly over seed, yet limited reduction strength
657 // the generation goes into a stable repetition loop, yet with rather small graphs,
658 // comprised each of two seeds, two links and a single 2-fold join at exit,
659 // with exit and the two seeds of the following graph happening simultaneously.
660 graph.seedingRule(graph.rule().probability(0.6).maxVal(1))
661 .reductionRule(graph.rule().probability(0.75).maxVal(3))
662 .pruningRule(graph.rule_atJoin(1))
663 .setSeed(47)
664 .buildTopology()
665// .printTopologyDOT()
666// .printTopologyStatistics()
667 ;
668 CHECK (graph.getHash() == 0xB58904674ED84031);
669
670 stat = graph.computeGraphStatistics();
671 CHECK (stat.levels == 104); //
672 CHECK (stat.indicators[STAT_NODE].pL == "2.4615385"_expect); // ∅ 2.5 nodes per level
673 CHECK (stat.indicators[STAT_SEED].frac == "0.40234375"_expect); // 40% seed
674 CHECK (stat.indicators[STAT_EXIT].frac == "0.19921875"_expect); // 20% exit
675 CHECK (stat.indicators[STAT_SEED].pL == "0.99038462"_expect); // resulting in 1 seed per level
676 CHECK (stat.indicators[STAT_EXIT].pL == "0.49038462"_expect); // ½ exit per level
677
678
679 // »short_segments_interleaved«
680 // Increased seed probability combined with overall seed value 0 ◁──── (crucial, other seeds produce larger graphs)
681 // produces what seems to be the best stable repetition loop:
682 // same shape as in preceding, yet interwoven by 2 steps
683 graph.seedingRule(graph.rule().probability(0.8).maxVal(1))
684 .reductionRule(graph.rule().probability(0.75).maxVal(3))
685 .pruningRule(graph.rule_atJoin(1))
686 .setSeed(0)
687 .buildTopology()
688// .printTopologyDOT()
689// .printTopologyStatistics()
690 ;
691 CHECK (graph.getHash() == 0x11B57D9E98FDF6DF);
692
693 stat = graph.computeGraphStatistics();
694 CHECK (stat.levels == 55); // much denser arrangement due to stronger interleaving
695 CHECK (stat.indicators[STAT_NODE].pL == "4.6545455"_expect); // ∅ 4.7 nodes per level — almost twice as much
696 CHECK (stat.indicators[STAT_SEED].frac == "0.3984375"_expect); // 40% seed
697 CHECK (stat.indicators[STAT_EXIT].frac == "0.1953125"_expect); // 20% exit — same fractions
698 CHECK (stat.indicators[STAT_SEED].pL == "1.8545455"_expect); // 1.85 seed per level — higher density
699 CHECK (stat.indicators[STAT_EXIT].pL == "0.90909091"_expect); // 0.9 exit per level
700
701
702 // With just the addition of irregularity through shuffling on the reduction,
703 // a stable and tightly interwoven pattern of medium sized graphs is generated
704 graph.seedingRule(graph.rule().probability(0.8).maxVal(1))
705 .reductionRule(graph.rule().probability(0.75).maxVal(3).shuffle())
706 .pruningRule(graph.rule_atJoin(1))
707 .setSeed(0)
708 .buildTopology()
709// .printTopologyDOT()
710// .printTopologyStatistics()
711 ;
712 CHECK (graph.getHash() == 0x7C0453E7A4F6418D);
713
714 stat = graph.computeGraphStatistics();
715 CHECK (stat.levels == 44); //
716 CHECK (stat.indicators[STAT_NODE].pL == "5.8181818"_expect); // ∅ 5.7 nodes per level
717 CHECK (stat.indicators[STAT_SEED].pL == "2.4318182"_expect); // ∅ 2.4 seeds
718 CHECK (stat.indicators[STAT_LINK].pL == "2.4772727"_expect); // ∅ 2.5 link nodes
719 CHECK (stat.indicators[STAT_EXIT].pL == "1"_expect); // ∅ 1 join/exit nodes — indicating stronger spread/reduction
720
721
722
723 // This example uses another setup, without special rules;
724 // rather, seed, reduction and pruning are tuned to balance each other.
725 // The result is a regular interwoven pattern of very small graphs,
726 // slowly expanding yet stable under constriction of width.
727 // Predominant is a shape with two seeds on two levels, a single link and a 2-fold join;
728 // caused by width constriction, this becomes complemented by larger compounds at intervals.
729 graph.seedingRule(graph.rule().probability(0.8).maxVal(1))
730 .reductionRule(graph.rule().probability(0.75).maxVal(3))
731 .pruningRule(graph.rule().probability(0.55))
732 .setSeed(55) // ◁───────────────────────────────────────────── use 31 for width limited to 8 nodes
733 .buildTopology()
734// .printTopologyDOT()
735// .printTopologyStatistics()
736 ;
737 CHECK (graph.getHash() == 0x904A906B7859301A);
738
739 stat = graph.computeGraphStatistics();
740 CHECK (stat.levels == 21); // ▶ resulting graph is very dense, hitting the parallelisation limit
741 CHECK (stat.indicators[STAT_NODE].pL == "12.190476"_expect); // ∅ more than 12 nodes per level !
742 CHECK (stat.indicators[STAT_SEED].pL == "6.8571429"_expect); // comprised of ∅ 6.9 seeds
743 CHECK (stat.indicators[STAT_LINK].pL == "2.3809524"_expect); // ∅ 2.4 links
744 CHECK (stat.indicators[STAT_JOIN].pL == "2.8095238"_expect); // ∅ 2.8 joins
745 CHECK (stat.indicators[STAT_EXIT].pL == "2.5714286"_expect); // ∅ 2.6 exits
746 CHECK (stat.indicators[STAT_SEED].frac == "0.5625"_expect ); // 56% seed
747 CHECK (stat.indicators[STAT_EXIT].frac == "0.2109375"_expect); // 21% exit
748
749
750
751 // A slight parameters variation generates medium sized graphs, which are deep interwoven;
752 // the generation is slowly expanding, but becomes stable under width constriction
753 graph.seedingRule(graph.rule().probability(0.8).maxVal(1))
754 .reductionRule(graph.rule().probability(0.6).maxVal(5).minVal(2))
755 .pruningRule(graph.rule().probability(0.4))
756 .setSeed(42)
757 .buildTopology()
758// .printTopologyDOT()
759// .printTopologyStatistics()
760 ;
761 CHECK (graph.getHash() == 0x9453C56534FF9CD6);
762
763 stat = graph.computeGraphStatistics();
764 CHECK (stat.levels == 26); //
765 CHECK (stat.indicators[STAT_NODE].pL == "9.8461538"_expect); // ∅ 9.8 nodes per level — ⅓ less dense
766 CHECK (stat.indicators[STAT_SEED].frac == "0.40234375"_expect); // 40% seed
767 CHECK (stat.indicators[STAT_LINK].frac == "0.453125"_expect); // 45% link
768 CHECK (stat.indicators[STAT_JOIN].frac == "0.109375"_expect ); // 11% joins
769 CHECK (stat.indicators[STAT_EXIT].frac == "0.08984375"_expect); // 8% exits — hinting at very strong reduction
770
771
772 // The same setup with different seeing produces a
773 // stable repetitive change of linear chain and small tree with 2 joins
774 graph.seedingRule(graph.rule().probability(0.8).maxVal(2))
775 .reductionRule(graph.rule().probability(0.6).maxVal(5).minVal(2))
776 .pruningRule(graph.rule().probability(0.42))
777 .setSeed(23)
778 .buildTopology()
779// .printTopologyDOT()
780// .printTopologyStatistics()
781 ;
782 CHECK (graph.getHash() == 0xA57727C2ED277C87);
783
784 stat = graph.computeGraphStatistics();
785 CHECK (stat.levels == 129); //
786 CHECK (stat.indicators[STAT_NODE].pL == "1.9844961"_expect); // ∅ ~2 nodes per level — much lesser density
787 CHECK (stat.indicators[STAT_SEED].frac == "0.3359375"_expect); // 33% seed
788 CHECK (stat.indicators[STAT_LINK].frac == "0.4140625"_expect); // 42% link
789 CHECK (stat.indicators[STAT_JOIN].frac == "0.1640625"_expect); // 16% join
790 CHECK (stat.indicators[STAT_EXIT].frac == "0.171875"_expect); // 17% exit — only a 2:1 reduction on average
791
792
793 // With added shuffling in the seed rule, and under width constriction,
794 // an irregular sequence of small to large and strongly interwoven graphs emerges.
795 graph.seedingRule(graph.rule().probability(0.8).maxVal(2).shuffle())
796 .reductionRule(graph.rule().probability(0.6).maxVal(5).minVal(2))
797 .pruningRule(graph.rule().probability(0.42))
798 .setSeed(23)
799 .buildTopology()
800// .printTopologyDOT()
801// .printTopologyStatistics()
802 ;
803 CHECK (graph.getHash() == 0x4D0575F8BD269FC3);
804
805 stat = graph.computeGraphStatistics();
806 CHECK (stat.levels == 20); // rather dense
807 CHECK (stat.indicators[STAT_NODE].pL == "12.8"_expect); // ∅ 12.8 nodes per level
808 CHECK (stat.indicators[STAT_SEED].pL == "7.65"_expect); // ∅ 7.7 seeds
809 CHECK (stat.indicators[STAT_LINK].pL == "3.15"_expect); // ∅ 3 links
810 CHECK (stat.indicators[STAT_JOIN].pL == "1.9"_expect); // ∅ 1.9 joins
811 CHECK (stat.indicators[STAT_EXIT].pL == "0.95"_expect); // ∅ ~1 exit per level
812
813
814
815 // »chain_loadBursts«
816 // The final example attempts to balance expansion and reduction forces.
817 // Since reduction needs expanded nodes to work on, expansion always gets
818 // a head start and we need to tune reduction to slightly higher strength
819 // to ensure the graph width does not explode. The result is one single
820 // graph with increasingly complex connections, which can expand into
821 // width limitation at places, but also collapse to a single thread.
822 // The seed controls how fast the onset of the pattern happens.
823 // low values -> long single-chain prelude
824 // seed ≔ 55 -> prelude with 2 chains, then join, then onset at level 17
825 // high values -> massive onset quickly going into saturation
826 graph.expansionRule(graph.rule().probability(0.27).maxVal(4))
827 .reductionRule(graph.rule().probability(0.44).maxVal(6).minVal(2))
828 .seedingRule(graph.rule())
829 .pruningRule(graph.rule())
830 .setSeed(62)
831 .buildTopology()
832// .printTopologyDOT()
833// .printTopologyStatistics()
834 ;
835 CHECK (graph.getHash() == 0x25114F8770B1B78E);
836
837 stat = graph.computeGraphStatistics();
838 CHECK (stat.levels == 30); // rather high concurrency
839 CHECK (stat.indicators[STAT_SEED].cnt == 1); // a single seed
840 CHECK (stat.indicators[STAT_EXIT].cnt == 4); // ...and 4 exit when running out of node space
841 CHECK (stat.indicators[STAT_NODE].pL == "8.5333333"_expect); // ∅ 8.25 nodes per level
842 CHECK (stat.indicators[STAT_FORK].frac == "0.16015625"_expect); // 16% forks
843 CHECK (stat.indicators[STAT_LINK].frac == "0.76171875"_expect); // 77% links
844 CHECK (stat.indicators[STAT_JOIN].frac == "0.1015625"_expect); // 10% joins
845 CHECK (stat.indicators[STAT_KNOT].frac == "0.0390625"_expect); // 3% »Knot« nodes which both join and fork
846 CHECK (stat.indicators[STAT_FORK].cLW == "0.43298744"_expect); // density centre of forks lies earlier
847 CHECK (stat.indicators[STAT_JOIN].cLW == "0.64466378"_expect); // while density centre of joins leans rather towards end
848 }
849
850
851
852
853
854
855
856
859 void
// ── Test case: exercise ComputationalLoad, the calibratable synthetic CPU burner
// used by node jobs. Verifies the default time base, the effect of calibration,
// the optional allocation-heavy mode, and linear scaling of invoke(scaleStep).
// NOTE(review): the function-name line (original source line 860) was stripped
// by the hyperlink removal of this doxygen rendering; code kept byte-identical.
861 {
862 ComputationalLoad cpuLoad;
// default time base for one invocation is 100µs (pinned by this CHECK)
863 CHECK (cpuLoad.timeBase == 100us);
864
865 double micros = cpuLoad.invoke();
// before calibration only a very loose corridor can be asserted
866 CHECK (micros < 2000);
867 CHECK (micros > 2);
868
869 cpuLoad.calibrate();
870
// after calibration the invocation should land close to the 100µs time base
// NOTE(review): these corridor checks assume a mostly idle machine — they may
// flake under heavy system load; tolerance bounds look deliberately generous.
871 micros = cpuLoad.invoke();
872 CHECK (micros < 133);
873 CHECK (micros > 80);
874
875 micros = cpuLoad.benchmark();
876 CHECK (micros < 110);
877 CHECK (micros > 90);
878
// repeat the same corridor checks with the allocation-heavy load variant
879 cpuLoad.useAllocation = true;
880 micros = cpuLoad.invoke();
881 CHECK (micros < 133);
882 CHECK (micros > 80);
883
884 micros = cpuLoad.benchmark();
885 CHECK (micros < 110);
886 CHECK (micros > 90);
887
// switch to a 1ms time base and 100× the memory size, then re-calibrate
888 cpuLoad.timeBase = 1ms;
889 cpuLoad.sizeBase *= 100;
890 cpuLoad.calibrate();
891
// invoke(n) is expected to scale roughly linearly with the scale step:
// 1× ≳ 900µs, 5× ≳ 4.6ms, 10× ≳ 9.5ms, 100× ≳ 95ms (lower bounds only)
892 cpuLoad.useAllocation = false;
893 micros = cpuLoad.invoke();
894 CHECK (micros > 900);
895 micros = cpuLoad.invoke(5);
896 CHECK (micros > 4600);
897 micros = cpuLoad.invoke(10);
898 CHECK (micros > 9500);
899 micros = cpuLoad.invoke(100);
900 CHECK (micros > 95000);
901
// ...and the same linear scaling must hold in allocation mode
902 cpuLoad.useAllocation = true;
903 micros = cpuLoad.invoke();
904 CHECK (micros > 900);
905 micros = cpuLoad.invoke(5);
906 CHECK (micros > 4600);
907 micros = cpuLoad.invoke(10);
908 CHECK (micros > 9500);
909 micros = cpuLoad.invoke(100);
910 CHECK (micros > 95000);
911 }
912
913
914
923 void
// ── Test case: build a small 32-node chain-load graph and verify that node
// hashes can be cleared, re-seeded and deterministically recalculated, and
// that the global graph hash is the combination of all exit-node hashes.
// NOTE(review): the function-name line (original source line 924) was stripped
// by the hyperlink removal of this doxygen rendering; code kept byte-identical.
925 {
926 ChainLoad16 graph{32};
// shape the topology: aggressive expansion (p=0.8, max 1), frequent pruning
// and a 50% chance to attach a weight to each node
927 graph.expansionRule(graph.rule().probability(0.8).maxVal(1))
928 .pruningRule(graph.rule().probability(0.6))
929 .weightRule((graph.rule().probability(0.5)))
930 .buildTopology();
931
// this configuration yields 8 start nodes and 16 exit nodes
932 CHECK (8 == graph.allNodes().filter(isStartNode).count());
933 CHECK (16 == graph.allNodes().filter(isExitNode).count());
934
935
936 // verify computation of the globally combined exit hash
937 auto exitHashes = graph.allNodes()
938 .filter(isExitNode)
939 .transform([](Node& n){ return n.hash; })
940 .effuse();
941 CHECK (16 == exitHashes.size());
942
// folding all 16 exit hashes must reproduce the graph's overall hash
943 size_t combinedHash{0};
944 for (uint i=0; i <16; ++i)
945 lib::hash::combine (combinedHash, exitHashes[i]);
946
947 CHECK (graph.getHash() == combinedHash);
948 CHECK (graph.getHash() == 0x33B00C450215EB00);
949
950
951 // verify connectivity and local exit hashes
// the 32 nodes fall into 8 identical 4-node groups: start → inner → 2 exits
952 graph.allNodePtr().grouped<4>()
953 .foreach([&](auto group)
954 { // verify wiring pattern
955 // and the resulting exit hashes
956 auto& [a,b,c,d] = *group;
957 CHECK (isStart(a));
958 CHECK (isInner(b));
959 CHECK (not a->weight);
960 CHECK (not b->weight);
961 CHECK (isExit(c));
962 CHECK (isExit(d));
963 CHECK (c->hash == 0xAEDC04CFA2E5B999);
964 CHECK (d->hash == 0xAEDC04CFA2E5B999);
965 CHECK (c->weight == 4);
966 CHECK (d->weight == 4);
967 });
968
969
// clearing resets all hashes to zero, while seed nodes carry the new seed
970 graph.setSeed(55).clearNodeHashes();
971 CHECK (graph.getSeed() == 55);
972 CHECK (graph.getHash() == 0);
973 graph.allNodePtr().grouped<4>()
974 .foreach([&](auto group)
975 { // verify hashes have been reset
976 auto& [a,b,c,d] = *group;
977 CHECK (a->hash == 55);
// NOTE(review): `b->hash` is checked three times in a row — looks like a
// copy-paste slip; presumably `c->hash` and `d->hash` were intended. Harmless
// (still true after clearNodeHashes) but c/d currently go unverified here.
978 CHECK (b->hash == 0);
979 CHECK (b->hash == 0);
980 CHECK (b->hash == 0);
981 });
982
983 graph.recalculate();
984 CHECK (graph.getHash() == 0x17427F67DBC8BCC0);
985 graph.allNodePtr().grouped<4>()
986 .foreach([&](auto group)
987 { // verify hashes were recalculated
988 // based on the new seed
989 auto& [a,b,c,d] = *group;
990 CHECK (a->hash == 55);
991 CHECK (c->hash == 0x7887993B0ED41395);
992 CHECK (d->hash == 0x7887993B0ED41395);
993 });
994
995 // seeding and recalculation are reproducible
996 graph.setSeed(0).recalculate();
997 CHECK (graph.getHash() == 0x33B00C450215EB00);
998 graph.setSeed(55).recalculate();
999 CHECK (graph.getHash() == 0x17427F67DBC8BCC0);
1000 }
1001
1002
1003
1006 void
// ── Test case: benchmark synchronous (single-threaded) graph computation to
// establish a runtime reference, and verify the benchmark scales with load
// and node count; then confirm calcRuntimeReference() reproduces node hashes.
// NOTE(review): the function-name line and the hyperlinked benchmark-call
// lines (original source lines 1007, 1011, 1013, 1017, 1019, 1023, 1025 —
// presumably the `.calcRuntimeReference(...)` invocations terminating each
// initialiser) were stripped by this doxygen rendering, so the three
// initialiser expressions below appear truncated. Code kept byte-identical.
1008 {
1009 double t1 =
1010 TestChainLoad{64}
1012 .buildTopology()
1014
1015 double t2 =
1016 TestChainLoad{64}
1018 .buildTopology()
1020
1021 double t3 =
1022 TestChainLoad{256}
1024 .buildTopology()
1026
// relative tolerance check: measured time within ±10% of the reference value
1027 auto isWithin10Percent = [](double t, double r)
1028 {
1029 auto delta = abs (1.0 - t/r);
1030 return delta < 0.1;
1031 };
1032
1033 // the test-graph has 64 Nodes,
1034 // each using the default load of 100µs
1035 CHECK (isWithin10Percent(t1, 6400)); // thus overall we should be close to 6.4ms
1036 CHECK (isWithin10Percent(t2, 10*t1)); // and the 10-fold load should yield 10-times
1037 CHECK (isWithin10Percent(t3, 4*t1)); // using 4 times as much nodes (64->256)
1038
1039 // the time measurement uses a performance
1040 // which clears, re-seeds and calculates the complete graph
1041 auto graph =
1042 TestChainLoad{64}
1044 .buildTopology();
1045
1046 CHECK (graph.getHash() == 0x554F5086DE5B0861);
1047
1048 graph.clearNodeHashes();
1049 CHECK (graph.getHash() == 0);
1050
1051 // this is used by the timing benchmark
// performGraphSynchronously() recomputes all node hashes in one thread...
1052 graph.performGraphSynchronously();
1053 CHECK (graph.getHash() == 0x554F5086DE5B0861);
1054
1055 graph.clearNodeHashes();
1056 CHECK (graph.getHash() == 0);
1057
// ...and calcRuntimeReference() must leave the graph in the same state
1058 graph.calcRuntimeReference();
1059 CHECK (graph.getHash() == 0x554F5086DE5B0861);
1060 }
1061
1062
1063
1070 void
// ── Test case: verify per-level weight aggregation (allLevelWeights) and the
// derived schedule sequence (levelScheduleSequence), which accumulates a
// weight factor per level, discounted by the achievable concurrency.
// NOTE(review): the function-name line and the hyperlinked topology-config
// call (original source lines 1071 and 1074) were stripped by this doxygen
// rendering; code kept byte-identical.
1072 {
1073 TestChainLoad testLoad{64};
1075 .buildTopology()
1076// .printTopologyDOT()
1077 ;
1078
1079 // compute aggregated level data....
1080 auto level = testLoad.allLevelWeights().effuse();
1081 CHECK (level.size() == 26);
1082
1083 // visualise and verify this data......
1084 auto node = testLoad.allNodePtr().effuse();
1085 _Fmt nodeFmt{"i=%-2d lev:%-2d w=%1d"};
1086 _Fmt levelFmt{" Σ%-2d Σw:%2d"};
// render one node as "index, level, weight"; on the last node of a level,
// append that level's node count and weight sum, else a filler column
1087 auto nodeStr = [&](uint i)
1088 {
1089 size_t l = node[i]->level;
1090 return string{nodeFmt % i % node[i]->level % node[i]->weight}
1091 + (i == level[l].endidx? string{levelFmt % level[l].nodes % level[l].weight}
1092 : string{" · · "});
1093 };
1094 // |idx--level--wght|-levelSum-------
1095 CHECK (nodeStr( 1) == "i=1 lev:1 w=0 Σ1 Σw: 0"_expect);
1096 CHECK (nodeStr( 2) == "i=2 lev:2 w=2 Σ1 Σw: 2"_expect);
1097 CHECK (nodeStr( 3) == "i=3 lev:3 w=0 Σ1 Σw: 0"_expect);
1098 CHECK (nodeStr( 4) == "i=4 lev:4 w=0 Σ1 Σw: 0"_expect);
1099 CHECK (nodeStr( 5) == "i=5 lev:5 w=0 Σ1 Σw: 0"_expect);
1100 CHECK (nodeStr( 6) == "i=6 lev:6 w=1 Σ1 Σw: 1"_expect);
1101 CHECK (nodeStr( 7) == "i=7 lev:7 w=2 Σ1 Σw: 2"_expect);
1102 CHECK (nodeStr( 8) == "i=8 lev:8 w=2 Σ1 Σw: 2"_expect);
1103 CHECK (nodeStr( 9) == "i=9 lev:9 w=1 · · "_expect);
1104 CHECK (nodeStr(10) == "i=10 lev:9 w=1 Σ2 Σw: 2"_expect);
1105 CHECK (nodeStr(11) == "i=11 lev:10 w=0 · · "_expect);
1106 CHECK (nodeStr(12) == "i=12 lev:10 w=0 Σ2 Σw: 0"_expect);
1107 CHECK (nodeStr(13) == "i=13 lev:11 w=0 · · "_expect);
1108 CHECK (nodeStr(14) == "i=14 lev:11 w=0 Σ2 Σw: 0"_expect);
1109 CHECK (nodeStr(15) == "i=15 lev:12 w=1 · · "_expect);
1110 CHECK (nodeStr(16) == "i=16 lev:12 w=1 Σ2 Σw: 2"_expect);
1111 CHECK (nodeStr(17) == "i=17 lev:13 w=1 · · "_expect);
1112 CHECK (nodeStr(18) == "i=18 lev:13 w=1 Σ2 Σw: 2"_expect);
1113 CHECK (nodeStr(19) == "i=19 lev:14 w=2 · · "_expect);
1114 CHECK (nodeStr(20) == "i=20 lev:14 w=2 Σ2 Σw: 4"_expect);
1115 CHECK (nodeStr(21) == "i=21 lev:15 w=0 Σ1 Σw: 0"_expect);
1116 CHECK (nodeStr(22) == "i=22 lev:16 w=1 Σ1 Σw: 1"_expect);
1117 CHECK (nodeStr(23) == "i=23 lev:17 w=3 Σ1 Σw: 3"_expect);
1118 CHECK (nodeStr(24) == "i=24 lev:18 w=0 · · "_expect);
1119 CHECK (nodeStr(25) == "i=25 lev:18 w=0 · · "_expect);
1120 CHECK (nodeStr(26) == "i=26 lev:18 w=0 · · "_expect);
1121 CHECK (nodeStr(27) == "i=27 lev:18 w=0 · · "_expect);
1122 CHECK (nodeStr(28) == "i=28 lev:18 w=0 Σ5 Σw: 0"_expect);
1123 CHECK (nodeStr(29) == "i=29 lev:19 w=2 · · "_expect);
1124 CHECK (nodeStr(30) == "i=30 lev:19 w=2 · · "_expect);
1125 CHECK (nodeStr(31) == "i=31 lev:19 w=2 · · "_expect);
1126 CHECK (nodeStr(32) == "i=32 lev:19 w=2 · · "_expect);
1127 CHECK (nodeStr(33) == "i=33 lev:19 w=2 Σ5 Σw:10"_expect);
1128 CHECK (nodeStr(34) == "i=34 lev:20 w=3 · · "_expect);
1129 CHECK (nodeStr(35) == "i=35 lev:20 w=2 Σ2 Σw: 5"_expect);
1130 CHECK (nodeStr(36) == "i=36 lev:21 w=1 · · "_expect);
1131 CHECK (nodeStr(37) == "i=37 lev:21 w=1 · · "_expect);
1132 CHECK (nodeStr(38) == "i=38 lev:21 w=3 Σ3 Σw: 5"_expect);
1133 CHECK (nodeStr(39) == "i=39 lev:22 w=3 · · "_expect);
1134 CHECK (nodeStr(40) == "i=40 lev:22 w=3 · · "_expect);
1135 CHECK (nodeStr(41) == "i=41 lev:22 w=0 · · "_expect);
1136 CHECK (nodeStr(42) == "i=42 lev:22 w=0 · · "_expect);
1137 CHECK (nodeStr(43) == "i=43 lev:22 w=0 · · "_expect);
1138 CHECK (nodeStr(44) == "i=44 lev:22 w=0 Σ6 Σw: 6"_expect);
1139 CHECK (nodeStr(45) == "i=45 lev:23 w=0 · · "_expect);
1140
1141 // compute a weight factor for each level,
1142 // using the number of nodes, the weight sum and concurrency
// NOTE(review): the next two CHECKs use `=` (assignment), not `==` — they are
// always "true" and silently OVERWRITE level[19] with the expected values,
// masking any discrepancy; `==` was presumably intended. (Here the assigned
// values happen to match the data verified above, so behavior is unchanged.)
1143 CHECK (level[19].nodes = 5); // ╭────────────────────────╢ concurrency
1144 CHECK (level[19].weight = 10); // ▽ ╭───────╢ boost by concurrency
1145 CHECK (computeWeightFactor(level[19], 1) == 10.0);// ▽
1146 CHECK (computeWeightFactor(level[19], 2) == 10.0 / (5.0/3));
1147 CHECK (computeWeightFactor(level[19], 3) == 10.0 / (5.0/2));
1148 CHECK (computeWeightFactor(level[19], 4) == 10.0 / (5.0/2));
1149 CHECK (computeWeightFactor(level[19], 5) == 10.0 / (5.0/1));
1150
1151 // build a schedule sequence based on
1152 // summing up weight factors, with example concurrency ≔ 4
1153 uint concurrency = 4;
1154 auto steps = testLoad.levelScheduleSequence(concurrency).effuse();
1155 CHECK (steps.size() == 26);
1156
1157 // for documentation/verification: show also the boost factor and the resulting weight factor
1158 auto boost = [&](uint i){ return level[i].nodes / std::ceil (double(level[i].nodes)/concurrency); };
1159 auto wfact = [&](uint i){ return computeWeightFactor(level[i], concurrency); };
1160
1161 _Fmt stepFmt{"lev:%-2d nodes:%-2d Σw:%2d %4.1f Δ%5.3f ▿▿ %6.3f"};
1162 auto stepStr = [&](uint i){ return string{stepFmt % i % level[i].nodes % level[i].weight % boost(i) % wfact(i) % steps[i]}; };
1163
// the last column (steps[i]) is the running sum of the per-level weight factors
1164 // boost wfactor steps
1165 CHECK (stepStr( 0) == "lev:0 nodes:1 Σw: 0 1.0 Δ0.000 ▿▿ 0.000"_expect);
1166 CHECK (stepStr( 1) == "lev:1 nodes:1 Σw: 0 1.0 Δ0.000 ▿▿ 0.000"_expect);
1167 CHECK (stepStr( 2) == "lev:2 nodes:1 Σw: 2 1.0 Δ2.000 ▿▿ 2.000"_expect);
1168 CHECK (stepStr( 3) == "lev:3 nodes:1 Σw: 0 1.0 Δ0.000 ▿▿ 2.000"_expect);
1169 CHECK (stepStr( 4) == "lev:4 nodes:1 Σw: 0 1.0 Δ0.000 ▿▿ 2.000"_expect);
1170 CHECK (stepStr( 5) == "lev:5 nodes:1 Σw: 0 1.0 Δ0.000 ▿▿ 2.000"_expect);
1171 CHECK (stepStr( 6) == "lev:6 nodes:1 Σw: 1 1.0 Δ1.000 ▿▿ 3.000"_expect);
1172 CHECK (stepStr( 7) == "lev:7 nodes:1 Σw: 2 1.0 Δ2.000 ▿▿ 5.000"_expect);
1173 CHECK (stepStr( 8) == "lev:8 nodes:1 Σw: 2 1.0 Δ2.000 ▿▿ 7.000"_expect);
1174 CHECK (stepStr( 9) == "lev:9 nodes:2 Σw: 2 2.0 Δ1.000 ▿▿ 8.000"_expect);
1175 CHECK (stepStr(10) == "lev:10 nodes:2 Σw: 0 2.0 Δ0.000 ▿▿ 8.000"_expect);
1176 CHECK (stepStr(11) == "lev:11 nodes:2 Σw: 0 2.0 Δ0.000 ▿▿ 8.000"_expect);
1177 CHECK (stepStr(12) == "lev:12 nodes:2 Σw: 2 2.0 Δ1.000 ▿▿ 9.000"_expect);
1178 CHECK (stepStr(13) == "lev:13 nodes:2 Σw: 2 2.0 Δ1.000 ▿▿ 10.000"_expect);
1179 CHECK (stepStr(14) == "lev:14 nodes:2 Σw: 4 2.0 Δ2.000 ▿▿ 12.000"_expect);
1180 CHECK (stepStr(15) == "lev:15 nodes:1 Σw: 0 1.0 Δ0.000 ▿▿ 12.000"_expect);
1181 CHECK (stepStr(16) == "lev:16 nodes:1 Σw: 1 1.0 Δ1.000 ▿▿ 13.000"_expect);
1182 CHECK (stepStr(17) == "lev:17 nodes:1 Σw: 3 1.0 Δ3.000 ▿▿ 16.000"_expect);
1183 CHECK (stepStr(18) == "lev:18 nodes:5 Σw: 0 2.5 Δ0.000 ▿▿ 16.000"_expect);
1184 CHECK (stepStr(19) == "lev:19 nodes:5 Σw:10 2.5 Δ4.000 ▿▿ 20.000"_expect);
1185 CHECK (stepStr(20) == "lev:20 nodes:2 Σw: 5 2.0 Δ2.500 ▿▿ 22.500"_expect);
1186 CHECK (stepStr(21) == "lev:21 nodes:3 Σw: 5 3.0 Δ1.667 ▿▿ 24.167"_expect);
1187 CHECK (stepStr(22) == "lev:22 nodes:6 Σw: 6 3.0 Δ2.000 ▿▿ 26.167"_expect);
1188 CHECK (stepStr(23) == "lev:23 nodes:6 Σw: 6 3.0 Δ2.000 ▿▿ 28.167"_expect);
1189 CHECK (stepStr(24) == "lev:24 nodes:10 Σw: 9 3.3 Δ2.700 ▿▿ 30.867"_expect);
1190 CHECK (stepStr(25) == "lev:25 nodes:3 Σw: 4 3.0 Δ1.333 ▿▿ 32.200"_expect);
1191 }
1192
1193
1194
1195
1206 void
// ── Test case: wire a minimal 4-node diamond graph (s → p1,p2 → e) and verify
// hash calculation three ways: direct Node::calculate(), invocation through
// render Jobs (RandomChainCalcFunctor), and replication of the structure via
// the planning Job (RandomChainPlanFunctor) with its three callback-λs.
// NOTE(review): the function-name line (original source line 1207) was
// stripped by this doxygen rendering; code kept byte-identical.
1208 {
1209 array<Node,4> nodes;
1210 auto& [s,p1,p2,e] = nodes;
// diamond wiring: s forks to p1 and p2, which both join into e
1211 s.addSucc(p1)
1212 .addSucc(p2);
1213 e.addPred(p1)
1214 .addPred(p2);
1215 s.level = 0;
1216 p1.level = p2.level = 1;
1217 e.level = 2;
1218 CHECK (e.hash == 0);
// direct synchronous calculation establishes the reference exit hash
1219 for (Node& n : nodes)
1220 n.calculate();
1221 CHECK (e.hash == 0x6A5924BA3389D7C);
1222
1223
1224 // now do the same invoked as »render job«
1225 for (Node& n : nodes)
1226 n.hash = 0;
1227 s.level = 0;
1228 p1.level = 1;
1229 p2.level = 1;
1230 e.level = 2;
1231
// one Job per node; node index and level are encoded into the job parameters
1232 RandomChainCalcFunctor<16> chainJob{nodes[0]};
1233 Job job0{chainJob
1234 ,chainJob.encodeNodeID(0)
1235 ,chainJob.encodeLevel(0)};
1236 Job job1{chainJob
1237 ,chainJob.encodeNodeID(1)
1238 ,chainJob.encodeLevel(1)};
1239 Job job2{chainJob
1240 ,chainJob.encodeNodeID(2)
1241 ,chainJob.encodeLevel(1)};
1242 Job job3{chainJob
1243 ,chainJob.encodeNodeID(3)
1244 ,chainJob.encodeLevel(2)};
1245
1246 CHECK (e.hash == 0);
1247 job0.triggerJob();
1248 // ◁───────────────────────────────────────────── Note: fail to invoke some predecessor....
1249 job2.triggerJob();
1250 job3.triggerJob();
// skipping job1 (node p1) must yield a wrong exit hash
1251 CHECK (e.hash != 0x6A5924BA3389D7C);
1252
1253 e.hash = 0;
1254 job1.triggerJob(); // recalculate missing part of the graph...
1255 job3.triggerJob();
1256 CHECK (e.hash == 0x6A5924BA3389D7C);
1257
1258 job3.triggerJob(); // Hash calculations are *not* idempotent
1259 CHECK (e.hash != 0x6A5924BA3389D7C);
1260
1261
1262 // use the »planing job« to organise the calculations:
1263 // Let the callbacks create a clone — which at the end should generate the same hash
1264 array<Node,4> clone;
// bookkeeping updated by the callback-λs, to observe the planning progress
1265 size_t lastTouched(-1);
1266 size_t lastNode (-1);
1267 size_t lastLevel(-1);
1268 bool shallContinue{false};
1269 auto getNodeIdx = [&](Node* n) { return n - &nodes[0]; };
1270
1271 // callback-λ rigged for test....
1272 // Instead of invoking the Scheduler, here we replicate the node structure
1273 auto disposeStep = [&](size_t idx, size_t level)
1274 {
1275 Node& n = clone[idx];
1276 n.clear();
1277 n.level = level;
1278 lastTouched = idx;
1279 };
1280 auto setDependency = [&](Node* pred, Node* succ)
1281 {
1282 size_t predIdx = getNodeIdx(pred);
1283 size_t succIdx = getNodeIdx(succ);
1284 // replicate this relation into the clone array
1285 clone[predIdx].addSucc(clone[succIdx]);
1286 };
1287 auto continuation = [&](size_t, size_t nodeDone, size_t levelDone, bool work_left)
1288 {
1289 lastNode =nodeDone;
1290 lastLevel = levelDone;
1291 shallContinue = work_left;
1292 };
1293 // build a JobFunctor for the planning step(s)
1294 RandomChainPlanFunctor<16> planJob{nodes.front(), nodes.size()
1295 ,disposeStep
1296 ,setDependency
1297 ,continuation};
1298 Job jobP1{planJob
1299 ,planJob.encodeNodeID(1)
1300 ,Time::ANYTIME};
1301 Job jobP2{planJob
1302 ,planJob.encodeNodeID(5)
1303 ,Time::ANYTIME};
1304
1305 jobP1.triggerJob();
// NOTE(review): the next two CHECKs use `=` (assignment) instead of `==` —
// they are trivially "true" and overwrite the observed values, so they do
// not actually verify anything; `==` was presumably intended.
1306 CHECK (lastLevel = 1);
1307 CHECK (lastTouched = 2);
1308 CHECK (lastTouched == lastNode);
1309 Node* lastN = &clone[lastTouched];
1310 CHECK (lastN->level == lastLevel);
1311 CHECK ( isnil (lastN->succ));
1312 CHECK (not isnil (lastN->pred));
1313 CHECK (shallContinue);
1314
1315 jobP2.triggerJob();
// NOTE(review): same `=` vs `==` slip as above (lines 1316/1317)
1316 CHECK (lastLevel = 3);
1317 CHECK (lastTouched = 3);
1318 CHECK (lastTouched == lastNode);
1319 lastN = &clone[lastTouched];
1320 CHECK (lastN->level == 2);
1321 CHECK (lastN->level < lastLevel);
1322 CHECK ( isnil (lastN->succ));
1323 CHECK (not isnil (lastN->pred));
1324 CHECK (not shallContinue);
1325
1326 // all clone nodes should be wired properly now
// calculating the replicated diamond must reproduce the reference exit hash
1327 CHECK (lastN->hash == 0);
1328 for (Node& n : clone)
1329 n.calculate();
1330 CHECK (lastN->hash == 0x6A5924BA3389D7C);
1331 }
1332 };
1333
1334
1337
1338
1339
1340}}} // namespace vault::gear::test
static const Time ANYTIME
border condition marker value. ANYTIME <= any time value
A front-end for using printf-style formatting.
Collector and aggregator for performance data.
Individual frame rendering task, forwarding to a closure.
Definition job.h:276
»Scheduler-Service« : coordinate render activities.
A calibratable CPU load to be invoked from a node job functor.
double invoke(uint scaleStep=1)
cause a delay by computational load
Render JobFunctor to invoke the calculation of a single Node.
Render JobFunctor to perform chunk-wise planning of Node jobs to calculate a complete Chain-Load graph.
A Generator for synthetic Render Jobs for Scheduler load testing.
auto allLevelWeights()
calculate node weights aggregated per level
double calcRuntimeReference(microseconds timeBase=LOAD_DEFAULT_TIME, size_t sizeBase=0, size_t repeatCnt=GRAPH_BENCHMARK_RUNS)
Conduct a number of benchmark runs over processing the Graph synchronously.
TestChainLoad && configureShape_chain_loadBursts()
preconfigured topology: single graph with massive »load bursts«
TestChainLoad && buildTopology()
Use current configuration and seed to (re)build Node connectivity.
TestChainLoad && configureShape_short_segments3_interleaved()
preconfigured topology: simple interwoven 3-step graph segments
TestChainLoad && expansionRule(Rule r)
Front-end for printf-style string template interpolation.
unsigned int uint
Definition integral.hpp:29
Definition of a render job.
void combine(size_t &combinedHash, size_t additionalHash)
meld the additional hash value into the given base hash value.
Test runner and basic definitions for tests.
bool isSameObject(A const &a, B const &b)
compare plain object identity, based directly on the referee's memory identities.
Definition util.hpp:421
bool isnil(lib::time::Duration const &dur)
TestChainLoad< 16 > ChainLoad16
shorthand for specific parameters employed by the following tests
const StatKey STAT_LINK
1:1 linking node
const StatKey STAT_FORK
forking node
const StatKey STAT_NODE
all nodes
const StatKey STAT_JOIN
joining node
double computeWeightFactor(LevelWeight const &lw, uint concurrency)
simplified model for expense of a level of nodes, computed concurrently.
const StatKey STAT_EXIT
exit node
const StatKey STAT_SEED
seed node
const StatKey STAT_KNOT
knot (joins and forks)
FlowDiagnostic< CONF > watch(BlockFlow< CONF > &theFlow)
Vault-Layer implementation namespace root.
Simplistic test class runner.
#define LAUNCHER(_TEST_CLASS_, _GROUPS_)
Definition run.hpp:116
Generate synthetic computation load for Scheduler performance tests.
A collection of frequently used helper functions to support unit testing.
Tiny helper functions and shortcuts to be used everywhere. Consider this header to be effectively included in every translation unit.