Lumiera  0.pre.03
»edit your freedom«
test-chain-load-test.cpp
Go to the documentation of this file.
1 /*
2  TestChainLoad(Test) - verify diagnostic setup to watch scheduler activities
3 
4  Copyright (C) Lumiera.org
5  2023, Hermann Vosseler <Ichthyostega@web.de>
6 
7  This program is free software; you can redistribute it and/or
8  modify it under the terms of the GNU General Public License as
9  published by the Free Software Foundation; either version 2 of
10  the License, or (at your option) any later version.
11 
12  This program is distributed in the hope that it will be useful,
13  but WITHOUT ANY WARRANTY; without even the implied warranty of
14  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  GNU General Public License for more details.
16 
17  You should have received a copy of the GNU General Public License
18  along with this program; if not, write to the Free Software
19  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 
21 * *****************************************************/
22 
28 #include "lib/test/run.hpp"
29 #include "lib/test/test-helper.hpp"
30 #include "lib/format-string.hpp"
31 #include "test-chain-load.hpp"
32 #include "vault/gear/job.h"
33 #include "lib/util.hpp"
34 
35 #include <array>
36 
37 
38 using util::_Fmt;
39 using util::isnil;
40 using util::isSameObject;
41 using std::array;
42 
43 
44 namespace vault{
45 namespace gear {
46 namespace test {
47 
namespace { // shorthands and parameters for test...

  // NOTE(review): some definitions were elided here by the documentation
  //               extractor (source lines 49-51; presumably the alias
  //               `using ChainLoad16 = TestChainLoad<16>;` used below,
  //               plus test parameters) — confirm against the original file.

  using Node = ChainLoad16::Node;   // node type of the 16-fold chain-load graph

  // Predicates to classify a node by its topological role,
  // delegating to the corresponding free functions on Node.
  auto isStartNode = [](Node& n){ return isStart(n); };
  auto isInnerNode = [](Node& n){ return isInner(n); };
  auto isExitNode = [](Node& n){ return isExit(n); };

}//(End)test definitions
58 
59 
60 
61 
62  /*****************************************************************/
71  class TestChainLoad_test : public Test
72  {
73 
virtual void
run (Arg)
  {
    // Test sequence: each case builds a chain-load graph topology
    // and verifies structural invariants and reproducible hashes.
    usageExample();
    verify_Node();
    // NOTE(review): further test-case invocations were elided by the
    //               documentation extractor (source lines 79-89);
    //               the remaining verify_* methods below are invoked there.
  }
91 
92 
/** @test demonstrate the basic usage cycle:
 *        build a random graph, verify the computed global hash,
 *        then re-run the node calculations through the real Scheduler
 *        and check the same hash is reproduced.
 *  NOTE(review): the method name line was elided by the documentation
 *                extractor — per the invocation in run() this is
 *                `usageExample()`; confirm in the original source.
 */
void
  {
    auto testLoad =
      TestChainLoad{64}                     // a graph of 64 nodes
        // NOTE(review): one chained configuration call was elided here
        //               by the extractor (source line 102) — confirm.
        .buildTopology();

    // while building the graph, node hashes are computed
    CHECK (testLoad.getHash() == 0x554F5086DE5B0861);


    // set up the real scheduling infrastructure...
    BlockFlowAlloc bFlow;
    EngineObserver watch;
    Scheduler scheduler{bFlow, watch};

    // ...and recompute all nodes as scheduled jobs
    testLoad.setupSchedule(scheduler)
            .launch_and_wait();

    // invocation through Scheduler has reproduced all node hashes
    CHECK (testLoad.getHash() == 0x554F5086DE5B0861);
  }
119 
120 
121 
/** @test data structure and hash chaining of a single graph Node:
 *        default state, seed hashes, bidirectional predecessor/successor
 *        linkage and the (order dependent, stateful) hash calculation.
 *  NOTE(review): the method name line was elided by the documentation
 *                extractor — per the invocation in run() this is
 *                `verify_Node()`; confirm in the original source.
 */
void
  {
    Node n0;                             // Default-created empty Node
    CHECK (n0.hash == 0);
    CHECK (n0.level == 0);
    CHECK (n0.weight == 0);
    CHECK (n0.pred.size() == 0 );
    CHECK (n0.succ.size() == 0 );
    CHECK (n0.pred == Node::Tab{0});
    CHECK (n0.succ == Node::Tab{0});

    Node n1{23}, n2{55};                 // further Nodes with initial seed hash
    CHECK (n1.hash == 23);
    CHECK (n2.hash == 55);

    CHECK (0 == n0.calculate());         // hash calculation is NOP on unconnected Nodes
    CHECK (0 == n0.hash);
    CHECK (23 == n1.calculate());
    CHECK (23 == n1.hash);
    CHECK (55 == n2.calculate());
    CHECK (55 == n2.hash);

    n0.addPred(n1);                      // establish bidirectional link between Nodes
    CHECK (isSameObject (*n0.pred[0], n1));
    CHECK (isSameObject (*n1.succ[0], n0));
    CHECK (not n0.pred[1]);              // unused slots remain empty (null)
    CHECK (not n1.succ[1]);
    CHECK (n2.pred == Node::Tab{0});
    CHECK (n2.succ == Node::Tab{0});

    n2.addSucc(n0);                      // works likewise in the other direction
    CHECK (isSameObject (*n0.pred[0], n1));
    CHECK (isSameObject (*n0.pred[1], n2)); // next link added into next free slot
    CHECK (isSameObject (*n2.succ[0], n0));
    CHECK (not n0.pred[2]);
    CHECK (not n2.succ[1]);

    CHECK (n0.hash == 0);
    n0.calculate();                      // but now hash calculation combines predecessors
    CHECK (n0.hash == 0x53F8F4753B85558A);

    Node n00;                            // another Node...
    n00.addPred(n2)                      // just adding the predecessors in reversed order
       .addPred(n1);

    CHECK (n00.hash == 0);
    n00.calculate();                     // ==> hash is different, since it depends on order
    CHECK (n00.hash == 0xECA6BE804934CAF2);
    CHECK (n0.hash == 0x53F8F4753B85558A);

    // linkage from both sides is fully consistent after the above operations
    CHECK (isSameObject (*n1.succ[0], n0));
    CHECK (isSameObject (*n1.succ[1], n00));
    CHECK (isSameObject (*n2.succ[0], n0));
    CHECK (isSameObject (*n2.succ[1], n00));
    CHECK (isSameObject (*n00.pred[0], n2));
    CHECK (isSameObject (*n00.pred[1], n1));
    CHECK (isSameObject (*n0.pred[0], n1));
    CHECK (isSameObject (*n0.pred[1], n2));

    CHECK (n00.hash == 0xECA6BE804934CAF2);
    n00.calculate();                     // calculation is NOT idempotent (inherently stateful)
    CHECK (n00.hash == 0xB682F06D29B165C0);

    CHECK (isnil (n0.succ));             // number of predecessors or successors properly accounted for
    CHECK (isnil (n00.succ));
    CHECK (n00.succ.empty());
    CHECK (0 == n00.succ.size());
    CHECK (2 == n00.pred.size());
    CHECK (2 == n0.pred.size());
    CHECK (2 == n1.succ.size());
    CHECK (2 == n2.succ.size());
    CHECK (isnil (n1.pred));
    CHECK (isnil (n2.pred));
  }
199 
200 
201 
/** @test the default (rule-less) topology: a single linear chain using all
 *        allocated nodes; walks the chain to verify connectivity, level
 *        numbering, and that each node hash can be reproduced both by
 *        Node::calculate() and by an explicit boost::hash_combine.
 *  NOTE(review): the method name line was elided by the documentation
 *                extractor — confirm the actual name in the original source.
 */
void
  {
    auto graph = ChainLoad16{32}
                    .buildTopology();

    CHECK (graph.topLevel() == 31);
    CHECK (graph.getSeed() == 0);
    CHECK (graph.getHash() == 0xB3445F1240A1B05F);

    auto* node = & *graph.allNodes();    // first node of the chain
    CHECK (node->hash == graph.getSeed());
    CHECK (node->succ.size() == 1);
    CHECK (isSameObject(*node, *node->succ[0]->pred[0]));

    size_t steps{0};
    while (not isnil(node->succ))
      {// verify node connectivity
        ++steps;
        node = node->succ[0];
        CHECK (steps == node->level);    // level equals distance from the start node
        CHECK (1 == node->pred.size());
        size_t exHash = node->hash;

        // recompute the hash -> reproducible
        node->hash = 0;
        node->calculate();
        CHECK (exHash == node->hash);

        // explicitly compute the hash using boost::hash
        node->hash = 0;
        boost::hash_combine (node->hash, node->pred[0]->hash);
        CHECK (exHash == node->hash);
      }
    // got a complete chain using all allocated nodes
    CHECK (steps == 31);
    CHECK (steps == graph.topLevel());
    CHECK (node->hash == 0x5CDF544B70E59866);

    // Since this graph has only a single exit-node,
    // the global hash of the graph is derived from this hash
    size_t globalHash{0};
    boost::hash_combine (globalHash, node->hash);
    CHECK (globalHash == graph.getHash());
    CHECK (globalHash == 0xB3445F1240A1B05F);
  }
255 
256 
257 
258 
259 
/** @test the »expansionRule«: probabilistic forking of chains, with and
 *        without per-branch shuffling, and under constrained graph width;
 *        verifies resulting topology hashes and statistical indicators.
 *  NOTE(review): the method name line was elided by the documentation
 *                extractor — confirm the actual name in the original source.
 */
void
  {
    ChainLoad16 graph{32};

    // moderate symmetrical expansion with 40% probability and maximal +2 links
    graph.expansionRule(graph.rule().probability(0.4).maxVal(2))
         .buildTopology()
//       .printTopologyDOT()
//       .printTopologyStatistics()
         ;
    CHECK (graph.getHash() == 0x6EDD7B92F12E9A37);

    auto stat = graph.computeGraphStatistics();
    CHECK (stat.indicators[STAT_NODE].cnt == 32);                // the 32 Nodes...
    CHECK (stat.levels == 11);                                   // ... were organised into 11 levels
    CHECK (stat.indicators[STAT_FORK].cnt == 4);                 // we got 4 »Fork« events
    CHECK (stat.indicators[STAT_SEED].cnt == 1);                 // one start node
    CHECK (stat.indicators[STAT_EXIT].cnt == 1);                 // and one exit node at end
    CHECK (stat.indicators[STAT_NODE].pL == "2.9090909"_expect); // ∅ 3 Nodes / level
    CHECK (stat.indicators[STAT_NODE].cL == "0.640625"_expect);  // with Node density concentrated towards end


    // with additional re-shuffling, probability acts independent in each branch
    // leading to more chances to draw a »fork«, leading to a faster expanding graph
    graph.expansionRule(graph.rule().probability(0.4).maxVal(2).shuffle(23))
         .buildTopology()
//       .printTopologyDOT()
//       .printTopologyStatistics()
         ;
    CHECK (graph.getHash() == 0x710D010554FEA614);

    stat = graph.computeGraphStatistics();
    CHECK (stat.levels == 7);                                    // expands faster, with only 7 levels
    CHECK (stat.indicators[STAT_NODE].pL == "4.5714286"_expect); // this time ∅ 4.6 Nodes / level
    CHECK (stat.indicators[STAT_FORK].cnt == 7);                 // 7 »Fork« events
    CHECK (stat.indicators[STAT_EXIT].cnt == 10);                // but 10 »Exit« nodes....
    CHECK (stat.indicators[STAT_JOIN].cnt == 1);                 // and even one »Join« node....
    CHECK (stat.indicators[STAT_EXIT].cL == 1);                  // which are totally concentrated towards end
    CHECK (stat.indicators[STAT_JOIN].cL == 1);                  // when nodes are exhausted


    // if the generation is allowed to run for longer,
    // while more constrained in width...
    TestChainLoad<8> gra_2{256};
    gra_2.expansionRule(gra_2.rule().probability(0.4).maxVal(2).shuffle(23))
         .buildTopology()
//       .printTopologyDOT()
//       .printTopologyStatistics()
         ;
    CHECK (gra_2.getHash() == 0x619491B22C3F8A6F);

    stat = gra_2.computeGraphStatistics();
    CHECK (stat.levels == 36);                                   // much more levels, as can be expected
    CHECK (stat.indicators[STAT_NODE].pL == "7.1111111"_expect); // ∅ 7 Nodes per level
    CHECK (stat.indicators[STAT_JOIN].pL == "0.77777778"_expect);// but also almost one join per level to deal with the limitation
    CHECK (stat.indicators[STAT_FORK].frac == "0.24609375"_expect); // 25% forks (there is just not enough room for more forks)
    CHECK (stat.indicators[STAT_JOIN].frac == "0.109375"_expect);   // and 10% joins
    CHECK (stat.indicators[STAT_EXIT].cnt == 3);                 // ...leading to 3 »Exit« nodes
    CHECK (stat.indicators[STAT_EXIT].cL == 1);                  // ....located at the very end
  }
330 
331 
332 
333 
334 
/** @test the »reductionRule«: joining of chains, alone and in combination
 *        with expansion — demonstrating how both forces can counterbalance;
 *        verifies resulting topology hashes and statistical indicators.
 *  NOTE(review): the method name line was elided by the documentation
 *                extractor — confirm the actual name in the original source.
 */
void
  {
    ChainLoad16 graph{32};

    // expand immediately at start and then gradually reduce / join chains
    graph.expansionRule(graph.rule_atStart(8))
         .reductionRule(graph.rule().probability(0.2).maxVal(3).shuffle(555))
         .buildTopology()
//       .printTopologyDOT()
//       .printTopologyStatistics()
         ;
    CHECK (graph.getHash() == 0x3E9BFAE5E686BEB4);

    auto stat = graph.computeGraphStatistics();
    CHECK (stat.levels == 8);                                    // This connection pattern filled 8 levels
    CHECK (stat.indicators[STAT_JOIN].cnt == 4);                 // we got 4 »Join« events (reductions)
    CHECK (stat.indicators[STAT_FORK].cnt == 1);                 // and the single expansion/fork
    CHECK (stat.indicators[STAT_FORK].cL == 0.0);                // ...sitting right at the beginning
    CHECK (stat.indicators[STAT_NODE].cL == "0.42857143"_expect);// Nodes are concentrated towards the beginning


    // expansion and reduction can counterbalance each other
    graph.expansionRule(graph.rule().probability(0.2).maxVal(3).shuffle(555))
         .reductionRule(graph.rule().probability(0.2).maxVal(3).shuffle(555))
         .buildTopology()
//       .printTopologyDOT()
//       .printTopologyStatistics()
         ;
    CHECK (graph.getHash() == 0xB0335595D34F1D8D);

    stat = graph.computeGraphStatistics();
    CHECK (stat.levels == 11);                                   // This example runs a bit longer
    CHECK (stat.indicators[STAT_NODE].pL == "2.9090909"_expect); // in the middle threading 3-5 Nodes per Level
    CHECK (stat.indicators[STAT_FORK].cnt == 5);                 // with 5 expansions
    CHECK (stat.indicators[STAT_JOIN].cnt == 3);                 // and 3 reductions
    CHECK (stat.indicators[STAT_FORK].cL == 0.5);                // forks dominating earlier
    CHECK (stat.indicators[STAT_JOIN].cL == "0.73333333"_expect);// while joins need forks as prerequisite


    // expansion bursts can be balanced with a heightened reduction intensity
    graph.expansionRule(graph.rule().probability(0.3).maxVal(4).shuffle(555))
         .reductionRule(graph.rule().probability(0.9).maxVal(2).shuffle(555))
         .buildTopology()
//       .printTopologyDOT()
//       .printTopologyStatistics()
         ;
    CHECK (graph.getHash() == 0x220A2E81F65146FC);

    stat = graph.computeGraphStatistics();
    CHECK (stat.levels == 12);                                   // This graph has a similar outline
    CHECK (stat.indicators[STAT_NODE].pL == "2.6666667"_expect); // in the middle threading 3-5 Nodes per Level
    CHECK (stat.indicators[STAT_FORK].cnt == 7);                 // ...yet with quite different internal structure
    CHECK (stat.indicators[STAT_JOIN].cnt == 9);                 //
    CHECK (stat.indicators[STAT_FORK].cL == "0.41558442"_expect);
    CHECK (stat.indicators[STAT_JOIN].cL == "0.62626263"_expect);
    CHECK (stat.indicators[STAT_FORK].pLW == "0.19583333"_expect); // while the densities of forks and joins almost match,
    CHECK (stat.indicators[STAT_JOIN].pLW == "0.26527778"_expect); // a slightly higher reduction density leads to convergence eventually
  }
400 
401 
402 
403 
404 
/** @test the »seedingRule«: randomly starting new chains, alone and
 *        combined with reduction, so that side-chains are successively
 *        joined into a common result.
 *  NOTE(review): the method name line was elided by the documentation
 *                extractor — confirm the actual name in the original source.
 */
void
  {
    ChainLoad16 graph{32};

    // randomly start new chains, to be carried-on linearly
    graph.seedingRule(graph.rule().probability(0.2).maxVal(3).shuffle())
         .buildTopology()
//       .printTopologyDOT()
//       .printTopologyStatistics()
         ;
    CHECK (graph.getHash() == 0xBC35A96B3CE1F39F);

    auto stat = graph.computeGraphStatistics();
    CHECK (stat.levels == 7);                                    // 7 Levels...
    CHECK (stat.indicators[STAT_SEED].cnt == 12);                // overall 12 »Seed« events generated several ongoing chains
    CHECK (stat.indicators[STAT_FORK].cnt == 0);                 // yet no branching/expanding
    CHECK (stat.indicators[STAT_LINK].cnt == 14);                // thus more and more chains were just carried on
    CHECK (stat.indicators[STAT_LINK].pL == 2);                  // on average 2-3 per level are continuations
    CHECK (stat.indicators[STAT_NODE].pL == "4.5714286"_expect); // leading to ∅ 4.5 Nodes per level
    CHECK (stat.indicators[STAT_NODE].cL == "0.734375"_expect);  // with nodes amassing towards the end
    CHECK (stat.indicators[STAT_LINK].cL == "0.64285714"_expect);// because there are increasingly more links to carry-on
    CHECK (stat.indicators[STAT_JOIN].cL == 1);                  // while joining only happens at the very end


    // combining random seed nodes with reduction leads to a processing pattern
    // with side-chains successively joined into a single common result
    graph.seedingRule(graph.rule().probability(0.2).maxVal(3).shuffle())
         .reductionRule(graph.rule().probability(0.9).maxVal(2))
         .buildTopology()
//       .printTopologyDOT()
//       .printTopologyStatistics()
         ;
    CHECK (graph.getHash() == 0x3DFA720156540247);

    stat = graph.computeGraphStatistics();
    CHECK (stat.indicators[STAT_SEED].cnt == 11);                // 11 »Seed« events
                                                                 // NOTE(review): comment originally said "the same number",
                                                                 //               yet the previous case had 12 seeds — verify
    CHECK (stat.indicators[STAT_JOIN].cnt == 6);                 // but now 6 joining nodes
    CHECK (stat.indicators[STAT_LINK].cnt == 15);                // and less carry-on
    CHECK (stat.indicators[STAT_FORK].cnt == 0);                 // no branching
    CHECK (stat.indicators[STAT_NODE].pL == 3.2);                // leading a slightly leaner graph with ∅ 3.2 Nodes per level
    CHECK (stat.indicators[STAT_NODE].cL == "0.5625"_expect);    // and also slightly more evenly spaced this time
    CHECK (stat.indicators[STAT_LINK].cL == "0.55555556"_expect);// links are also more encountered in the middle
    CHECK (stat.indicators[STAT_JOIN].cL == "0.72222222"_expect);// and also joins are happening underway
    CHECK (stat.levels == 10);                                   // mostly because a leaner graph takes longer to use 32 Nodes
  }
456 
457 
458 
459 
460 
/** @test the »pruningRule«: randomly terminating chains, which severs the
 *        graph into segments; combined with expansion and seeding rules this
 *        produces tree-shaped segments, interleaved graph parts and even a
 *        self-similar repetitive pattern.
 *  NOTE(review): the method name line was elided by the documentation
 *                extractor — confirm the actual name in the original source.
 */
void
  {
    ChainLoad16 graph{32};

    // terminate chains randomly
    graph.pruningRule(graph.rule().probability(0.2))
         .buildTopology()
//       .printTopologyDOT()
//       .printTopologyStatistics()
         ;
    CHECK (graph.getHash() == 0x660BD1CD261A990);

    auto stat = graph.computeGraphStatistics();
    CHECK (stat.levels == 32);                                   // only a single line of connections...
    CHECK (stat.segments == 8);                                  // albeit severed into 8 segments
    CHECK (stat.indicators[STAT_NODE].pS == 4);                  // with always 4 Nodes per segment
    CHECK (stat.indicators[STAT_NODE].pL == 1);                  // and only ever a single node per level
    CHECK (stat.indicators[STAT_SEED].cnt == 8);                 // consequently we get 8 »Seed« nodes
    CHECK (stat.indicators[STAT_EXIT].cnt == 8);                 // 8 »Exit« nodes
    CHECK (stat.indicators[STAT_LINK].cnt == 16);                // and 16 interconnecting links


    // combined with expansion, several tree-shaped segments emerge
    graph.pruningRule(graph.rule().probability(0.2))
         .expansionRule(graph.rule().probability(0.6))
         .setSeed(10101)
         .buildTopology()
//       .printTopologyDOT()
//       .printTopologyStatistics()
         ;
    CHECK (graph.getHash() == 0x1D0A7C39647340AA);

    stat = graph.computeGraphStatistics();
    CHECK (stat.levels == 14);                                   //
    CHECK (stat.segments == 5);                                  // this time the graph is segregated into 5 parts
    CHECK (stat.indicators[STAT_NODE].pS == "6.4"_expect);       // with ∅ 6.4 Nodes per segment
    CHECK (stat.indicators[STAT_FORK].sL == "0"_expect);         // where »Fork« is always placed at the beginning of each segment
    CHECK (stat.indicators[STAT_EXIT].sL == "1"_expect);         // and several »Exit« at the end
    CHECK (stat.indicators[STAT_EXIT].pS == "3"_expect);         // with always 3 exits per segment
    CHECK (stat.indicators[STAT_SEED].cnt == 5);                 // so overall we get 5 »Seed« nodes
    CHECK (stat.indicators[STAT_FORK].cnt == 5);                 // 5 »Fork« nodes
    CHECK (stat.indicators[STAT_EXIT].cnt == 15);                // 15 »Exit« nodes
    CHECK (stat.indicators[STAT_LINK].cnt == 12);                // and 12 interconnecting links
    CHECK (stat.indicators[STAT_NODE].pL == "2.2857143"_expect); // leading to ∅ ~2 Nodes per level


    // however, by chance, with more randomised pruning points...
    graph.pruningRule(graph.rule().probability(0.2).shuffle(5))
         .expansionRule(graph.rule().probability(0.6))
         .setSeed(10101)
         .buildTopology()
//       .printTopologyDOT()
//       .printTopologyStatistics()
         ;
    CHECK (graph.getHash() == 0x12BB22F76ECC5C1B);

    stat = graph.computeGraphStatistics();
    CHECK (stat.segments == 1);                                  // ...the graph can evade severing altogether
    CHECK (stat.indicators[STAT_FORK].cnt == 3);                 // with overall 3 »Fork«
    CHECK (stat.indicators[STAT_EXIT].cnt == 10);                // and 10 »Exit« nodes
    CHECK (stat.indicators[STAT_EXIT].pL == "1.6666667"_expect); // ∅ 1.6 exits per level
    CHECK (stat.indicators[STAT_NODE].pL == "5.3333333"_expect); // ∅ 5.3 nodes per level


    graph.expansionRule(graph.rule());   // reset expansion to the default (inactive) rule


    // combined with a special seeding rule,
    // which injects /another seed/ in the next level after each seed,
    // an equilibrium of chain seeding and termination can be achieved...
    graph.seedingRule(graph.rule_atStart(1))
         .pruningRule(graph.rule().probability(0.2))
         .setSeed(10101)
         .buildTopology()
//       .printTopologyDOT()
//       .printTopologyStatistics()
         ;
    CHECK (graph.getHash() == 0xBFFA04FE8202C708);

    // NOTE: this example produced 11 disjoint graph parts,
    //       which however start and end interleaved
    stat = graph.computeGraphStatistics();
    CHECK (stat.levels == 12);                                   // Generation carries on for 12 levels
    CHECK (stat.segments == 1);                                  // NOTE: the detection of segments FAILS here (due to interleaved starts)
    CHECK (stat.indicators[STAT_SEED].cnt == 12);                // 12 »Seed« nodes
    CHECK (stat.indicators[STAT_EXIT].cnt == 11);                // 11 »Exit« nodes (including the isolated, last one)
    CHECK (stat.indicators[STAT_LINK].cnt == 10);                // 10 interconnecting links
    CHECK (stat.indicators[STAT_JOIN].cnt == 1);                 // and one additional »Join«
    CHECK (stat.indicators[STAT_JOIN].cL == "1"_expect);         // ....appended at graph completion
    CHECK (stat.indicators[STAT_NODE].pL == "2.6666667"_expect); // overall ∅ 2⅔ nodes per level (converging ⟶ 3)
    CHECK (stat.indicators[STAT_NODE].cL == "0.52840909"_expect);// with generally levelled distribution
    CHECK (stat.indicators[STAT_SEED].cL == "0.5"_expect);       // also for the seeds
    CHECK (stat.indicators[STAT_EXIT].cL == "0.62809917"_expect);// and the exits


    // The next example is »interesting« insofar it shows self-similarity
    // The generation is entirely repetitive and locally predictable,
    // producing an ongoing sequence of small graph segments,
    // partially overlapping with interwoven starts.
    graph.seedingRule(graph.rule().fixedVal(1))
         .pruningRule(graph.rule().probability(0.5))
         .reductionRule(graph.rule().probability(0.8).maxVal(4))
         .setSeed(10101)
         .buildTopology()
//       .printTopologyDOT()
//       .printTopologyStatistics()
         ;
    CHECK (graph.getHash() == 0xFB0A0EA9B7072507);

    stat = graph.computeGraphStatistics();
    CHECK (stat.levels == 8);                                    // Generation carries on for 8 levels
                                                                 // NOTE(review): original comment claimed 13 levels — value is 8
    CHECK (stat.indicators[STAT_JOIN].pL == 1);                  // with one »Join« event per level on average
    CHECK (stat.indicators[STAT_SEED].cnt == 22);                // seeds are injected with /fixed rate/, meaning that
    CHECK (stat.indicators[STAT_SEED].pL == 2.75);               // there is one additional seed for every node in previous level
  }
583 
584 
585 
586 
587 
606  void
608  {
609  ChainLoad16 graph{256};
610 
611  // This example creates a repetitive, non-expanding stable pattern
612  // comprised of four small graph segments, generated interleaved
613  // Explanation: rule_atLink() triggers when the preceding node is a »Link«
614  graph.seedingRule(graph.rule_atLink(1))
615  .pruningRule(graph.rule().probability(0.4))
616  .reductionRule(graph.rule().probability(0.6).maxVal(5).minVal(2))
617  .setSeed(23)
618  .buildTopology()
619 // .printTopologyDOT()
620 // .printTopologyStatistics()
621  ;
622  CHECK (graph.getHash() == 0x6B5D7BD3130044E2);
623 
624  auto stat = graph.computeGraphStatistics();
625  CHECK (stat.indicators[STAT_NODE].cL == "0.50509511"_expect); // The resulting distribution of nodes is stable and balanced
626  CHECK (stat.levels == 93); // ...arranging the 256 nodes into 93 levels
627  CHECK (stat.indicators[STAT_NODE].pL == "2.7526882"_expect); // ...with ∅ 2.7 nodes per level
628  CHECK (stat.indicators[STAT_SEED].pL == "1.0537634"_expect); // comprised of ∅ 1 seed per level
629  CHECK (stat.indicators[STAT_JOIN].pL == "0.48387097"_expect); // ~ ∅ ½ join per level
630  CHECK (stat.indicators[STAT_EXIT].pL == "0.34408602"_expect); // ~ ∅ ⅓ exit per level
631  CHECK (stat.indicators[STAT_SEED].frac == "0.3828125"_expect); // overall, 38% nodes are seeds
632  CHECK (stat.indicators[STAT_EXIT].frac == "0.125"_expect); // and ⅛ are exit nodes
633  CHECK (stat.indicators[STAT_SEED].cLW == "0.49273514"_expect); // the density centre of all node kinds
634  CHECK (stat.indicators[STAT_LINK].cLW == "0.49588657"_expect); // ...is close to the middle
635  CHECK (stat.indicators[STAT_JOIN].cLW == "0.52481335"_expect);
636  CHECK (stat.indicators[STAT_EXIT].cLW == "0.55716297"_expect);
637 
638 
639 
640  // with only a slight increase in pruning probability
641  // the graph goes into a stable repetition loop rather,
642  // repeating a single shape with 3 seeds, 3 links and one 3-fold join as exit
643  graph.seedingRule(graph.rule_atLink(1))
644  .pruningRule(graph.rule().probability(0.5))
645  .reductionRule(graph.rule().probability(0.6).maxVal(5).minVal(2))
646  .setSeed(23)
647  .buildTopology()
648 // .printTopologyDOT()
649 // .printTopologyStatistics()
650  ;
651  CHECK (graph.getHash() == 0x20122CF2A1F301D1);
652 
653  stat = graph.computeGraphStatistics();
654  CHECK (stat.levels == 77); //
655  CHECK (stat.indicators[STAT_NODE].pL == "3.3246753"_expect); // ∅ 3.3 nodes per level
656  CHECK (stat.indicators[STAT_SEED].frac == "0.421875"_expect); // 42% seed
657  CHECK (stat.indicators[STAT_EXIT].frac == "0.14453125"_expect); // 14% exit
658 
659 
660 
661  // The next example uses a different generation approach:
662  // Here, seeding happens randomly, while every join immediately
663  // forces a prune, so all joins become exit nodes.
664  // With a reduction probability slightly over seed, yet limited reduction strength
665  // the generation goes into a stable repetition loop, yet with rather small graphs,
666  // comprised each of two seeds, two links and a single 2-fold join at exit,
667  // with exit and the two seeds of the following graph happening simultaneously.
668  graph.seedingRule(graph.rule().probability(0.6).maxVal(1))
669  .reductionRule(graph.rule().probability(0.75).maxVal(3))
670  .pruningRule(graph.rule_atJoin(1))
671  .setSeed(47)
672  .buildTopology()
673 // .printTopologyDOT()
674 // .printTopologyStatistics()
675  ;
676  CHECK (graph.getHash() == 0xB58904674ED84031);
677 
678  stat = graph.computeGraphStatistics();
679  CHECK (stat.levels == 104); //
680  CHECK (stat.indicators[STAT_NODE].pL == "2.4615385"_expect); // ∅ 2.5 nodes per level
681  CHECK (stat.indicators[STAT_SEED].frac == "0.40234375"_expect); // 40% seed
682  CHECK (stat.indicators[STAT_EXIT].frac == "0.19921875"_expect); // 20% exit
683  CHECK (stat.indicators[STAT_SEED].pL == "0.99038462"_expect); // resulting in 1 seed per level
684  CHECK (stat.indicators[STAT_EXIT].pL == "0.49038462"_expect); // ½ exit per level
685 
686 
687  // »short_segments_interleaved«
688  // Increased seed probability combined with overall seed value 0 ◁──── (crucial, other seeds produce larger graphs)
689  // produces what seems to be the best stable repetition loop:
690  // same shape as in preceding, yet interwoven by 2 steps
691  graph.seedingRule(graph.rule().probability(0.8).maxVal(1))
692  .reductionRule(graph.rule().probability(0.75).maxVal(3))
693  .pruningRule(graph.rule_atJoin(1))
694  .setSeed(0)
695  .buildTopology()
696 // .printTopologyDOT()
697 // .printTopologyStatistics()
698  ;
699  CHECK (graph.getHash() == 0x11B57D9E98FDF6DF);
700 
701  stat = graph.computeGraphStatistics();
702  CHECK (stat.levels == 55); // much denser arrangement due to stronger interleaving
703  CHECK (stat.indicators[STAT_NODE].pL == "4.6545455"_expect); // ∅ 4.7 nodes per level — almost twice as much
704  CHECK (stat.indicators[STAT_SEED].frac == "0.3984375"_expect); // 40% seed
705  CHECK (stat.indicators[STAT_EXIT].frac == "0.1953125"_expect); // 20% exit — same fractions
706  CHECK (stat.indicators[STAT_SEED].pL == "1.8545455"_expect); // 1.85 seed per level — higher density
707  CHECK (stat.indicators[STAT_EXIT].pL == "0.90909091"_expect); // 0.9 exit per level
708 
709 
710  // With just the addition of irregularity through shuffling on the reduction,
711  // a stable and tightly interwoven pattern of medium sized graphs is generated
712  graph.seedingRule(graph.rule().probability(0.8).maxVal(1))
713  .reductionRule(graph.rule().probability(0.75).maxVal(3).shuffle())
714  .pruningRule(graph.rule_atJoin(1))
715  .setSeed(0)
716  .buildTopology()
717 // .printTopologyDOT()
718 // .printTopologyStatistics()
719  ;
720  CHECK (graph.getHash() == 0x7C0453E7A4F6418D);
721 
722  stat = graph.computeGraphStatistics();
723  CHECK (stat.levels == 44); //
724  CHECK (stat.indicators[STAT_NODE].pL == "5.8181818"_expect); // ∅ 5.7 nodes per level
725  CHECK (stat.indicators[STAT_SEED].pL == "2.4318182"_expect); // ∅ 2.4 seeds
726  CHECK (stat.indicators[STAT_LINK].pL == "2.4772727"_expect); // ∅ 2.5 link nodes
727  CHECK (stat.indicators[STAT_EXIT].pL == "1"_expect); // ∅ 1 join/exit nodes — indicating stronger spread/reduction
728 
729 
730 
731  // This example uses another setup, without special rules;
732  // rather, seed, reduction and pruning are tuned to balance each other.
733  // The result is a regular interwoven pattern of very small graphs,
734  // slowly expanding yet stable under constriction of width.
735  // Predominant is a shape with two seeds on two levels, a single link and a 2-fold join;
736  // caused by width constriction, this becomes complemented by larger compounds at intervals.
737  graph.seedingRule(graph.rule().probability(0.8).maxVal(1))
738  .reductionRule(graph.rule().probability(0.75).maxVal(3))
739  .pruningRule(graph.rule().probability(0.55))
740  .setSeed(55) // ◁───────────────────────────────────────────── use 31 for width limited to 8 nodes
741  .buildTopology()
742 // .printTopologyDOT()
743 // .printTopologyStatistics()
744  ;
745  CHECK (graph.getHash() == 0x904A906B7859301A);
746 
747  stat = graph.computeGraphStatistics();
748  CHECK (stat.levels == 21); // ▶ resulting graph is very dense, hitting the parallelisation limit
749  CHECK (stat.indicators[STAT_NODE].pL == "12.190476"_expect); // ∅ more than 12 nodes per level !
750  CHECK (stat.indicators[STAT_SEED].pL == "6.8571429"_expect); // comprised of ∅ 6.9 seeds
751  CHECK (stat.indicators[STAT_LINK].pL == "2.3809524"_expect); // ∅ 2.4 links
752  CHECK (stat.indicators[STAT_JOIN].pL == "2.8095238"_expect); // ∅ 2.8 joins
753  CHECK (stat.indicators[STAT_EXIT].pL == "2.5714286"_expect); // ∅ 2.6 exits
754  CHECK (stat.indicators[STAT_SEED].frac == "0.5625"_expect ); // 56% seed
755  CHECK (stat.indicators[STAT_EXIT].frac == "0.2109375"_expect); // 21% exit
756 
757 
758 
759  // A slight parameters variation generates medium sized graphs, which are deep interwoven;
760  // the generation is slowly expanding, but becomes stable under width constriction
761  graph.seedingRule(graph.rule().probability(0.8).maxVal(1))
762  .reductionRule(graph.rule().probability(0.6).maxVal(5).minVal(2))
763  .pruningRule(graph.rule().probability(0.4))
764  .setSeed(42)
765  .buildTopology()
766 // .printTopologyDOT()
767 // .printTopologyStatistics()
768  ;
769  CHECK (graph.getHash() == 0x9453C56534FF9CD6);
770 
771  stat = graph.computeGraphStatistics();
772  CHECK (stat.levels == 26); //
773  CHECK (stat.indicators[STAT_NODE].pL == "9.8461538"_expect); // ∅ 9.8 nodes per level — ⅓ less dense
774  CHECK (stat.indicators[STAT_SEED].frac == "0.40234375"_expect); // 40% seed
775  CHECK (stat.indicators[STAT_LINK].frac == "0.453125"_expect); // 45% link
776  CHECK (stat.indicators[STAT_JOIN].frac == "0.109375"_expect ); // 11% joins
777  CHECK (stat.indicators[STAT_EXIT].frac == "0.08984375"_expect); // 8% exits — hinting at very strong reduction
778 
779 
780  // The same setup with different seeing produces a
781  // stable repetitive change of linear chain and small tree with 2 joins
782  graph.seedingRule(graph.rule().probability(0.8).maxVal(2))
783  .reductionRule(graph.rule().probability(0.6).maxVal(5).minVal(2))
784  .pruningRule(graph.rule().probability(0.42))
785  .setSeed(23)
786  .buildTopology()
787 // .printTopologyDOT()
788 // .printTopologyStatistics()
789  ;
790  CHECK (graph.getHash() == 0xA57727C2ED277C87);
791 
792  stat = graph.computeGraphStatistics();
793  CHECK (stat.levels == 129); //
794  CHECK (stat.indicators[STAT_NODE].pL == "1.9844961"_expect); // ∅ ~2 nodes per level — much lesser density
795  CHECK (stat.indicators[STAT_SEED].frac == "0.3359375"_expect); // 33% seed
796  CHECK (stat.indicators[STAT_LINK].frac == "0.4140625"_expect); // 42% link
797  CHECK (stat.indicators[STAT_JOIN].frac == "0.1640625"_expect); // 16% join
798  CHECK (stat.indicators[STAT_EXIT].frac == "0.171875"_expect); // 17% exit — only a 2:1 reduction on average
799 
800 
801  // With added shuffling in the seed rule, and under width constriction,
802  // an irregular sequence of small to large and strongly interwoven graphs emerges.
803  graph.seedingRule(graph.rule().probability(0.8).maxVal(2).shuffle())
804  .reductionRule(graph.rule().probability(0.6).maxVal(5).minVal(2))
805  .pruningRule(graph.rule().probability(0.42))
806  .setSeed(23)
807  .buildTopology()
808 // .printTopologyDOT()
809 // .printTopologyStatistics()
810  ;
811  CHECK (graph.getHash() == 0x4D0575F8BD269FC3);
812 
813  stat = graph.computeGraphStatistics();
814  CHECK (stat.levels == 20); // rather dense
815  CHECK (stat.indicators[STAT_NODE].pL == "12.8"_expect); // ∅ 12.8 nodes per level
816  CHECK (stat.indicators[STAT_SEED].pL == "7.65"_expect); // ∅ 7.7 seeds
817  CHECK (stat.indicators[STAT_LINK].pL == "3.15"_expect); // ∅ 3 links
818  CHECK (stat.indicators[STAT_JOIN].pL == "1.9"_expect); // ∅ 1.9 joins
819  CHECK (stat.indicators[STAT_EXIT].pL == "0.95"_expect); // ∅ ~1 exit per level
820 
821 
822 
823  // »chain_loadBursts«
824  // The final example attempts to balance expansion and reduction forces.
825  // Since reduction needs expanded nodes to work on, expansion always gets
826  // a head start and we need to tune reduction to slightly higher strength
827  // to ensure the graph width does not explode. The result is one single
828  // graph with increasingly complex connections, which can expand into
829  // width limitation at places, but also collapse to a single thread.
830  // The seed controls how fast the onset of the pattern happens.
831  // low values -> long single-chain prelude
832  // seed ≔ 55 -> prelude with 2 chains, then join, then onset at level 17
833  // high values -> massive onset quickly going into saturation
834  graph.expansionRule(graph.rule().probability(0.27).maxVal(4))
835  .reductionRule(graph.rule().probability(0.44).maxVal(6).minVal(2))
836  .seedingRule(graph.rule())
837  .pruningRule(graph.rule())
838  .setSeed(62)
839  .buildTopology()
840 // .printTopologyDOT()
841 // .printTopologyStatistics()
842  ;
843  CHECK (graph.getHash() == 0x25114F8770B1B78E);
844 
845  stat = graph.computeGraphStatistics();
846  CHECK (stat.levels == 30); // rather high concurrency
847  CHECK (stat.indicators[STAT_SEED].cnt == 1); // a single seed
848  CHECK (stat.indicators[STAT_EXIT].cnt == 4); // ...and 4 exit when running out of node space
 849  CHECK (stat.indicators[STAT_NODE].pL == "8.5333333"_expect); // ∅ 8.5 nodes per level
850  CHECK (stat.indicators[STAT_FORK].frac == "0.16015625"_expect); // 16% forks
851  CHECK (stat.indicators[STAT_LINK].frac == "0.76171875"_expect); // 77% links
852  CHECK (stat.indicators[STAT_JOIN].frac == "0.1015625"_expect); // 10% joins
853  CHECK (stat.indicators[STAT_KNOT].frac == "0.0390625"_expect); // 3% »Knot« nodes which both join and fork
854  CHECK (stat.indicators[STAT_FORK].cLW == "0.43298744"_expect); // density centre of forks lies earlier
855  CHECK (stat.indicators[STAT_JOIN].cLW == "0.64466378"_expect); // while density centre of joins leans rather towards end
856  }
857 
858 
859 
860 
861 
862 
863 
864 
867  void
869  {
870  ComputationalLoad cpuLoad;
871  CHECK (cpuLoad.timeBase == 100us);
872 
873  double micros = cpuLoad.invoke();
874  CHECK (micros < 2000);
875  CHECK (micros > 2);
876 
877  cpuLoad.calibrate();
878 
879  micros = cpuLoad.invoke();
880  CHECK (micros < 133);
881  CHECK (micros > 80);
882 
883  micros = cpuLoad.benchmark();
884  CHECK (micros < 110);
885  CHECK (micros > 90);
886 
887  cpuLoad.useAllocation = true;
888  micros = cpuLoad.invoke();
889  CHECK (micros < 133);
890  CHECK (micros > 80);
891 
892  micros = cpuLoad.benchmark();
893  CHECK (micros < 110);
894  CHECK (micros > 90);
895 
896  cpuLoad.timeBase = 1ms;
897  cpuLoad.sizeBase *= 100;
898  cpuLoad.calibrate();
899 
900  cpuLoad.useAllocation = false;
901  micros = cpuLoad.invoke();
902  CHECK (micros > 900);
903  micros = cpuLoad.invoke(5);
904  CHECK (micros > 4600);
905  micros = cpuLoad.invoke(10);
906  CHECK (micros > 9500);
907  micros = cpuLoad.invoke(100);
908  CHECK (micros > 95000);
909 
910  cpuLoad.useAllocation = true;
911  micros = cpuLoad.invoke();
912  CHECK (micros > 900);
913  micros = cpuLoad.invoke(5);
914  CHECK (micros > 4600);
915  micros = cpuLoad.invoke(10);
916  CHECK (micros > 9500);
917  micros = cpuLoad.invoke(100);
918  CHECK (micros > 95000);
919  }
920 
921 
922 
931  void
933  {
934  ChainLoad16 graph{32};
935  graph.expansionRule(graph.rule().probability(0.8).maxVal(1))
936  .pruningRule(graph.rule().probability(0.6))
937  .weightRule((graph.rule().probability(0.5)))
938  .buildTopology();
939 
940  CHECK (8 == graph.allNodes().filter(isStartNode).count());
941  CHECK (16 == graph.allNodes().filter(isExitNode).count());
942 
943 
944  // verify computation of the globally combined exit hash
945  auto exitHashes = graph.allNodes()
946  .filter(isExitNode)
947  .transform([](Node& n){ return n.hash; })
948  .effuse();
949  CHECK (16 == exitHashes.size());
950 
951  size_t combinedHash{0};
952  for (uint i=0; i <16; ++i)
953  boost::hash_combine (combinedHash, exitHashes[i]);
954 
955  CHECK (graph.getHash() == combinedHash);
956  CHECK (graph.getHash() == 0x33B00C450215EB00);
957 
958 
959  // verify connectivity and local exit hashes
960  graph.allNodePtr().grouped<4>()
961  .foreach([&](auto group)
962  { // verify wiring pattern
963  // and the resulting exit hashes
964  auto& [a,b,c,d] = *group;
965  CHECK (isStart(a));
966  CHECK (isInner(b));
967  CHECK (not a->weight);
968  CHECK (not b->weight);
969  CHECK (isExit(c));
970  CHECK (isExit(d));
971  CHECK (c->hash == 0xAEDC04CFA2E5B999);
972  CHECK (d->hash == 0xAEDC04CFA2E5B999);
973  CHECK (c->weight == 4);
974  CHECK (d->weight == 4);
975  });
976 
977 
978  graph.setSeed(55).clearNodeHashes();
979  CHECK (graph.getSeed() == 55);
980  CHECK (graph.getHash() == 0);
981  graph.allNodePtr().grouped<4>()
982  .foreach([&](auto group)
983  { // verify hashes have been reset
984  auto& [a,b,c,d] = *group;
985  CHECK (a->hash == 55);
986  CHECK (b->hash == 0);
987  CHECK (b->hash == 0);
988  CHECK (b->hash == 0);
989  });
990 
991  graph.recalculate();
992  CHECK (graph.getHash() == 0x17427F67DBC8BCC0);
993  graph.allNodePtr().grouped<4>()
994  .foreach([&](auto group)
995  { // verify hashes were recalculated
996  // based on the new seed
997  auto& [a,b,c,d] = *group;
998  CHECK (a->hash == 55);
999  CHECK (c->hash == 0x7887993B0ED41395);
1000  CHECK (d->hash == 0x7887993B0ED41395);
1001  });
1002 
1003  // seeding and recalculation are reproducible
1004  graph.setSeed(0).recalculate();
1005  CHECK (graph.getHash() == 0x33B00C450215EB00);
1006  graph.setSeed(55).recalculate();
1007  CHECK (graph.getHash() == 0x17427F67DBC8BCC0);
1008  }
1009 
1010 
1011 
1014  void
1016  {
1017  double t1 =
1018  TestChainLoad{64}
1020  .buildTopology()
1022 
1023  double t2 =
1024  TestChainLoad{64}
1026  .buildTopology()
1027  .calcRuntimeReference(1ms);
1028 
1029  double t3 =
1030  TestChainLoad{256}
1032  .buildTopology()
1034 
1035  auto isWithin10Percent = [](double t, double r)
1036  {
1037  auto delta = abs (1.0 - t/r);
1038  return delta < 0.1;
1039  };
1040 
1041  // the test-graph has 64 Nodes,
1042  // each using the default load of 100µs
1043  CHECK (isWithin10Percent(t1, 6400)); // thus overall we should be close to 6.4ms
1044  CHECK (isWithin10Percent(t2, 10*t1)); // and the 10-fold load should yield 10-times
1045  CHECK (isWithin10Percent(t3, 4*t1)); // using 4 times as much nodes (64->256)
1046 
1047  // the time measurement uses a performance
1048  // which clears, re-seeds and calculates the complete graph
1049  auto graph =
1050  TestChainLoad{64}
1052  .buildTopology();
1053 
1054  CHECK (graph.getHash() == 0x554F5086DE5B0861);
1055 
1056  graph.clearNodeHashes();
1057  CHECK (graph.getHash() == 0);
1058 
1059  // this is used by the timing benchmark
1060  graph.performGraphSynchronously();
1061  CHECK (graph.getHash() == 0x554F5086DE5B0861);
1062 
1063  graph.clearNodeHashes();
1064  CHECK (graph.getHash() == 0);
1065 
1066  graph.calcRuntimeReference();
1067  CHECK (graph.getHash() == 0x554F5086DE5B0861);
1068  }
1069 
1070 
1071 
1078  void
1080  {
1081  TestChainLoad testLoad{64};
1083  .buildTopology()
1084 // .printTopologyDOT()
1085  ;
1086 
1087  // compute aggregated level data....
1088  auto level = testLoad.allLevelWeights().effuse();
1089  CHECK (level.size() == 26);
1090 
1091  // visualise and verify this data......
1092  auto node = testLoad.allNodePtr().effuse();
1093  _Fmt nodeFmt{"i=%-2d lev:%-2d w=%1d"};
1094  _Fmt levelFmt{" Σ%-2d Σw:%2d"};
1095  auto nodeStr = [&](uint i)
1096  {
1097  size_t l = node[i]->level;
1098  return string{nodeFmt % i % node[i]->level % node[i]->weight}
1099  + (i == level[l].endidx? string{levelFmt % level[l].nodes % level[l].weight}
1100  : string{" · · "});
1101  };
1102  // |idx--level--wght|-levelSum-------
1103  CHECK (nodeStr( 1) == "i=1 lev:1 w=0 Σ1 Σw: 0"_expect);
1104  CHECK (nodeStr( 2) == "i=2 lev:2 w=2 Σ1 Σw: 2"_expect);
1105  CHECK (nodeStr( 3) == "i=3 lev:3 w=0 Σ1 Σw: 0"_expect);
1106  CHECK (nodeStr( 4) == "i=4 lev:4 w=0 Σ1 Σw: 0"_expect);
1107  CHECK (nodeStr( 5) == "i=5 lev:5 w=0 Σ1 Σw: 0"_expect);
1108  CHECK (nodeStr( 6) == "i=6 lev:6 w=1 Σ1 Σw: 1"_expect);
1109  CHECK (nodeStr( 7) == "i=7 lev:7 w=2 Σ1 Σw: 2"_expect);
1110  CHECK (nodeStr( 8) == "i=8 lev:8 w=2 Σ1 Σw: 2"_expect);
1111  CHECK (nodeStr( 9) == "i=9 lev:9 w=1 · · "_expect);
1112  CHECK (nodeStr(10) == "i=10 lev:9 w=1 Σ2 Σw: 2"_expect);
1113  CHECK (nodeStr(11) == "i=11 lev:10 w=0 · · "_expect);
1114  CHECK (nodeStr(12) == "i=12 lev:10 w=0 Σ2 Σw: 0"_expect);
1115  CHECK (nodeStr(13) == "i=13 lev:11 w=0 · · "_expect);
1116  CHECK (nodeStr(14) == "i=14 lev:11 w=0 Σ2 Σw: 0"_expect);
1117  CHECK (nodeStr(15) == "i=15 lev:12 w=1 · · "_expect);
1118  CHECK (nodeStr(16) == "i=16 lev:12 w=1 Σ2 Σw: 2"_expect);
1119  CHECK (nodeStr(17) == "i=17 lev:13 w=1 · · "_expect);
1120  CHECK (nodeStr(18) == "i=18 lev:13 w=1 Σ2 Σw: 2"_expect);
1121  CHECK (nodeStr(19) == "i=19 lev:14 w=2 · · "_expect);
1122  CHECK (nodeStr(20) == "i=20 lev:14 w=2 Σ2 Σw: 4"_expect);
1123  CHECK (nodeStr(21) == "i=21 lev:15 w=0 Σ1 Σw: 0"_expect);
1124  CHECK (nodeStr(22) == "i=22 lev:16 w=1 Σ1 Σw: 1"_expect);
1125  CHECK (nodeStr(23) == "i=23 lev:17 w=3 Σ1 Σw: 3"_expect);
1126  CHECK (nodeStr(24) == "i=24 lev:18 w=0 · · "_expect);
1127  CHECK (nodeStr(25) == "i=25 lev:18 w=0 · · "_expect);
1128  CHECK (nodeStr(26) == "i=26 lev:18 w=0 · · "_expect);
1129  CHECK (nodeStr(27) == "i=27 lev:18 w=0 · · "_expect);
1130  CHECK (nodeStr(28) == "i=28 lev:18 w=0 Σ5 Σw: 0"_expect);
1131  CHECK (nodeStr(29) == "i=29 lev:19 w=2 · · "_expect);
1132  CHECK (nodeStr(30) == "i=30 lev:19 w=2 · · "_expect);
1133  CHECK (nodeStr(31) == "i=31 lev:19 w=2 · · "_expect);
1134  CHECK (nodeStr(32) == "i=32 lev:19 w=2 · · "_expect);
1135  CHECK (nodeStr(33) == "i=33 lev:19 w=2 Σ5 Σw:10"_expect);
1136  CHECK (nodeStr(34) == "i=34 lev:20 w=3 · · "_expect);
1137  CHECK (nodeStr(35) == "i=35 lev:20 w=2 Σ2 Σw: 5"_expect);
1138  CHECK (nodeStr(36) == "i=36 lev:21 w=1 · · "_expect);
1139  CHECK (nodeStr(37) == "i=37 lev:21 w=1 · · "_expect);
1140  CHECK (nodeStr(38) == "i=38 lev:21 w=3 Σ3 Σw: 5"_expect);
1141  CHECK (nodeStr(39) == "i=39 lev:22 w=3 · · "_expect);
1142  CHECK (nodeStr(40) == "i=40 lev:22 w=3 · · "_expect);
1143  CHECK (nodeStr(41) == "i=41 lev:22 w=0 · · "_expect);
1144  CHECK (nodeStr(42) == "i=42 lev:22 w=0 · · "_expect);
1145  CHECK (nodeStr(43) == "i=43 lev:22 w=0 · · "_expect);
1146  CHECK (nodeStr(44) == "i=44 lev:22 w=0 Σ6 Σw: 6"_expect);
1147  CHECK (nodeStr(45) == "i=45 lev:23 w=0 · · "_expect);
1148 
1149  // compute a weight factor for each level,
1150  // using the number of nodes, the weight sum and concurrency
1151  CHECK (level[19].nodes = 5); // ╭────────────────────────╢ concurrency
1152  CHECK (level[19].weight = 10); // ▽ ╭───────╢ boost by concurrency
1153  CHECK (computeWeightFactor(level[19], 1) == 10.0);// ▽
1154  CHECK (computeWeightFactor(level[19], 2) == 10.0 / (5.0/3));
1155  CHECK (computeWeightFactor(level[19], 3) == 10.0 / (5.0/2));
1156  CHECK (computeWeightFactor(level[19], 4) == 10.0 / (5.0/2));
1157  CHECK (computeWeightFactor(level[19], 5) == 10.0 / (5.0/1));
1158 
1159  // build a schedule sequence based on
1160  // summing up weight factors, with example concurrency ≔ 4
1161  uint concurrency = 4;
1162  auto steps = testLoad.levelScheduleSequence(concurrency).effuse();
1163  CHECK (steps.size() == 26);
1164 
1165  // for documentation/verification: show also the boost factor and the resulting weight factor
1166  auto boost = [&](uint i){ return level[i].nodes / std::ceil (double(level[i].nodes)/concurrency); };
1167  auto wfact = [&](uint i){ return computeWeightFactor(level[i], concurrency); };
1168 
1169  _Fmt stepFmt{"lev:%-2d nodes:%-2d Σw:%2d %4.1f Δ%5.3f ▿▿ %6.3f"};
1170  auto stepStr = [&](uint i){ return string{stepFmt % i % level[i].nodes % level[i].weight % boost(i) % wfact(i) % steps[i]}; };
1171 
1172  // boost wfactor steps
1173  CHECK (stepStr( 0) == "lev:0 nodes:1 Σw: 0 1.0 Δ0.000 ▿▿ 0.000"_expect);
1174  CHECK (stepStr( 1) == "lev:1 nodes:1 Σw: 0 1.0 Δ0.000 ▿▿ 0.000"_expect);
1175  CHECK (stepStr( 2) == "lev:2 nodes:1 Σw: 2 1.0 Δ2.000 ▿▿ 2.000"_expect);
1176  CHECK (stepStr( 3) == "lev:3 nodes:1 Σw: 0 1.0 Δ0.000 ▿▿ 2.000"_expect);
1177  CHECK (stepStr( 4) == "lev:4 nodes:1 Σw: 0 1.0 Δ0.000 ▿▿ 2.000"_expect);
1178  CHECK (stepStr( 5) == "lev:5 nodes:1 Σw: 0 1.0 Δ0.000 ▿▿ 2.000"_expect);
1179  CHECK (stepStr( 6) == "lev:6 nodes:1 Σw: 1 1.0 Δ1.000 ▿▿ 3.000"_expect);
1180  CHECK (stepStr( 7) == "lev:7 nodes:1 Σw: 2 1.0 Δ2.000 ▿▿ 5.000"_expect);
1181  CHECK (stepStr( 8) == "lev:8 nodes:1 Σw: 2 1.0 Δ2.000 ▿▿ 7.000"_expect);
1182  CHECK (stepStr( 9) == "lev:9 nodes:2 Σw: 2 2.0 Δ1.000 ▿▿ 8.000"_expect);
1183  CHECK (stepStr(10) == "lev:10 nodes:2 Σw: 0 2.0 Δ0.000 ▿▿ 8.000"_expect);
1184  CHECK (stepStr(11) == "lev:11 nodes:2 Σw: 0 2.0 Δ0.000 ▿▿ 8.000"_expect);
1185  CHECK (stepStr(12) == "lev:12 nodes:2 Σw: 2 2.0 Δ1.000 ▿▿ 9.000"_expect);
1186  CHECK (stepStr(13) == "lev:13 nodes:2 Σw: 2 2.0 Δ1.000 ▿▿ 10.000"_expect);
1187  CHECK (stepStr(14) == "lev:14 nodes:2 Σw: 4 2.0 Δ2.000 ▿▿ 12.000"_expect);
1188  CHECK (stepStr(15) == "lev:15 nodes:1 Σw: 0 1.0 Δ0.000 ▿▿ 12.000"_expect);
1189  CHECK (stepStr(16) == "lev:16 nodes:1 Σw: 1 1.0 Δ1.000 ▿▿ 13.000"_expect);
1190  CHECK (stepStr(17) == "lev:17 nodes:1 Σw: 3 1.0 Δ3.000 ▿▿ 16.000"_expect);
1191  CHECK (stepStr(18) == "lev:18 nodes:5 Σw: 0 2.5 Δ0.000 ▿▿ 16.000"_expect);
1192  CHECK (stepStr(19) == "lev:19 nodes:5 Σw:10 2.5 Δ4.000 ▿▿ 20.000"_expect);
1193  CHECK (stepStr(20) == "lev:20 nodes:2 Σw: 5 2.0 Δ2.500 ▿▿ 22.500"_expect);
1194  CHECK (stepStr(21) == "lev:21 nodes:3 Σw: 5 3.0 Δ1.667 ▿▿ 24.167"_expect);
1195  CHECK (stepStr(22) == "lev:22 nodes:6 Σw: 6 3.0 Δ2.000 ▿▿ 26.167"_expect);
1196  CHECK (stepStr(23) == "lev:23 nodes:6 Σw: 6 3.0 Δ2.000 ▿▿ 28.167"_expect);
1197  CHECK (stepStr(24) == "lev:24 nodes:10 Σw: 9 3.3 Δ2.700 ▿▿ 30.867"_expect);
1198  CHECK (stepStr(25) == "lev:25 nodes:3 Σw: 4 3.0 Δ1.333 ▿▿ 32.200"_expect);
1199  }
1200 
1201 
1202 
1203 
1214  void
1216  {
1217  array<Node,4> nodes;
1218  auto& [s,p1,p2,e] = nodes;
1219  s.addSucc(p1)
1220  .addSucc(p2);
1221  e.addPred(p1)
1222  .addPred(p2);
1223  s.level = 0;
1224  p1.level = p2.level = 1;
1225  e.level = 2;
1226  CHECK (e.hash == 0);
1227  for (Node& n : nodes)
1228  n.calculate();
1229  CHECK (e.hash == 0x6A5924BA3389D7C);
1230 
1231 
1232  // now do the same invoked as »render job«
1233  for (Node& n : nodes)
1234  n.hash = 0;
1235  s.level = 0;
1236  p1.level = 1;
1237  p2.level = 1;
1238  e.level = 2;
1239 
1240  RandomChainCalcFunctor<16> chainJob{nodes[0]};
1241  Job job0{chainJob
1242  ,chainJob.encodeNodeID(0)
1243  ,chainJob.encodeLevel(0)};
1244  Job job1{chainJob
1245  ,chainJob.encodeNodeID(1)
1246  ,chainJob.encodeLevel(1)};
1247  Job job2{chainJob
1248  ,chainJob.encodeNodeID(2)
1249  ,chainJob.encodeLevel(1)};
1250  Job job3{chainJob
1251  ,chainJob.encodeNodeID(3)
1252  ,chainJob.encodeLevel(2)};
1253 
1254  CHECK (e.hash == 0);
1255  job0.triggerJob();
1256  // ◁───────────────────────────────────────────── Note: fail to invoke some predecessor....
1257  job2.triggerJob();
1258  job3.triggerJob();
1259  CHECK (e.hash != 0x6A5924BA3389D7C);
1260 
1261  e.hash = 0;
1262  job1.triggerJob(); // recalculate missing part of the graph...
1263  job3.triggerJob();
1264  CHECK (e.hash == 0x6A5924BA3389D7C);
1265 
1266  job3.triggerJob(); // Hash calculations are *not* idempotent
1267  CHECK (e.hash != 0x6A5924BA3389D7C);
1268 
1269 
1270  // use the »planing job« to organise the calculations:
1271  // Let the callbacks create a clone — which at the end should generate the same hash
1272  array<Node,4> clone;
1273  size_t lastTouched(-1);
1274  size_t lastNode (-1);
1275  size_t lastLevel(-1);
1276  bool shallContinue{false};
1277  auto getNodeIdx = [&](Node* n) { return n - &nodes[0]; };
1278 
1279  // callback-λ rigged for test....
1280  // Instead of invoking the Scheduler, here we replicate the node structure
1281  auto disposeStep = [&](size_t idx, size_t level)
1282  {
1283  Node& n = clone[idx];
1284  n.clear();
1285  n.level = level;
1286  lastTouched = idx;
1287  };
1288  auto setDependency = [&](Node* pred, Node* succ)
1289  {
1290  size_t predIdx = getNodeIdx(pred);
1291  size_t succIdx = getNodeIdx(succ);
1292  // replicate this relation into the clone array
1293  clone[predIdx].addSucc(clone[succIdx]);
1294  };
1295  auto continuation = [&](size_t, size_t nodeDone, size_t levelDone, bool work_left)
1296  {
1297  lastNode =nodeDone;
1298  lastLevel = levelDone;
1299  shallContinue = work_left;
1300  };
1301  // build a JobFunctor for the planning step(s)
1302  RandomChainPlanFunctor<16> planJob{nodes.front(), nodes.size()
1303  ,disposeStep
1304  ,setDependency
1305  ,continuation};
1306  Job jobP1{planJob
1307  ,planJob.encodeNodeID(1)
1308  ,Time::ANYTIME};
1309  Job jobP2{planJob
1310  ,planJob.encodeNodeID(5)
1311  ,Time::ANYTIME};
1312 
1313  jobP1.triggerJob();
1314  CHECK (lastLevel = 1);
1315  CHECK (lastTouched = 2);
1316  CHECK (lastTouched == lastNode);
1317  Node* lastN = &clone[lastTouched];
1318  CHECK (lastN->level == lastLevel);
1319  CHECK ( isnil (lastN->succ));
1320  CHECK (not isnil (lastN->pred));
1321  CHECK (shallContinue);
1322 
1323  jobP2.triggerJob();
1324  CHECK (lastLevel = 3);
1325  CHECK (lastTouched = 3);
1326  CHECK (lastTouched == lastNode);
1327  lastN = &clone[lastTouched];
1328  CHECK (lastN->level == 2);
1329  CHECK (lastN->level < lastLevel);
1330  CHECK ( isnil (lastN->succ));
1331  CHECK (not isnil (lastN->pred));
1332  CHECK (not shallContinue);
1333 
1334  // all clone nodes should be wired properly now
1335  CHECK (lastN->hash == 0);
1336  for (Node& n : clone)
1337  n.calculate();
1338  CHECK (lastN->hash == 0x6A5924BA3389D7C);
1339  }
1340  };
1341 
1342 
1344  LAUNCHER (TestChainLoad_test, "unit engine");
1345 
1346 
1347 
1348 }}} // namespace vault::gear::test
static const Time ANYTIME
border condition marker value. ANYTIME <= any time value
Definition: timevalue.hpp:322
const StatKey STAT_NODE
all nodes
const StatKey STAT_LINK
1:1 linking node
const StatKey STAT_SEED
seed node
const StatKey STAT_KNOT
knot (joins and forks)
Definition: trait.hpp:69
Definition: run.hpp:49
const StatKey STAT_EXIT
exit node
Front-end for printf-style string template interpolation.
double invoke(uint scaleStep=1)
cause a delay by computational load
TestChainLoad && buildTopology()
Use current configuration and seed to (re)build Node connectivity.
Generate synthetic computation load for Scheduler performance tests.
A Generator for synthetic Render Jobs for Scheduler load testing.
A front-end for using printf-style formatting.
»Scheduler-Service« : coordinate render activities.
Definition: scheduler.hpp:222
Simple test class runner.
Render JobFunctor to invoke the calculation of a single Node.
Tiny helper functions and shortcuts to be used everywhere Consider this header to be effectively incl...
double computeWeightFactor(LevelWeight const &lw, uint concurrency)
simplified model for expense of a level of nodes, computed concurrently.
double calcRuntimeReference(microseconds timeBase=LOAD_DEFAULT_TIME, size_t sizeBase=0, size_t repeatCnt=GRAPH_BENCHMARK_RUNS)
Conduct a number of benchmark runs over processing the Graph synchronously.
TestChainLoad< 16 > ChainLoad16
shorthand for specific parameters employed by the following tests
A collection of frequently used helper functions to support unit testing.
TestChainLoad && configureShape_chain_loadBursts()
preconfigured topology: single graph with massive »load bursts«
const StatKey STAT_JOIN
joining node
Statistic computeGraphStatistics()
Operator on TestChainLoad to evaluate current graph connectivity.
Definition of a render job.
const StatKey STAT_FORK
forking node
TestChainLoad && clearNodeHashes()
Clear node hashes and propagate seed value.
TestChainLoad && configureShape_short_segments3_interleaved()
preconfigured topology: simple interwoven 3-step graph segments
Render JobFunctor to perform chunk wise planning of Node jobs to calculate a complete Chain-Load grap...
Individual frame rendering task, forwarding to a closure.
Definition: job.h:277
Vault-Layer implementation namespace root.
auto allLevelWeights()
calculate node weights aggregated per level
TestChainLoad && performGraphSynchronously(microseconds timeBase=LOAD_DEFAULT_TIME, size_t sizeBase=0)
Emulate complete graph processing in a single threaded loop.
Collector and aggregator for performance data.
A calibratable CPU load to be invoked from a node job functor.
bool isSameObject(A const &a, B const &b)
compare plain object identity, bypassing any custom comparison operators.
Definition: util.hpp:372