Lumiera 0.pre.04
»edit your freedom«
Loading...
Searching...
No Matches
block-flow.hpp
Go to the documentation of this file.
1/*
2 BLOCK-FLOW.hpp - specialised custom allocator to manage scheduler data
3
4 Copyright (C)
5 2023, Hermann Vosseler <Ichthyostega@web.de>
6
7  **Lumiera** is free software; you can redistribute it and/or modify it
8  under the terms of the GNU General Public License as published by the
9  Free Software Foundation; either version 2 of the License, or (at your
10  option) any later version. See the file COPYING for further details.
11
12*/
13
14
76#ifndef SRC_VAULT_GEAR_BLOCK_FLOW_H_
77#define SRC_VAULT_GEAR_BLOCK_FLOW_H_
78
79
80#include "vault/common.hpp"
84#include "lib/iter-explorer.hpp"
85#include "lib/format-util.hpp"
86#include "lib/nocopy.hpp"
87#include "lib/util.hpp"
88
89#include <utility>
90
91
92namespace vault{
93namespace gear {
94
95 using util::isnil;
96 using lib::time::Time;
97 using lib::time::FSecs;
101
102 namespace err = lumiera::error;
103
104
105 namespace blockFlow {
106
// Hard safety guard against runaway Epoch allocation: a single deadline
// expansion may create at most this many new blocks (enforced by
// ___sanityCheckAlloc(), which throws err::Fatal beyond this limit).
114 const size_t BLOCK_EXPAND_SAFETY_LIMIT = 3000;
115
116
// NOTE(review): the struct header line was elided by the listing generator —
// judging from the member documentation this is presumably the default
// parametrisation ("lightweight yet safe parametrisation of memory
// management"); confirm the struct name against the original header.
122 {
123 /* === characteristic parameters === */
// EPOCH_SIZ: number of Activity storage slots per »Epoch«
// DUTY_CYCLE: typical relaxation time / average pre-roll to deadline
// INITIAL_STREAMS: number of streams at REFERENCE_FPS expected for normal use
124 const static size_t EPOCH_SIZ = 100;
125 const Duration DUTY_CYCLE{FSecs(1)};
126 const size_t INITIAL_STREAMS = 2;
127
128 /* === algorithm tuning settings === */
// TARGET_FILL: aim at this fraction of Epoch space on average (below 100%)
// BOOST_FACTOR: capacity adjustment applied on Epoch overflow/underflow
// DAMP_THRESHOLD: disregard (almost) empty Epochs to avoid overshooting
129 const double TARGET_FILL = 0.90;
130 const double BOOST_FACTOR = 0.85;
131 const double DAMP_THRESHOLD = 0.08;
132
133 /* === contextual assumptions === */
// ACTIVITIES_PER_FRAME: typical Activity records used per frame
// REFERENCE_FPS: frame rate relating DUTY_CYCLE and default counts
// OVERLOAD_LIMIT: load factor where to assume saturation and limit throughput
134 const size_t ACTIVITIES_PER_FRAME = 10;
135 const size_t REFERENCE_FPS = 25;
136 const size_t OVERLOAD_LIMIT = 60;
137 };
138
// NOTE(review): struct header elided — presumably the parametrisation "tuned
// for Render Engine performance" (see member docs), overriding the larger
// Epoch size and stream count while inheriting the remaining defaults.
144 {
145 const static size_t EPOCH_SIZ = 500;
146 const size_t INITIAL_STREAMS = 5;
147 };
148
// Policy template to mix into the BlockFlow allocator, providing the
// parametrisation for its self-regulation. All values are derived from the
// CONF policy type (one of the config structs above).
153 template<class CONF>
154 struct Strategy
155 {
// Access the shared CONF instance. Meyers singleton: constructed on first
// use; initialisation is thread-safe since C++11.
156 CONF const&
157 config() const
158 { // Meyers Singleton
159 static const CONF configInstance;
160 return configInstance;
161 }
162
// Frames covered by one Epoch.
// NOTE(review): function name elided in this listing — presumably
// framesPerEpoch(), as invoked from announceAdditionalFlow() below; confirm.
163 size_t
165 {
166 return config().EPOCH_SIZ / config().ACTIVITIES_PER_FRAME;
167 }
168
// Initial throughput assumption: INITIAL_STREAMS streams at REFERENCE_FPS.
// NOTE(review): name elided — verify against original header.
169 size_t
171 {
172 return config().INITIAL_STREAMS * config().REFERENCE_FPS;
173 }
174
// NOTE(review): a further member (presumably initialEpochStep(), referenced
// below) was elided here by the listing generator.
180
// Initial Epoch count: reserve allocation headroom for two duty cycles,
// but at least two Epochs (name elided — presumably initialEpochCnt()).
181 size_t
183 {
184 return util::max(2*_raw(config().DUTY_CYCLE) / _raw(initialEpochStep()), 2u);
185 }
186
// Averaging window N for the exponential moving average used in
// markEpochUnderflow() (name elided — presumably averageEpochs()).
187 size_t
189 {
190 return util::max (initialEpochCnt(), 6u);
191 }
192
// Plain boost factor applied on underflow events (presumably boostFactor()).
193 double
195 {
196 return config().BOOST_FACTOR;
197 }
198
// Boost factor for overflow: reduced logarithmically, since overflow is
// detected on individual allocations (presumably boostFactorOverflow()).
199 double
201 {
202 return pow(config().BOOST_FACTOR, 5.0/config().EPOCH_SIZ);
203 }
204
// Lower bound on the Epoch time step, to prevent stalling Epoch progression
// when reaching saturation (presumably timeStep_cutOff(); return-type line
// elided — member docs indicate it yields a Duration).
207 {
208 return Duration{TimeValue(_raw(initialEpochStep()) / config().OVERLOAD_LIMIT)};
209 }
210 };
211
212
213
214
// Allocation Extent holding scheduler Activities to be performed altogether
// before a common deadline. Slot [0] is sacrificed to hold the EpochGate
// metadata; the remaining slots are handed out from the END towards the
// front (see EpochGate::next below).
223 template<class ALO>
224 class Epoch
225 : public ALO::Extent
226 {
227 using RawIter = ALO::iterator;
228 using SIZ = ALO::Extent::SIZ;
229
// An Epoch is only ever created *into* existing Extent storage
// (via implantInto()/setup()) — never default-constructed.
231 Epoch() = delete;
232
233 public:
// Specifically rigged GATE Activity, used for managing the Epoch metadata.
// NOTE(review): the struct name line was elided — presumably EpochGate, as
// referenced by gate() below; it occupies slot [0] of the Extent.
242 : Activity
243 {
// ctor (name line elided): marks the gate as a neutral Activity, then points
// the free-slot marker at the LAST usable slot — allocation proceeds
// backwards towards this gate object at slot [0].
252 : Activity{int(0), Time::ANYTIME}
253 {
254 // initialise allocation usage marker: start at last usable slot
255 next = this + (Epoch::SIZ() - 1);
256 ENSURE (next != this);
257 }
258 // default copyable
259
// deadline accessor (signature elided): exposes the Instant stored in the
// gate's condition, after which this Epoch counts as dead.
262 {
263 return data_.condition.dead;
264 }
265
// liveness check (name elided — presumably isAlive(Time)); NOTE(review): one
// source line (the second half of the condition) was elided by the listing.
266 bool
268 {
269 return data_.condition.isHold() // NOTE: expected callback keeps alive
271 }
272
// Number of slots handed out so far: distance the `next` marker has moved
// backwards from the initial (last usable) slot.
273 size_t
275 {
276 const Activity* firstAllocPoint{this + (Epoch::SIZ()-1)};
277 return firstAllocPoint - next;
278 }
279
// Free capacity remains while `next` has not yet reached the gate at [0].
280 bool
282 { // see C++ § 5.9 : comparison of pointers within same array
283 return next > this;
284 }
285
// Hand out the next free slot, moving the marker one step backwards.
// Caller must check hasFreeSlot() first (enforced by REQUIRE).
286 Activity*
288 {
289 REQUIRE (hasFreeSlot());
290 return next--;
291 }
292 };
293
294
295 EpochGate& gate() { return static_cast<EpochGate&> ((*this)[0]); }
296 Time deadline() { return Time{gate().deadline()}; }
297
// Fraction of usable slots (SIZ()-1, excluding the gate) already filled.
298 double
300 {
301 return double(gate().filledSlots()) / (SIZ()-1);
302 }
303
304
// Interpret raw Extent storage as an Epoch by placement-constructing the
// EpochGate into slot [0]. No other slot is touched.
305 static Epoch&
306 implantInto (RawIter storageSlot)
307 {
308 Epoch& target = static_cast<Epoch&> (*storageSlot);
309 new(&target[0]) EpochGate{};
310 return target;
311 }
312
// Convenience: implant a fresh Epoch and immediately set its deadline.
313 static Epoch&
314 setup (RawIter storageSlot, Time deadline)
315 {
316 Epoch& newEpoch{implantInto (storageSlot)};
317 newEpoch.gate().deadline() = deadline;
318 return newEpoch;
319 }
320 };
321
322
323 }//(End)namespace blockFlow
324
325 template<class CONF>
326 class FlowDiagnostic;
327
328
329
330
331 /******************************************************/
340 template<class CONF = blockFlow::DefaultConfig>
342 : public blockFlow::Strategy<CONF>
344 {
345 constexpr static size_t EPOCH_SIZ = CONF::EPOCH_SIZ;
346
347 public:
351 using Extent = Allocator::Extent;
353
355
356 private:
359
360
362 static Epoch&
363 asEpoch (Extent& extent)
364 {
365 return static_cast<Epoch&> (extent);
366 }
367
373 : public RawIter
374 {
375 Epoch* curr_{nullptr};
376
377 Epoch*
379 {
380 return RawIter::checkPoint()? & asEpoch (RawIter::yield())
381 : nullptr;
382 }
383
384 public:
385 StorageAdaptor() = default;
387 : RawIter{it}
388 , curr_{accessEpoch()}
389 { }
390
391 bool
393 {
394 return bool(curr_);
395 }
396
397 Epoch&
398 yield() const
399 {
400 return *curr_;
401 }
402
403 void
405 {
406 RawIter::validatePos(curr_);
407 RawIter::iterNext();
408 curr_ = accessEpoch();
409 }
410
411 void
412 expandAlloc (size_t cnt =1)
413 {
414 RawIter::expandAlloc(cnt);
415 curr_ = accessEpoch();
416 }
417 };
418
419
420
421 public:
// current Epoch spacing (name/signature line elided — per member docs this
// is `Duration getEpochStep() const`)
426
429 {
430 return Duration{epochStep_};
431 }
432
// Scale the Epoch spacing by the given factor, truncating to whole
// micro-ticks (raw_time_64). Used by the self-regulation in
// markEpochUnderflow().
433 void
434 adjustEpochStep (double factor)
435 {
436 double stretched = _raw(epochStep_) * factor;
437 raw_time_64 microTicks(floor (stretched));
438 epochStep_ = TimeValue{microTicks};
439 }
440
441
442
445
446
// Local handle to allocate a collection of Activities, all sharing a common
// deadline. NOTE(review): the class head and the member declarations
// (epoch_ : EpochIter, flow_ : BlockFlow*) were elided by the listing.
457 {
460
461 public:
// ctor (signature elided — per member docs: AllocatorHandle(RawIter slot,
// BlockFlow* parent)): start allocating into the given Epoch slot.
463 : epoch_{slot}
464 , flow_{parent}
465 { }
466
467 /*************************************************/
// Main API operation: placement-construct a new Activity record into the
// next free slot, forwarding the ctor arguments.
470 template<typename...ARGS>
471 Activity&
472 create (ARGS&& ...args)
473 {
474 return *new(claimSlot()) Activity {std::forward<ARGS> (args)...};
475 }
476
477 Time currDeadline() const { return epoch_->deadline(); }
478 bool hasFreeSlot() const { return epoch_->gate().hasFreeSlot(); }
479
480
481 private:
// Find a free slot, advancing to the next Epoch on overflow and allocating
// a fresh Epoch (one epochStep beyond the last deadline) when the storage
// is exhausted. May throw on out-of-memory.
482 void*
484 {
485 while (not (epoch_ and
486 epoch_->gate().hasFreeSlot()))
487 // Epoch overflow...
488 {// shift to following Epoch; possibly allocate
489 if (not epoch_)
490 {
491 auto lastDeadline = flow_->lastEpoch().deadline();
492 epoch_.expandAlloc(); // may throw out-of-memory..
493 ENSURE (epoch_);
494 Epoch::setup (epoch_, lastDeadline + flow_->getEpochStep());
495 }
496 else
497 {
// NOTE(review): one line (499 jumps from 497) was elided here — presumably
// the overflow notification (markEpochOverflow); confirm in original header.
499 ++epoch_;
500 }
501 }
502 return epoch_->gate().claimNextSlot();
503 }
504 };
505
506
507 /* ===== public BlockFlow API ===== */
508
// Initiate allocations for Activities to happen until the given deadline:
// locate (or create) the Epoch covering that deadline and return a handle
// allocating into it. Three cases: empty storage, deadline within the
// existing Epoch grid, or deadline beyond it (grid is then extended).
515 AllocatorHandle
516 until (Time deadline)
517 {
518 if (isnil (alloc_))
519 {//just create new Epoch one epochStep ahead
520 alloc_.openNew();
521 Epoch::setup (alloc_.begin(), deadline + Time{epochStep_});
522 return AllocatorHandle{alloc_.begin(), this};
523 }
524 else
525 {//find out how the given time relates to existing Epochs
526 if (firstEpoch().deadline() >= deadline)
527 // way into the past ... put it in the first available Epoch
528 return AllocatorHandle{alloc_.begin(), this};
529 else
530 if (lastEpoch().deadline() < deadline)
531 { // a deadline beyond the established Epochs...
532 // create a grid of new epochs up to the requested point
533 TimeVar lastDeadline = lastEpoch().deadline();
534 auto distance = _raw(deadline) - _raw(lastDeadline);
535 EpochIter nextEpoch{alloc_.end()};
536 ENSURE (not nextEpoch); // not valid yet, but we will allocate starting there...
537 auto requiredNew = distance / _raw(epochStep_);
// NOTE(review): the safety check runs on the value *before* the fractional
// round-up below, so the actual allocation may exceed the checked count by
// one block — presumably intentional slack; confirm.
538 ___sanityCheckAlloc(requiredNew);
539 if (distance % _raw(epochStep_) > 0)
540 ++requiredNew; // fractional: requested deadline lies within last epoch
541 nextEpoch.expandAlloc (requiredNew);
542 // nextEpoch now points to the first new Epoch
543 for ( ; 0 < requiredNew; --requiredNew)
544 {
545 REQUIRE (nextEpoch);
546 lastDeadline += epochStep_;
547 Epoch::setup (nextEpoch, lastDeadline);
548 if (deadline <= lastDeadline)
549 {
550 ENSURE (requiredNew == 1);
551 return AllocatorHandle{nextEpoch, this};
552 } // break out and return handle to allocate into the matching Epoch
553 ++nextEpoch;
554 }
555 NOTREACHED ("Logic of counting new Epochs");
556 }
557 else
// deadline falls inside the existing grid: linear scan for the first
// Epoch whose deadline covers it
558 for (EpochIter epochIt{alloc_.begin()}; epochIt; ++epochIt)
559 if (epochIt->deadline() >= deadline)
560 return AllocatorHandle{epochIt, this};
561
562 NOTREACHED ("Inconsistency in BlockFlow Epoch deadline organisation");
563 }
564 }
565
// Clean up all storage related to Activities before the given deadline
// (function name line elided — per member docs: discardBefore(Time)).
// While sweeping, each obsolete Epoch feeds its actual duration and fill
// factor back into the self-regulation (markEpochUnderflow).
573 void
575 {
576 if (isnil (alloc_)
577 or firstEpoch().deadline() > deadline)
578 return;
579
580 size_t toDiscard{0};
581 for (Epoch& epoch : allEpochs())
582 {
// stop at the first Epoch still alive at the given deadline — Epochs are
// ordered by ascending deadline, so everything beyond is alive too
583 if (epoch.gate().isAlive (deadline))
584 break;
585 ++toDiscard;
586 auto currDeadline = epoch.deadline();
587 auto epochDuration = currDeadline - updatePastDeadline(currDeadline);
588 markEpochUnderflow (epochDuration, epoch.getFillFactor());
589 }
590 // ask to discard the enumerated Extents
591 alloc_.dropOld (toDiscard);
592 }
593
594
// markEpochOverflow() — per member docs: "Notify and adjust Epoch capacity
// as consequence of exhausting an Epoch".
// NOTE(review): the signature and almost the whole body were elided by the
// listing generator (only the comment below survives); consult the original
// header for the implementation.
601 void
607 // caching access to the config saves 15-30% per allocation
610
// On clean-up of past Epochs, use the observed Epoch duration and fill
// factor to adapt the Epoch step: derive an adjustment signal relative to
// the desired fill level, then fold it into an exponential moving average
// over Strategy::averageEpochs() samples.
621 void
622 markEpochUnderflow (TimeVar actualLen, double fillFactor)
623 {
// linear interpolation between v1 and v2, weighted by f ∈ [0,1]
624 auto interpolate = [&](auto f, auto v1, auto v2) { return f*v2 + (1-f)*v1; };
625
626 // use actual fill as signal, set desired fill-level as goal
627 fillFactor /= config().TARGET_FILL;
628 auto THRESH = config().DAMP_THRESHOLD;
629 double adjust =
630 fillFactor > THRESH? fillFactor // limit signal for almost empty Epochs to avoid overshooting
631 : interpolate (1 - fillFactor/THRESH, fillFactor, Strategy::boostFactor());
632
633 // damped adjustment towards ideal size
634 double contribution = double(_raw(actualLen)) / _raw(epochStep_) / adjust;
635
636 // Exponential MA: mean ≔ mean · (N-1)/N + newVal/N
637 auto N = Strategy::averageEpochs();
638 double avgFactor = (contribution + N-1) / N; // contribution = newVal / mean => can extract factor
639 adjustEpochStep (avgFactor);
640 }
641
642
// Hint to the self-regulation: additional frame throughput is expected, so
// tighten the Epoch spacing accordingly, but never below the cut-off.
// NOTE(review): the signature line and one body line (654 — presumably the
// declaration/initialisation of currFps) were elided by the listing; per
// member docs the signature is announceAdditionalFlow(FrameRate additionalFps).
651 void
653 {
655 currFps += additionalFps;
656 TimeVar adaptedSpacing = Strategy::framesPerEpoch() / currFps;
657 epochStep_ = util::max (adaptedSpacing, _cache_timeStep_cutOff);
658 }
659
660
661 private:
// oldest active Epoch (storage must be non-empty; name line elided —
// presumably firstEpoch())
662 Epoch&
664 {
665 REQUIRE (not isnil (alloc_));
666 return asEpoch(*alloc_.begin());
667 }
// latest active Epoch (name line elided — presumably lastEpoch())
668 Epoch&
670 {
671 REQUIRE (not isnil (alloc_));
672 return asEpoch(*alloc_.last());
673 }
674
// iterate all active Epochs (signature elided — presumably EpochIter allEpochs())
677 {
678 return alloc_.begin();
679 }
680
// Helper to compute the duration of a just-discarded Epoch: returns the
// previously recorded deadline and records the new one.
// NOTE(review): line 691 was elided — presumably a first-call guard
// initialising pastDeadline_ one epochStep back; confirm in original.
688 Time
690 {
692 pastDeadline_ = newDeadline - epochStep_;
693 TimeVar previous = pastDeadline_;
694 pastDeadline_ = newDeadline;
695 return previous;
696 }
698
// Guard against runaway deadline expansion (see BLOCK_EXPAND_SAFETY_LIMIT).
// NOTE(review): line 705 (rest of the message concatenation) was elided;
// also the string "blocks > " appears to lack a leading space — verify.
699 void
700 ___sanityCheckAlloc (size_t newBlockCnt)
701 {
702 if (newBlockCnt > blockFlow::BLOCK_EXPAND_SAFETY_LIMIT)
703 throw err::Fatal{"Deadline expansion causes allocation of "
704 +util::showSize(newBlockCnt) +"blocks > "
706 , err::LUMIERA_ERROR_CAPACITY};
707 }
708
709
// diagnostics get full access to the internal storage (test support)
711 friend class FlowDiagnostic<CONF>;
712 };
713
714
715
716
717
718
719
720
721 /* ===== Test / Diagnostic ===== */
722
// Test/diagnostic facade over a BlockFlow instance (friend of BlockFlow).
// NOTE(review): the class head and member declarations (flow_ :
// BlockFlow<CONF>&, the Epoch typedef, and the first()/dtor lines) were
// elided by the listing generator.
723 template<class CONF>
725 {
727
729
730 public:
// ctor (signature elided — per member docs: FlowDiagnostic(BlockFlow<CONF>&))
732 : flow_{theFlow}
733 { }
734
// deadline of the latest Epoch, and bookkeeping figures of the raw allocator
736 Time last() { return flow_.lastEpoch().deadline(); }
737 size_t cntEpochs() { return watch(flow_.alloc_).active(); }
738 size_t poolSize() { return watch(flow_.alloc_).size(); }
739
// Find out in which Epoch the given Activity was placed: linear scan over
// all active Epochs, comparing object identity; Time::NEVER if not found.
// (return-type line elided — per member docs it yields a TimeValue)
742 find (Activity& someActivity)
743 {
744 for (Epoch& epoch : flow_.allEpochs())
745 for (Activity& act : epoch)
746 if (util::isSameObject (act, someActivity))
747 return epoch.deadline();
748 return Time::NEVER;
749 }
750
// Render the deadlines of all currently active Epochs, separated by "|".
752 std::string
754 {
755 if (isnil (flow_.alloc_)) return "";
756 auto deadlines = lib::explore (flow_.allEpochs())
757 .transform([](Epoch& a){ return TimeValue{a.deadline()}; });
758 return util::join(deadlines, "|");
759 }
760
// Count all currently allocated Activity elements across all Epochs.
762 size_t
764 {
765 size_t cnt{0};
766 for (auto& epoch : flow_.allEpochs())
767 cnt += epoch.gate().filledSlots();
768 return cnt;
769 }
770 };
771
// Factory: attach a FlowDiagnostic facade to the given BlockFlow
// (function name line elided — per member docs: watch(BlockFlow<CONF>&)).
772 template<class CONF>
773 inline FlowDiagnostic<CONF>
775 {
776 return FlowDiagnostic{theFlow};
777 }
778
779
780
781}} // namespace vault::gear
782#endif /*SRC_VAULT_GEAR_BLOCK_FLOW_H_*/
Descriptor for a piece of operational logic performed by the scheduler.
Duration is the internal Lumiera time metric.
Framerate specified as frames per second.
basic constant internal time value.
static const raw_time_64 SCALE
Number of micro ticks (µs) per second as basic time scale.
a mutable time value, behaving like a plain number, allowing copy and re-accessing
Lumiera's internal time value datatype.
static const Time NEVER
border condition marker value. NEVER >= any time value
static const Time ANYTIME
border condition marker value. ANYTIME <= any time value
Derived specific exceptions within Lumiera's exception hierarchy.
Definition error.hpp:193
Any copy and copy construction prohibited.
Definition nocopy.hpp:38
Record to describe an Activity, to happen within the Scheduler's control flow.
Definition activity.hpp:227
Activity * next
Activities are organised into chains to represent relations based on verbs.
Definition activity.hpp:249
Local handle to allow allocating a collection of Activities, all sharing a common deadline.
Activity & create(ARGS &&...args)
Main API operation: allocate a new Activity record.
AllocatorHandle(RawIter slot, BlockFlow *parent)
Adapt the access to the raw storage to present the Extents as Epoch; also caches the address resolution.
Allocation scheme for the Scheduler, based on Epoch(s).
blockFlow::Epoch< Allocator > Epoch
void adjustEpochStep(double factor)
void markEpochOverflow()
Notify and adjust Epoch capacity as consequence of exhausting an Epoch.
CONF const & config() const
static constexpr size_t EPOCH_SIZ
void markEpochUnderflow(TimeVar actualLen, double fillFactor)
On clean-up of past Epochs, the actual fill factor is checked to guess an Epoch duration to make optimal use of the allocated capacity.
Allocator::iterator RawIter
lib::IterableDecorator< StorageAdaptor > EpochIter
Adapted storage-Extent iterator, directly exposing Epoch&.
void ___sanityCheckAlloc(size_t newBlockCnt)
static Epoch & asEpoch(Extent &extent)
Duration getEpochStep() const
void discardBefore(Time deadline)
Clean-up all storage related to activities before the given deadline.
Allocator::Extent Extent
AllocatorHandle until(Time deadline)
initiate allocations for activities to happen until some deadline
Time updatePastDeadline(TimeVar newDeadline)
void announceAdditionalFlow(FrameRate additionalFps)
provide a hint to the self-regulating allocation scheme.
std::string allEpochs()
render deadlines of all currently active Epochs
size_t cntElm()
count all currently active allocated elements
BlockFlow< CONF > & flow_
BlockFlow< CONF >::Epoch Epoch
FlowDiagnostic(BlockFlow< CONF > &theFlow)
TimeValue find(Activity &someActivity)
find out in which Epoch the given Activity was placed
< special definitions for the Scheduler activity language
Definition activity.hpp:116
Allocation Extent holding scheduler Activities to be performed altogether before a common deadline.
static Epoch & setup(RawIter storageSlot, Time deadline)
static Epoch & implantInto(RawIter storageSlot)
iterator last()
positioned to the last / latest storage extent opened
void openNew(size_t cnt=1)
claim next cnt extents, possibly allocate.
void dropOld(size_t cnt)
discard oldest cnt extents
iterator begin()
iterate over all the currently active Extents
lib::IterableDecorator< IdxLink > iterator
allow transparent iteration of Extents, with the ability to expand storage
Memory management scheme for cyclically used memory extents.
Collection of small helpers and convenience shortcuts for diagnostics & formatting.
Building tree expanding and backtracking evaluations within hierarchical scopes.
int64_t raw_time_64
Definition job.h:50
boost::rational< int64_t > FSecs
rational representation of fractional seconds
auto explore(IT &&srcSeq)
start building a IterExplorer by suitably wrapping the given iterable source.
bool isSameObject(A const &a, B const &b)
compare plain object identity, based directly on the referee's memory identities.
Definition util.hpp:421
auto max(IT &&elms)
string showSize(size_t val) noexcept
string join(COLL &&coll, string const &delim=", ")
enumerate a collection's contents, separated by delimiter.
bool isnil(lib::time::Duration const &dur)
const size_t BLOCK_EXPAND_SAFETY_LIMIT
< Parametrisation of Scheduler memory management scheme
FlowDiagnostic< CONF > watch(BlockFlow< CONF > &theFlow)
Vault-Layer implementation namespace root.
Mix-Ins to allow or prohibit various degrees of copying and cloning.
Instant dead
alive while time < dead
Definition activity.hpp:279
bool isDead(Time now) const
Definition activity.hpp:281
Lightweight yet safe parametrisation of memory management.
const size_t ACTIVITIES_PER_FRAME
how many Activity records are typically used to implement a single frame
const Duration DUTY_CYCLE
typical relaxation time or average pre-roll to deadline
const double TARGET_FILL
aim at using this fraction of Epoch space on average (slightly below 100%)
const size_t INITIAL_STREAMS
Number of streams with REFERENCE_FPS to expect for normal use.
const size_t REFERENCE_FPS
frame rate to use as reference point to relate DUTY_CYCLE and default counts
const double BOOST_FACTOR
adjust capacity by this factor on Epoch overflow/underflow events
const size_t OVERLOAD_LIMIT
load factor over normal use where to assume saturation and limit throughput
static const size_t EPOCH_SIZ
Number of storage slots to fit into one »Epoch«
const double DAMP_THRESHOLD
do not account for (almost) empty Epochs to avoid overshooting regulation
specifically rigged GATE Activity, used for managing Epoch metadata
Parametrisation tuned for Render Engine performance.
Policy template to mix into the BlockFlow allocator, providing the parametrisation for self-regulation.
size_t initialEpochCnt() const
< reserve allocation headroom for two duty cycles
double boostFactorOverflow() const
< reduced logarithmically, since overflow is detected on individual allocations
Duration timeStep_cutOff() const
< prevent stalling Epoch progression when reaching saturation
a family of time value like entities and their relationships.
Tiny helper functions and shortcuts to be used everywhere. Consider this header to be effectively included everywhere.
Basic set of definitions and includes commonly used together (Vault).