#ifndef SRC_VAULT_GEAR_BLOCK_FLOW_H_
#define SRC_VAULT_GEAR_BLOCK_FLOW_H_

namespace blockFlow {
    config()
      {
        static const CONF configInstance;
        return configInstance;
      }

    framesPerEpoch()  const
      { return config().EPOCH_SIZ / config().ACTIVITIES_PER_FRAME; }

    initialFrameRate()  const
      { return config().INITIAL_STREAMS * config().REFERENCE_FPS; }

    initialEpochStep()  const
      { ... }

    initialEpochCnt()  const     // reserve allocation headroom for two duty cycles
      { return util::max (2*_raw(config().DUTY_CYCLE) / _raw(initialEpochStep()), 2u); }

    averageEpochs()  const
      { return util::max (initialEpochCnt(), 6u); }

    boostFactor()  const
      { return config().BOOST_FACTOR; }
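To make the interplay of these configuration values concrete, the following standalone calculation replays the derivations above with invented numbers (none of these are the actual DefaultConfig values): frames per Epoch, the initial frame rate, the initial Epoch step, and the initial Epoch count derived from the duty cycle.

    // Illustration only — every constant below is a made-up stand-in, not the real DefaultConfig.
    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    int main()
    {
      std::size_t  EPOCH_SIZ            = 100;        // slots per Epoch        (assumed)
      std::size_t  ACTIVITIES_PER_FRAME = 10;         //                        (assumed)
      std::size_t  INITIAL_STREAMS      = 2;          //                        (assumed)
      std::size_t  REFERENCE_FPS        = 25;         // fps                    (assumed)
      std::int64_t DUTY_CYCLE_us        = 1'000'000;  // 1s pre-roll, in µs     (assumed)

      std::size_t  framesPerEpoch   = EPOCH_SIZ / ACTIVITIES_PER_FRAME;                 // 10 frames
      std::size_t  initialFrameRate = INITIAL_STREAMS * REFERENCE_FPS;                  // 50 fps
      std::int64_t epochStep_us     = framesPerEpoch * 1'000'000LL / initialFrameRate;  // 200'000 µs
      std::int64_t initialEpochCnt  = std::max<std::int64_t> (2*DUTY_CYCLE_us / epochStep_us, 2);  // 10

      std::cout << framesPerEpoch  << " frames/Epoch, step "
                << epochStep_us    << "µs, "
                << initialEpochCnt << " initial Epochs\n";
    }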
    using RawIter = typename ALO::iterator;
    using SIZ     = typename ALO::Extent::SIZ;
          next = this + (Epoch::SIZ() - 1);
          ENSURE (next != this);

          return data_.condition.dead;

        isAlive (Time deadline)
          {
            return data_.condition.isHold()
                or not data_.condition.isDead (deadline);
          }

          const Activity* firstAllocPoint{this + (Epoch::SIZ()-1)};
          return firstAllocPoint - next;
          REQUIRE (hasFreeSlot());

        Time deadline()  { return Time{gate().deadline()}; }

        getFillFactor()
          {
            return double(gate().filledSlots()) / (SIZ()-1);
          }
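As these fragments suggest, the Gate sits in the first slot of an Epoch while allocation walks downward from the last slot, so filledSlots() is the distance between that top slot and next, and the fill factor relates it to the SIZ()-1 payload slots. A back-of-the-envelope check with invented numbers:

    #include <cassert>

    int main()
    {
      const int SIZ = 100;                  // Epoch::SIZ()                           (assumed value)
      int firstAllocPoint = SIZ - 1;        // topmost payload slot; slot 0 holds the Gate
      int next = firstAllocPoint - 33;      // 33 slots already claimed, walking downward

      int filledSlots   = firstAllocPoint - next;            // 33
      double fillFactor = double(filledSlots) / (SIZ - 1);   // 33/99 ≈ 0.33
      assert (filledSlots == 33 and fillFactor < 0.34);
    }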
        implantInto (RawIter storageSlot)
          {
            Epoch& target = static_cast<Epoch&> (*storageSlot);
            ...
          }

        setup (RawIter storageSlot, Time deadline)
          {
            Epoch& newEpoch{implantInto (storageSlot)};
            newEpoch.gate().deadline() = deadline;
            ...
          }
  template<class CONF = blockFlow::DefaultConfig>
  ...
      constexpr static size_t EPOCH_SIZ = CONF::EPOCH_SIZ;
      ...
      using Extent = typename Allocator::Extent;
      ...
      using Strategy::config;
      ...
          return static_cast<Epoch&> (extent);        // asEpoch(Extent&)
      ...
        Epoch* curr_{nullptr};
        accessEpoch()                             // caches the Epoch address resolution
          {
            return RawIter::checkPoint()? & asEpoch (RawIter::yield())
                                         : nullptr;                       // (fall-back reconstructed)
          }

          , curr_{accessEpoch()}
        ...
            RawIter::validatePos(curr_);
            ...
            curr_ = accessEpoch();

        expandAlloc (size_t cnt =1)
          {
            RawIter::expandAlloc(cnt);
            curr_ = accessEpoch();
          }
        , epochStep_{Strategy::initialEpochStep()}
      ...

      adjustEpochStep (double factor)
        {
          double stretched = _raw(epochStep_) * factor;
          gavl_time_t microTicks(floor (stretched));
          ...
        template<typename...ARGS>
        Activity&
        create (ARGS&& ...args)
          {
            return *new(claimSlot()) Activity {std::forward<ARGS> (args)...};
          }

        Time currDeadline()  const { return epoch_->deadline(); }
        bool hasFreeSlot()   const { return epoch_->gate().hasFreeSlot(); }
        ...
            while (not (epoch_ and
                        epoch_->gate().hasFreeSlot()))
              {
                ...
                auto lastDeadline = flow_->lastEpoch().deadline();
                epoch_.expandAlloc();
                ...
                Epoch::setup (epoch_, lastDeadline + flow_->getEpochStep());
              }
            ...
            return epoch_->gate().claimNextSlot();
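For orientation, a hedged usage sketch of this allocator handle, assembled from the member signatures in the index below (until(Time), create(...)). The header path and the namespaces are inferred from the include guard and the Vault-Layer naming and may differ in the actual tree; the sketch also assumes Activity is default constructible here, with any constructor arguments simply forwarded by create().

    #include "vault/gear/block-flow.hpp"        // path inferred from SRC_VAULT_GEAR_BLOCK_FLOW_H_

    using vault::gear::BlockFlow;               // namespaces assumed, see note above
    using vault::gear::Activity;
    using lib::time::Time;

    void
    allocateForDeadline (BlockFlow<>& blockFlow, Time deadline)
    {
      auto allocHandle = blockFlow.until (deadline);   // locate or open an Epoch covering the deadline
      Activity& activity = allocHandle.create();       // placement-allocated within that Epoch
      (void) activity;                                 // ...configure and hand over to the Scheduler
    }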
            Epoch::setup (alloc_.begin(), deadline + Time{epochStep_});
            return AllocatorHandle{alloc_.begin(), this};
          ...
          if (firstEpoch().deadline() >= deadline)
            ...
              return AllocatorHandle{alloc_.begin(), this};
          ...
          if (lastEpoch().deadline() < deadline)
            {
              ...
              TimeVar lastDeadline = lastEpoch().deadline();
              auto distance = _raw(deadline) - _raw(lastDeadline);
              ...
              ENSURE (not nextEpoch);
              auto requiredNew = distance / _raw(epochStep_);
              ___sanityCheckAlloc(requiredNew);
              if (distance % _raw(epochStep_) > 0)
                ++requiredNew;                          // round up to cover the rest (reconstructed)
              nextEpoch.expandAlloc (requiredNew);
              ...
              for ( ; 0 < requiredNew; --requiredNew)
                {
                  ...
                  lastDeadline += epochStep_;
                  Epoch::setup (nextEpoch, lastDeadline);
                  if (deadline <= lastDeadline)
                    {
                      ...
                      ENSURE (requiredNew == 1);
                      return AllocatorHandle{nextEpoch, this};
                    }
                }
              NOTREACHED ("Logic of counting new Epochs");
            }
          ...
          if (epochIt->deadline() >= deadline)
            return AllocatorHandle{epochIt, this};
          ...
          NOTREACHED ("Inconsistency in BlockFlow Epoch deadline organisation");
          if (isnil (alloc_)                                   // (leading check reconstructed)
              or firstEpoch().deadline() > deadline)
            return;
          ...
          for (Epoch& epoch : allEpochs())
            {
              if (epoch.gate().isAlive (deadline))
                ...
              auto currDeadline = epoch.deadline();
              auto epochDuration = currDeadline - updatePastDeadline(currDeadline);
              markEpochUnderflow (epochDuration, epoch.getFillFactor());
              ...

          if (epochStep_ > _cache_timeStep_cutOff)             // markEpochOverflow()
            adjustEpochStep (_cache_boostFactorOverflow);
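The member index below describes discardBefore() as cleaning up all storage related to activities before the given deadline, which suggests a periodic maintenance call along these lines; continuing the usage sketch above with the same inferred namespaces, the function name and plumbing here are illustrative only.

    void
    houseKeeping (vault::gear::BlockFlow<>& blockFlow, lib::time::Time now)
    {
      // recycle Epoch storage whose deadline already lies in the past,
      // feeding each Epoch's observed fill factor into the self-regulation
      blockFlow.discardBefore (now);
    }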
          auto interpolate = [&](auto f, auto v1, auto v2) { return f*v2 + (1-f)*v1; };
          ...
          fillFactor /= config().TARGET_FILL;
          auto THRESH = config().DAMP_THRESHOLD;
          double adjust =                                      // (declaration reconstructed)
              fillFactor > THRESH? fillFactor
                                 : interpolate (1 - fillFactor/THRESH, fillFactor, Strategy::boostFactor());
          ...
          double contribution = double(_raw(actualLen)) / _raw(epochStep_) / adjust;
          ...
          auto N = Strategy::averageEpochs();
          double avgFactor = (contribution + N-1) / N;
          adjustEpochStep (avgFactor);
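To see the damping at work, here is a self-contained numeric replay of one regulation step. All policy values (TARGET_FILL, DAMP_THRESHOLD, BOOST_FACTOR, N) are invented stand-ins, not the real DefaultConfig: a half-filled Epoch that actually lasted 180ms at a nominal 200ms step stretches the step slightly, averaged over N Epochs.

    #include <iostream>

    int main()
    {
      double epochStep  = 200.0;   // ms, current nominal spacing            (assumed)
      double actualLen  = 180.0;   // ms, observed Epoch duration            (assumed)
      double fillFactor = 0.5;     // fraction of payload slots used         (assumed)

      double TARGET_FILL    = 0.9;   // assumed policy values
      double DAMP_THRESHOLD = 0.1;
      double BOOST_FACTOR   = 1.05;
      int    N              = 6;     // averageEpochs()                      (assumed)

      auto interpolate = [](double f, double v1, double v2){ return f*v2 + (1-f)*v1; };

      fillFactor /= TARGET_FILL;
      double adjust = fillFactor > DAMP_THRESHOLD
                        ? fillFactor
                        : interpolate (1 - fillFactor/DAMP_THRESHOLD, fillFactor, BOOST_FACTOR);

      double contribution = actualLen / epochStep / adjust;   // ≈ 1.62
      double avgFactor    = (contribution + N-1) / N;         // ≈ 1.10 — damped moving average
      std::cout << "new step ≈ " << epochStep * avgFactor << " ms\n";   // ≈ 220 ms
    }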
          currFps += additionalFps;
          TimeVar adaptedSpacing = Strategy::framesPerEpoch() / currFps;
          epochStep_ = util::max (adaptedSpacing, _cache_timeStep_cutOff);
      Epoch& firstEpoch()
        {
          REQUIRE (not isnil (alloc_));
          return asEpoch(*alloc_.begin());
        }

      Epoch& lastEpoch()
        {
          REQUIRE (not isnil (alloc_));
          return asEpoch(*alloc_.last());
        }

          return alloc_.begin();
      updatePastDeadline (TimeVar newDeadline)
        {
          ...
          pastDeadline_ = newDeadline - epochStep_;
          TimeVar previous = pastDeadline_;
          pastDeadline_ = newDeadline;
          ...
        }

      ___sanityCheckAlloc (size_t newBlockCnt)
        {
          if (newBlockCnt > blockFlow::BLOCK_EXPAND_SAFETY_LIMIT)
            throw err::Fatal{"Deadline expansion causes allocation of "
                            +util::showSize(newBlockCnt)
                            +" blocks > "+util::showSize(blockFlow::BLOCK_EXPAND_SAFETY_LIMIT)
                            , err::LUMIERA_ERROR_CAPACITY};
        }
      Time first()       { return flow_.firstEpoch().deadline(); }
      Time last()        { return flow_.lastEpoch().deadline();  }
      size_t cntEpochs() { return watch(flow_.alloc_).active();  }
      size_t poolSize()  { return watch(flow_.alloc_).size();    }

          for (Epoch& epoch : flow_.allEpochs())
            ...
              if (util::isSameObject (act, someActivity))
                return epoch.deadline();

          if (isnil (flow_.alloc_))
            return "";
          ...
              .transform([](Epoch& a){ return TimeValue{a.deadline()}; });
          return util::join (deadlines, "|");

          for (auto& epoch : flow_.allEpochs())
            cnt += epoch.gate().filledSlots();
static const Time ANYTIME
border condition marker value. ANYTIME <= any time value
Allocation scheme for the Scheduler, based on Epoch(s).
a mutable time value, behaving like a plain number, allowing copy and re-accessing ...
Record to describe an Activity, to happen within the Scheduler's control flow.
iterator begin()
iterate over all the currently active Extents
special definitions for the Scheduler activity language
specifically rigged GATE Activity, used for managing Epoch metadata
const Duration DUTY_CYCLE
typical relaxation time or average pre-roll to deadline
auto explore(IT &&srcSeq)
start building an IterExplorer by suitably wrapping the given iterable source.
void openNew(size_t cnt=1)
claim next cnt extents, possibly allocate.
Policy template to mix into the BlockFlow allocator, providing the parametrisation for self-regulation...
Framerate specified as frames per second.
const size_t OVERLOAD_LIMIT
load factor above normal use at which to assume saturation and start limiting throughput
const size_t BLOCK_EXPAND_SAFETY_LIMIT
Parametrisation of Scheduler memory management scheme
Any copy and copy construction prohibited.
iterator last()
positioned to the last / latest storage extent opened
Allocation Extent holding scheduler Activities to be performed altogether before a common deadline...
static const gavl_time_t SCALE
Number of micro ticks (µs) per second as basic time scale.
Local handle to allow allocating a collection of Activities, all sharing a common deadline...
void markEpochOverflow()
Notify and adjust Epoch capacity as consequence of exhausting an Epoch.
Time updatePastDeadline(TimeVar newDeadline)
Lumiera's internal time value datatype.
const double BOOST_FACTOR
adjust capacity by this factor on Epoch overflow/underflow events
Lightweight yet safe parametrisation of memory management.
TimeValue find(Activity &someActivity)
find out in which Epoch the given Activity was placed
Derived specific exceptions within Lumiera's exception hierarchy.
static const size_t EPOCH_SIZ
Number of storage slots to fit into one »Epoch«
const double DAMP_THRESHOLD
do not account for (almost) empty Epochs to avoid overshooting regulation
Mix-Ins to allow or prohibit various degrees of copying and cloning.
const size_t REFERENCE_FPS
frame rate to use as reference point to relate DUTY_CYCLE and default counts
void dropOld(size_t cnt)
discard oldest cnt extents
void announceAdditionalFlow(FrameRate additionalFps)
provide a hint to the self-regulating allocation scheme.
Tiny helper functions and shortcuts to be used everywhere. Consider this header to be effectively incl...
Parametrisation tuned for Render Engine performance.
Activity & create(ARGS &&...args)
Main API operation: allocate a new Activity record.
const double TARGET_FILL
aim at using this fraction of Epoch space on average (slightly below 100%)
Duration timeStep_cutOff() const
prevent stalling Epoch progression when reaching saturation
boost::rational< int64_t > FSecs
rational representation of fractional seconds
static Epoch & asEpoch(Extent &extent)
void markEpochUnderflow(TimeVar actualLen, double fillFactor)
On clean-up of past Epochs, the actual fill factor is checked to guess an Epoch duration to make opti...
Memory management scheme for cyclically used memory extents.
size_t initialEpochCnt() const
reserve allocation headroom for two duty cycles
Basic set of definitions and includes commonly used together (Vault).
const size_t ACTIVITIES_PER_FRAME
how many Activity records are typically used to implement a single frame
static const Time NEVER
border condition marker value. NEVER >= any time value
Adapt the access to the raw storage to present the Extents as Epoch; also caches the address resoluti...
auto setup(FUN &&workFun)
Helper: setup a Worker-Pool configuration for the test.
Decorator-Adapter to make a »State Core« iterable as Lumiera Forward Iterator.
Duration is the internal Lumiera time metric.
void discardBefore(Time deadline)
Clean-up all storage related to activities before the given deadline.
const size_t INITIAL_STREAMS
Number of streams with REFERENCE_FPS to expect for normal use.
Building tree expanding and backtracking evaluations within hierarchical scopes.
double boostFactorOverflow() const
reduced logarithmically, since overflow is detected on individual allocations
a family of time-value-like entities and their relationships.
basic constant internal time value.
size_t cntElm()
count all currently active allocated elements
Vault-Layer implementation namespace root.
std::string allEpochs()
render deadlines of all currently active Epochs
AllocatorHandle until(Time deadline)
initiate allocations for activities to happen until some deadline
Descriptor for a piece of operational logic performed by the scheduler.