#ifndef LIB_SEVERAL_BUILDER_H
#define LIB_SEVERAL_BUILDER_H

#include <type_traits>
#include <functional>

using util::positiveDiff;
using std::is_nothrow_move_constructible_v;
using std::is_trivially_move_constructible_v;
using std::is_trivially_destructible_v;
using std::has_virtual_destructor_v;
using std::is_trivially_copyable_v;
using std::is_copy_constructible_v;
using std::is_object_v;
using std::is_volatile_v;
using std::is_const_v;
using std::is_same_v;
/** Helper to determine the »spread« required to hold elements
 *  of type TY in memory with proper alignment. */
template<typename TY>
size_t inline constexpr
reqSiz()
{
  size_t quant = alignof(TY);
  size_t siz = max (sizeof(TY), quant);
  size_t req = (siz/quant) * quant;
  return req < siz? req+quant : req;
}
/** determine size of a reserve buffer to place data with proper alignment */
size_t inline constexpr
alignRes (size_t alignment)
{
  return positiveDiff (alignment, alignof(void*));
}
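/* Worked example (illustrative only, not part of the original header; assumes a
 * typical 64-bit platform where alignof(void*) == 8): reqSiz() yields the per-element
 * »spread«, i.e. the element size rounded up to its alignment, while alignRes() adds
 * head-room for payload alignment stricter than the default allocation alignment.
 * \code
 *   static_assert (reqSiz<char>()   == 1);
 *   static_assert (reqSiz<double>() == 8);
 *   static_assert (alignRes (alignof(double)) == 0);        // no over-alignment, no extra reserve
 *   static_assert (alignRes (64) == 64 - alignof(void*));   // cache-line alignment needs head-room
 * \endcode
 */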
/**
 * Generic factory to manage objects within an ArrayBucket storage,
 * delegating to a custom allocator ALO.
 */
template<class I, template<typename> class ALO>
class ElementFactory
  : protected ALO<std::byte>
  {
    using Allo   = ALO<std::byte>;
    using AlloT  = std::allocator_traits<Allo>;
    using Bucket = ArrayBucket<I>;
    
    Allo& baseAllocator() { return *this; }
    
    /** rebind the base allocator for handling objects of type X */
    template<typename X>
    auto
    adaptAllocator()
      {
        using XAllo = typename AlloT::template rebind_alloc<X>;
        if constexpr (std::is_constructible_v<XAllo, Allo>)
          return XAllo{baseAllocator()};
        else
          return XAllo{};
      }
    
  public:
    ElementFactory (Allo allo =Allo{})
      : Allo{std::move (allo)}
      { }
    /** allow cross-initialisation when using the same kind of base allocator */
    template<typename O, template<typename> class XALO>
    ElementFactory (ElementFactory<O,XALO>& relatedFac)
      : Allo{relatedFac.baseAllocator()}
      { }
    /** allocate a new ArrayBucket with capacity for cnt elements of the given spread */
    Bucket*
    create (size_t cnt, size_t spread, size_t alignment =alignof(I))
      {
        size_t storageBytes = Bucket::storageOffset + cnt*spread;
        storageBytes += alignRes (alignment);
        
        std::byte* loc = AlloT::allocate (baseAllocator(), storageBytes);
        ENSURE (0 == size_t(loc) % alignof(void*));
        size_t offset = (size_t(loc) + Bucket::storageOffset) % alignment;
        if (offset)
          offset = alignment - offset;
        offset += Bucket::storageOffset;
        ASSERT (storageBytes - offset >= cnt*spread);
        Bucket* bucket = reinterpret_cast<Bucket*> (loc);
        
        using BucketAlloT = typename AlloT::template rebind_traits<Bucket>;
        auto bucketAllo = adaptAllocator<Bucket>();
        try { BucketAlloT::construct (bucketAllo, bucket, storageBytes, offset, spread); }
        catch(...)
          {
            AlloT::deallocate (baseAllocator(), loc, storageBytes);
            throw;
          }
        return bucket;
      }
    /** create a new element within the bucket's storage, at the given index position */
    template<class E, typename...ARGS>
    E&
    createAt (Bucket* bucket, size_t idx, ARGS&& ...args)
      {
        using ElmAlloT = typename AlloT::template rebind_traits<E>;
        auto elmAllo = adaptAllocator<E>();
        E* loc = reinterpret_cast<E*> (& bucket->subscript (idx));
        ElmAlloT::construct (elmAllo, loc, forward<ARGS> (args)...);
        return *loc;
      }
    /** destroy the elements (if required) and discard the bucket's allocation */
    template<class E>
    void
    destroy (ArrayBucket<I>* bucket)
      {
        if (bucket->isArmed())
          {// a deleter was installed, thus elements are live
            if (not is_trivially_destructible_v<E>)
              {
                size_t cnt = bucket->cnt;
                using ElmAlloT = typename AlloT::template rebind_traits<E>;
                auto elmAllo = adaptAllocator<E>();
                for (size_t idx=0; idx<cnt; ++idx)
                  {
                    E* elm = reinterpret_cast<E*> (& bucket->subscript (idx));
                    ElmAlloT::destroy (elmAllo, elm);
                  }
              }
          }
        size_t storageBytes = bucket->getAllocSize();
        std::byte* loc = reinterpret_cast<std::byte*> (bucket);
        AlloT::deallocate (baseAllocator(), loc, storageBytes);
      }
  };
/**
 * Policy Mix-In used to adapt to the ElementFactory and Allocator.
 */
template<class I, class E, template<typename> class ALO>
class /* ...class head elided in this excerpt... */
  {
    using Bucket = ArrayBucket<I>;
    
    /** hard wired safety limit */
    size_t static constexpr ALLOC_LIMIT = size_t(-1) / sizeof(E);
    /** adjust the allocation: create a new bucket and move the existing elements over */
    Bucket*
    realloc (Bucket* data, size_t cnt, size_t spread)
      {
        Bucket* newBucket = Fac::create (cnt, spread, alignof(E));
        try
          {
            newBucket->installDestructor (data->getDtor());
            size_t elms = min (cnt, data->cnt);
            for (size_t idx=0; idx<elms; ++idx)
              moveElem (idx, data, newBucket);
            newBucket->cnt = elms;
            data->destroy();
            return newBucket;
          }
        catch(...)
          { newBucket->destroy();
            throw;
          }
      }
    /** move or copy a single element from the source to the target bucket */
    void
    moveElem (size_t idx, Bucket* src, Bucket* tar)
      {
        if constexpr (is_trivially_copyable_v<E>)
          {
            void* oldPos = & src->subscript(idx);
            void* newPos = & tar->subscript(idx);
            size_t amount = min (src->spread, tar->spread);
            std::memmove (newPos, oldPos, amount);
          }
        else
        if constexpr (is_nothrow_move_constructible_v<E>
                      or is_copy_constructible_v<E>)
          {
            E& oldElm = reinterpret_cast<E&> (src->subscript (idx));
            Fac::template createAt<E> (tar, idx
                                      ,std::move_if_noexcept (oldElm));
          }
        else
          {
            (void)src; (void)tar;
            NOTREACHED ("realloc immovable type (neither trivially nor typed movable)");
          }
      }
  };
/* default allocation policy:  template<class I, class E> ...
 * (definition not shown in this excerpt) */


/**
 * Builder to create and populate a lib::Several<I>.
 */
template<class I, class E, template<class,class> class POL>
class SeveralBuilder
  : protected Several<I>
  , protected POL<I,E>
  {
    using Policy  = POL<I,E>;
    using Coll    = Several<I>;
    using Bucket  = ArrayBucket<I>;
    using Deleter = typename Bucket::Deleter;
    
  public:
    /** start Several build using a custom allocator */
    template<typename...ARGS
            ,typename =meta::enable_if<std::is_constructible<Policy,ARGS&&...>>>
    SeveralBuilder (ARGS&& ...alloInit)
      : Coll{}
      , Policy{forward<ARGS> (alloInit)...}
      { }
    
    Policy& policyConnect() { return *this; }
    /** switch to a different allocator (must be invoked before adding any elements) */
    template<template<typename> class ALO =std::void_t
            ,typename...ARGS>
    auto withAllocator (ARGS&& ...args);
    /** ensure up-front that a desired capacity is allocated */
    template<typename TY =E>
    SeveralBuilder&&
    reserve (size_t cntElm =1
            ,size_t elmSiz =reqSiz<TY>())
      {
        size_t extraElm = positiveDiff (cntElm, Coll::size());
        ensureElementCapacity<TY> (elmSiz);
        ensureStorageCapacity<TY> (elmSiz,extraElm);
        elmSiz = max (elmSiz, Coll::spread());
        adjustStorage (cntElm, elmSiz);
        return move (*this);
      }
        if (not Coll::empty()
            or size() < capacity())
          /*...*/;
    /** append copies of one or several arbitrary elements */
    template<typename VAL, typename...VALS>
    SeveralBuilder&&
    append (VAL&& val, VALS&& ...vals)
      {
        emplace<VAL> (forward<VAL> (val));
        if constexpr (0 < sizeof...(VALS))
          return append (forward<VALS> (vals)...);
        else
          return move (*this);
      }
    /** append a copy of all values exposed through an iterator */
    template<class IT>
    SeveralBuilder&&
    appendAll (IT&& data)
      {
        explore(data).foreach ([this](auto it){ emplaceCopy(it); });
        return move (*this);
      }
    template<typename X>
    SeveralBuilder&&
    appendAll (std::initializer_list<X> ili)
      {
        using Val = typename meta::Strip<X>::TypeReferred;
        for (Val const& x : ili)
          emplaceNewElm<Val> (x);
        return move (*this);
      }
    /** emplace a number of elements of the defined element type E */
    template<typename...ARGS>
    SeveralBuilder&&
    fillElm (size_t cntNew, ARGS&& ...args)
      {
        for ( ; 0<cntNew; --cntNew)
          emplaceNewElm<E> (forward<ARGS> (args)...);
        return move (*this);
      }
    /** create a new content element within the managed storage */
    template<class TY, typename...ARGS>
    SeveralBuilder&&
    emplace (ARGS&& ...args)
      {
        using Val = typename meta::Strip<TY>::TypeReferred;
        emplaceNewElm<Val> (forward<ARGS> (args)...);
        return move (*this);
      }
    size_t size()       const { return Coll::size(); }
    bool   empty()      const { return Coll::empty();}
    size_t capacity()   const { return Coll::storageBuffSiz() / Coll::spread(); }
    size_t capReserve() const { return capacity() - size(); }
    /** array-like access to an element, with bounds check */
    I&
    operator[] (size_t idx)
      {
        if (idx >= Coll::size())
          throw err::Invalid{"index beyond the current content of this Several-container"};
        return Coll::operator[] (idx);
      }
  private:
    template<class IT>
    void
    emplaceCopy (IT& dataSrc)
      {
        using Val = typename IT::value_type;
        emplaceNewElm<Val> (*dataSrc);
      }
    template<class TY, typename...ARGS>
    void
    emplaceNewElm (ARGS&& ...args)
      {
        static_assert (is_object_v<TY> and not (is_const_v<TY> or is_volatile_v<TY>));
        
        probeMoveCapability<TY>();
        ensureElementCapacity<TY>();
        ensureStorageCapacity<TY>();
        
        size_t elmSiz = reqSiz<TY>();
        size_t newPos = Coll::size();
        size_t newCnt = newPos + 1;
        adjustStorage (newCnt, max (elmSiz, Coll::spread()));
        ENSURE (Coll::data_);
        
        Policy::template createAt<TY> (Coll::data_, newPos, forward<ARGS> (args)...);
        Coll::data_->cnt = newPos+1;
        ensureDeleter<TY>();
      }
    /** ensure clean-up can be handled properly */
    template<typename TY>
    void
    ensureDeleter()
      {
        Deleter deleterFunctor = selectDestructor<TY>();
        if (Coll::data_->isArmed()) return;
        Coll::data_->installDestructor (move (deleterFunctor));
      }
    /** ensure sufficient element capacity or the ability to adapt the element spread */
    template<typename TY>
    void
    ensureElementCapacity (size_t requiredSiz =reqSiz<TY>())
      {
        if (Coll::spread() < requiredSiz and not (Coll::empty() or canWildMove()))
          throw err::Invalid{_Fmt{"Unable to place element of type %s with size %d "
                                  "into Several-container for element size %d."}
                                 % util::typeStr<TY>() % requiredSiz % Coll::spread()};
      }
    /** ensure sufficient storage reserve for newElms, or verify the ability to re-allocate */
    template<typename TY>
    void
    ensureStorageCapacity (size_t requiredSiz =reqSiz<TY>(), size_t newElms =1)
      {
        if (not (Coll::empty()
                 or Coll::hasReserve (requiredSiz, newElms)
                 or Policy::canExpand (Coll::data_, requiredSiz*(Coll::size() + newElms))
                 /*...*/ ))
          throw err::Invalid{_Fmt{"Several-container is unable to accommodate further element of type %s; "
                                  "storage reserve (%d bytes ≙ %d elms) exhausted and unable to move "
                                  "elements of mixed unknown detail type, which are not trivially movable."}
                                 % util::typeStr<TY>() % Coll::storageBuffSiz() % capacity()};
      }
    /** possibly grow storage and re-arrange elements to accommodate desired capacity */
    void
    adjustStorage (size_t cnt, size_t spread)
      {
        size_t demand{cnt*spread};
        size_t buffSiz{Coll::storageBuffSiz()};
        if (demand == buffSiz)
          return;
        if (demand > buffSiz)
          {// need to grow the allocation
            if (spread > Coll::spread())
              cnt = max (cnt, buffSiz / Coll::spread());
            size_t overhead = sizeof(Bucket) + alignRes(alignof(E));
            size_t safetyLim = LUMIERA_MAX_ORDINAL_NUMBER * Coll::spread();
            size_t expandAlloc = min (positiveDiff (min (safetyLim
                                                        ,Policy::ALLOC_LIMIT)
                                                   ,overhead)
                                     ,max (2*buffSiz, cnt*spread));
            size_t newCnt = expandAlloc / spread;
            expandAlloc = newCnt * spread;
            if (expandAlloc < demand)
              throw err::Fatal{_Fmt{"Storage expansion for Several-container "
                                    "exceeds safety limit of %d bytes"} % safetyLim
                              ,LERR_(SAFETY_LIMIT)};
            
            Coll::data_ = Policy::realloc (Coll::data_, newCnt,spread);
          }
        ENSURE (Coll::data_);
        if (canWildMove() and spread != Coll::spread())
          adjustSpread (spread);
      }
    /** discard unused storage reserve, if possible */
    void
    shrinkStorage()
      {
        REQUIRE (not Coll::empty());
        if (not (Policy::canExpand (Coll::data_, Coll::size())
                 /*...*/ ))
          throw err::Invalid{"Unable to shrink storage for Several-collection, "
                             "since at least one element can not be moved."};
        
        Coll::data_ = Policy::realloc (Coll::data_, Coll::size(), Coll::spread());
      }
    /** move existing data to accommodate a changed spread (element stride) */
    void
    adjustSpread (size_t newSpread)
      {
        REQUIRE (Coll::data_);
        REQUIRE (newSpread * Coll::size() <= Coll::storageBuffSiz());
        size_t oldSpread = Coll::spread();
        if (newSpread > oldSpread)
          for (size_t i=Coll::size()-1; 0<i; --i)   // growing: shift from the end to avoid overwriting
            shiftStorage (i, oldSpread, newSpread);
        else
          for (size_t i=1; i<Coll::size(); ++i)     // shrinking: shift from the front
            shiftStorage (i, oldSpread, newSpread);
        Coll::data_->spread = newSpread;
      }
    void
    shiftStorage (size_t idx, size_t oldSpread, size_t newSpread)
      {
        REQUIRE (Coll::data_);
        byte* oldPos = Coll::data_->storage();
        byte* newPos = oldPos;
        oldPos += idx * oldSpread;
        newPos += idx * newSpread;
        std::memmove (newPos, oldPos, util::min (oldSpread,newSpread));
      }
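    /* Illustrative sketch (not part of the original header): the same in-place
     * re-spreading idea on a plain byte buffer. Growing the spread walks backwards
     * so not-yet-moved elements are never overwritten; shrinking walks forwards.
     * All names in this sketch are hypothetical.
     * \code
     *   #include <cstring>
     *   #include <cstddef>
     *   
     *   void
     *   respread (std::byte* buff, std::size_t cnt, std::size_t oldSpread, std::size_t newSpread)
     *   {
     *     if (newSpread > oldSpread)
     *       for (std::size_t i = cnt; i-- > 1; )            // backwards, element 0 stays put
     *         std::memmove (buff + i*newSpread, buff + i*oldSpread, oldSpread);
     *     else
     *       for (std::size_t i = 1; i < cnt; ++i)           // forwards, truncating to the new spread
     *         std::memmove (buff + i*newSpread, buff + i*oldSpread, newSpread);
     *   }
     * \endcode
     */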
    enum DestructionMethod{ UNKNOWN
                          , TRIVIAL
                          , ELEMENT
                          , VIRTUAL
                          };
    
    static string
    render (DestructionMethod m)
      {
        switch (m)
          {
            case TRIVIAL: return "trivial";
            case ELEMENT: return "fixed-element-type";
            case VIRTUAL: return "virtual-baseclass";
            default:
              throw err::Logic{"unknown DestructionMethod"};
          }
      }
    
    DestructionMethod destructor{UNKNOWN};
    bool lock_move{false};
    /** select a suitable method for invoking the element destructors
     *  and build a λ-object to be stored as deleter function */
    template<typename TY>
    Deleter
    selectDestructor()
      {
        Policy& factory = policyConnect();
        
        if (is_Subclass<TY,I>() and has_virtual_destructor_v<I>)
          {
            __ensureMark<TY> (VIRTUAL);
            return [factory](ArrayBucket<I>* bucket){ unConst(factory).template destroy<I> (bucket); };
          }
        if (is_trivially_destructible_v<TY>)
          {
            __ensureMark<TY> (TRIVIAL);
            return [factory](ArrayBucket<I>* bucket){ unConst(factory).template destroy<TY> (bucket); };
          }
        if (is_same_v<TY,E> and is_Subclass<E,I>())
          {
            __ensureMark<TY> (ELEMENT);
            return [factory](ArrayBucket<I>* bucket){ unConst(factory).template destroy<E> (bucket); };
          }
        throw err::Invalid{_Fmt{"Unsupported kind of destructor for element type %s."}
                               % util::typeStr<TY>()};
      }
    template<typename TY>
    void
    __ensureMark (DestructionMethod requiredKind)
      {
        if (destructor != UNKNOWN and destructor != requiredKind)
          throw err::Invalid{_Fmt{"Unable to handle (%s-)destructor for element type %s, "
                                  "since this container has been primed to use %s-destructors."}
                                 % render(requiredKind)
                                 % util::typeStr<TY>()
                                 % render(destructor)};
        destructor = requiredKind;
      }
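    /* Illustrative note (not part of the original header): how selectDestructor()
     * maps element types to a destruction strategy; the example types are hypothetical.
     * \code
     *   struct IVirt { virtual ~IVirt() = default; };
     *   struct SubA : IVirt { std::string name; };  // subclass of a base with virtual dtor -> VIRTUAL
     *   
     *   struct IPlain { int id; };                   // base without virtual destructor
     *   struct Flat  : IPlain { int x,y; };          // trivially destructible              -> TRIVIAL
     *   struct Elm   : IPlain { std::string txt; };  // exactly the configured element type E -> ELEMENT
     * \endcode
     * Mixing element types that would require different strategies causes __ensureMark() to throw.
     */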
    /** mark that we're about to accept an otherwise unknown type,
     *  which can not be trivially moved */
    template<typename TY>
    void
    probeMoveCapability()
      {
        if (not (is_same_v<TY,E> or is_trivially_copyable_v<TY>))
          lock_move = true;
      }
    
    bool canWildMove()  { return is_trivially_copyable_v<E> and not lock_move; }
    
    bool /*...*/()      { return not lock_move; }
/* Extension point: how to configure the SeveralBuilder to use an allocator ALO,
 * initialised by ARGS...  (only fragments of these definitions are shown below) */

template<template<typename> class ALO, typename...ARGS>
struct /*...*/;

template<template<typename> class ALO>
struct /*...*/
  {
    template<class I, class E>
    /*...*/;
  };

template<template<typename> class ALO, typename X>
struct /*...*/
  {
    template<class I, class E>
    struct Policy
      {
        Policy (ALO<X> refAllocator);
      };
  };
/** definition of the builder function to switch over to a custom allocator */
template<class I, class E, template<class,class> class POL>
template<template<typename> class ALO, typename...ARGS>
auto
SeveralBuilder<I,E,POL>::withAllocator (ARGS&& ...args)
{
  if (not empty())
    throw err::Logic{"lib::Several builder withAllocator() must be invoked "
                     "prior to adding any elements to the container"};
  
  // (definition of the allocator-bound builder type »BuilderWithAllo« elided in this excerpt)
  return BuilderWithAllo(forward<ARGS> (args)...);
}
/** Entrance Point: start building a lib::Several instance */
template<typename I, typename E =I>
SeveralBuilder<I,E>
makeSeveral()
{
  return SeveralBuilder<I,E>{};
}
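/* Usage sketch (illustrative only, not part of the original header): building a
 * lib::Several<I> with the builder API above. The element types »Num«, »One« and
 * »Two« are hypothetical; build() is the terminal builder operation, which locks
 * the contents and hands over the finished container.
 * \code
 *   struct Num { virtual ~Num() = default; virtual int eval() =0; };
 *   struct One : Num { int eval() override { return 1; } };
 *   struct Two : Num { int eval() override { return 2; } };
 *   
 *   Several<Num> nums = makeSeveral<Num>()
 *                          .reserve (10)            // pre-allocate room for 10 elements
 *                          .append (One{}, Two{})   // add elements of mixed subclass type
 *                          .build();
 *   int sum = nums[0].eval() + nums[1].eval();      // access through the interface type
 * \endcode
 */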