Mirror of https://github.com/VSadov/Satori.git, synced 2025-06-08 03:27:04 +09:00
some renames and member reorder
This commit is contained in: parent fe44109c6e, commit 547d8d0ea0
12 changed files with 84 additions and 73 deletions
@@ -46,7 +46,7 @@ public:
return (SatoriRegion*&)this->gc_reserved_2;
}

// stop allocating on all associated regions and pass them to recycler.
// stop allocating on all associated regions and optionally detach from the context.
void Deactivate(SatoriRecycler* recycler, bool detach);

private:
@@ -74,7 +74,6 @@ int SatoriFinalizationQueue::OverflowedGen()
return m_overflowedGen;
}


void SatoriFinalizationQueue::SetOverflow(int generation)
{
if (generation > m_overflowedGen)
@@ -231,7 +231,7 @@ size_t SatoriGC::GetCurrentObjSize()
uint64_t SatoriGC::GetTotalAllocatedBytes()
{
// monotonically increasing number produced by allocator when allocating objects.
// threads know the number and we update the total when doing GCs
// threads know their number and we update the total when doing GCs
return m_heap->Recycler()->GetTotalAllocatedBytes();
}

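The comment above describes per-thread allocation counters that get folded into a monotonic total when a GC runs. A minimal sketch of that bookkeeping, with hypothetical names (not the actual SatoriRecycler code):

#include <atomic>
#include <cstdint>

// Per-thread allocation context; alloc_bytes is written only by the owning thread.
struct AllocContextSketch
{
    uint64_t alloc_bytes = 0;
};

// Recycler-side total, folded in while the runtime is paused for a GC.
struct RecyclerSketch
{
    std::atomic<uint64_t> m_totalAllocatedBytes{0};

    void FoldIn(AllocContextSketch& ctx)
    {
        m_totalAllocatedBytes.fetch_add(ctx.alloc_bytes, std::memory_order_relaxed);
        ctx.alloc_bytes = 0;   // counted exactly once, so the total only ever grows
    }

    uint64_t GetTotalAllocatedBytes() const
    {
        return m_totalAllocatedBytes.load(std::memory_order_relaxed);
    }
};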
@@ -143,7 +143,7 @@ bool SatoriHeap::TryAddRegularPage(SatoriPage*& newPage)
{
// SYNCRONIZATION:
// A page map update must be seen by all threads before seeing objects allocated
// in the new page or checked barriers may consider the objects not in the heap.
// in the new page, otherwise checked barriers may consider the objects not in the heap.
//
// If another thread checks if object is in heap, its read of the map element is dependent on object,
// therefore the read will happen after the object is obtained.
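A small sketch of the publication order the comment above requires, using std::atomic as a stand-in for the runtime's actual barriers; the map shape, sizes, and names are assumptions:

#include <atomic>
#include <cstdint>
#include <cstddef>

constexpr size_t kPageShift = 30;                   // assumed page granularity for the sketch
static std::atomic<uint8_t> g_pageMap[1 << 16];     // one byte per potential page (index truncated below)

// Writer: the map entry is published before any object in the new page
// can become reachable from other threads.
void PublishPage(size_t pageIndex)
{
    g_pageMap[pageIndex & 0xFFFF].store(1, std::memory_order_release);
    // objects in the page are handed out only after this store
}

// Reader (checked barrier): the map index is computed from the object address,
// so the map read cannot happen before the object reference was obtained.
bool IsInHeap(size_t objAddress)
{
    return g_pageMap[(objAddress >> kPageShift) & 0xFFFF].load(std::memory_order_acquire) != 0;
}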
@@ -176,7 +176,7 @@ bool SatoriHeap::TryAddRegularPage(SatoriPage*& newPage)
}

// it is harder to find a contiguous space for a large page
// also unlikely that we cross-use one if just comitted
// also unlikely that we cross-use one if just comitted.
// we scan ahead and claim, but do not move m_nextPageIndex
// unless we claimed contiguously.
SatoriPage* SatoriHeap::AddLargePage(size_t minSize)
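The scan-ahead-and-claim idea in the comment above might look roughly like the following, reduced to single-slot claims for brevity; the slot array and names are invented for illustration:

#include <atomic>
#include <cstddef>

constexpr size_t kMapSize = 1024;
static std::atomic<int> g_pageSlots[kMapSize];   // 0 = free, 1 = claimed (simplified)
static std::atomic<size_t> g_nextPageIndex{0};

// Returns a claimed slot index, or kMapSize if none is available.
size_t ClaimPageSlot()
{
    size_t next = g_nextPageIndex.load(std::memory_order_relaxed);
    for (size_t i = next; i < kMapSize; i++)
    {
        int expected = 0;
        if (g_pageSlots[i].compare_exchange_strong(expected, 1))
        {
            // only advance the shared cursor if the claim was contiguous with it;
            // claims further ahead leave it alone so earlier holes remain reachable
            if (i == next)
            {
                g_nextPageIndex.compare_exchange_strong(next, i + 1);
            }
            return i;
        }
    }
    return kMapSize;
}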
@@ -95,7 +95,6 @@ void SatoriObject::DirtyCardsForContent()
void SatoriObject::Validate()
{
#ifdef _DEBUG
// _ASSERTE(this->GetReloc() == 0);
_ASSERTE(this->Size() >= Satori::MIN_FREE_SIZE);

if (ContainingRegion()->IsEscapeTrackedByCurrentThread())
@@ -122,8 +122,9 @@ inline void SatoriObject::UnSuppressFinalization()
}

//
// Implementation note on mark overflow and relocation - we could use temporary maps,
// but we will use unused bits in the syncblock instead.
// Implementation note on mark overflow and relocation:
// we could use temporary maps (we would have to on 32bit),
// but on 64bit we will use unused bits in the syncblock instead.
//

inline int32_t SatoriObject::GetNextInLocalMarkStack()
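As a toy illustration of the note above, on 64-bit a 32-bit mark-stack link can live in otherwise unused header bits; the bit layout here is made up and only shows the packing idea:

#include <cstdint>

// Hypothetical header word: the low 32 bits keep their normal meaning,
// the high 32 bits are unused on 64-bit and can hold a local mark-stack link.
struct HeaderSketch
{
    uint64_t bits = 0;

    void SetNextInLocalMarkStack(int32_t next)
    {
        bits = (bits & 0xFFFFFFFFull) | ((uint64_t)(uint32_t)next << 32);
    }

    int32_t GetNextInLocalMarkStack() const
    {
        return (int32_t)(bits >> 32);
    }
};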
@@ -180,6 +181,7 @@ inline int SatoriObject::GetMarkBitAndWord(size_t* bitmapIndex)
return (start >> 3) & 63; // % bits in a word
}

// used by pinned allocations
inline void SatoriObject::SetUnmovable()
{
((DWORD*)this)[-1] |= BIT_SBLK_GC_RESERVE;
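The index math in GetMarkBitAndWord above follows from one mark bit per 8-byte granule and 64 bits per bitmap word; a worked sketch (assuming 'start' is a byte offset from the region base):

#include <cstddef>
#include <cstdint>

// start is the object's offset from the region base, in bytes.
inline int MarkBitAndWord(size_t start, size_t* bitmapIndex)
{
    size_t granule = start >> 3;        // one bit per 8-byte granule
    *bitmapIndex = granule >> 6;        // 64 bits per bitmap word
    return (int)(granule & 63);         // bit position within the word, i.e. granule % 64
}

// example: offset 0x450 -> granule 0x8A -> word 2, bit 10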
@@ -212,9 +214,9 @@ inline void SatoriObject::ForEachObjectRef(F lambda, bool includeCollectibleAllo
CGCDescSeries* cur = map->GetHighestSeries();

// GetNumSeries is actually signed.
// Negative value means the pattern repeats -cnt times such as in a case of arrays
ptrdiff_t cnt = (ptrdiff_t)map->GetNumSeries();
if (cnt >= 0)
// Negative value means the pattern repeats -numSeries times such as in a case of arrays
ptrdiff_t numSeries = (ptrdiff_t)map->GetNumSeries();
if (numSeries >= 0)
{
CGCDescSeries* last = map->GetLowestSeries();

@@ -248,7 +250,7 @@ inline void SatoriObject::ForEachObjectRef(F lambda, bool includeCollectibleAllo
uint32_t componentNum = ((ArrayBase*)this)->GetNumComponents();
while (componentNum-- > 0)
{
for (ptrdiff_t i = 0; i > cnt; i--)
for (ptrdiff_t i = 0; i > numSeries; i--)
{
val_serie_item item = cur->val_serie[i];
size_t refPtrStop = refPtr + item.nptrs * sizeof(size_t);
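For context on the loop above: a negative series count marks an array whose element layout repeats, and each val_serie entry describes a run of pointers followed by a skip. A simplified walk with stand-in types (not the real CGCDesc structures):

#include <cstddef>
#include <cstdint>

struct ValSerieItemSketch
{
    size_t nptrs;   // pointer-sized references in this run
    size_t skip;    // bytes to skip after the run
};

// Visit every reference of an array-like object whose element pattern repeats.
// 'numSeries' is negative; 'items' points at the highest-addressed entry and the
// remaining entries sit at items[-1], items[-2], ..., mirroring val_serie.
template <typename F>
void WalkRepeatingSeries(uint8_t* firstElem, size_t componentNum,
                         const ValSerieItemSketch* items, ptrdiff_t numSeries, F visit)
{
    uint8_t* p = firstElem;
    while (componentNum-- > 0)
    {
        for (ptrdiff_t i = 0; i > numSeries; i--)
        {
            const ValSerieItemSketch& item = items[i];
            uint8_t* stop = p + item.nptrs * sizeof(size_t);
            while (p < stop)
            {
                visit(reinterpret_cast<void**>(p));
                p += sizeof(size_t);
            }
            p += item.skip;
        }
    }
}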
@@ -272,8 +274,9 @@ inline void SatoriObject::ForEachObjectRef(F lambda, size_t start, size_t end)
if (start <= Start() && mt->Collectible())
{
uint8_t* loaderAllocator = GCToEEInterface::GetLoaderAllocatorObjectForGC(this);
// NB: Allocator ref location is fake. The actual location is a handle).
// it is ok to "update" the ref, but it will have no effect
// NB: Allocator ref location is fake. The allocator is accessed via a handle indirection.
// It is ok to "update" the ref, but it will have no effect.
// The real update is when the handle is updated, which should happen separately.
lambda((SatoriObject**)&loaderAllocator);
}

@@ -286,7 +289,7 @@ inline void SatoriObject::ForEachObjectRef(F lambda, size_t start, size_t end)
CGCDescSeries* cur = map->GetHighestSeries();

// GetNumSeries is actually signed.
// Negative value means the pattern repeats -cnt times such as in a case of arrays
// Negative value means the pattern repeats -numSeries times such as in a case of arrays
ptrdiff_t cnt = (ptrdiff_t)map->GetNumSeries();
if (cnt >= 0)
{
@@ -101,7 +101,7 @@ SatoriRegion* SatoriPage::MakeInitialRegion()
return SatoriRegion::InitializeAt(this, m_firstRegion, m_end - m_firstRegion, m_initialCommit, used);
}

void SatoriPage::RegionInitialized(SatoriRegion* region)
void SatoriPage::OnRegionInitialized(SatoriRegion* region)
{
_ASSERTE((size_t)region > Start() && (size_t)region < End());
size_t startIndex = (region->Start() - Start()) >> Satori::REGION_BITS;
@@ -191,7 +191,8 @@ void SatoriPage::SetCardForAddress(size_t address)
}
}

// does not set card groups and page state
// only set cards, does not set card groups and page state
// used in card refreshing
void SatoriPage::SetCardForAddressOnly(size_t address)
{
size_t offset = address - Start();
@@ -290,7 +291,7 @@ void SatoriPage::DirtyCardsForRange(size_t start, size_t end)
memset((void*)(m_cardTable + firstCard), Satori::CardState::DIRTY, lastCard - firstCard + 1);

// dirtying can be concurrent with cleaning, so we must ensure order
// of writes - cards, then groups, then page
// of writes: cards, then groups, then page.
// cleaning will read in the opposite order
VolatileStoreBarrier();

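A sketch of the ordering discipline described above: the dirtying side writes cards, then the card group, then the page state, separated by release fences, while a cleaner reads in the opposite direction. The two-level sizes and state values are placeholders:

#include <atomic>
#include <cstdint>
#include <cstddef>

constexpr uint8_t kClean = 0;
constexpr uint8_t kDirty = 1;    // placeholder values, not Satori::CardState

struct PageCardsSketch
{
    std::atomic<uint8_t> m_pageState{kClean};
    std::atomic<uint8_t> m_cardGroups[8] {};
    std::atomic<uint8_t> m_cards[512] {};

    // dirtying: cards, then group, then page, with release fences between the levels
    void DirtyCard(size_t card, size_t group)
    {
        m_cards[card].store(kDirty, std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_release);
        m_cardGroups[group].store(kDirty, std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_release);
        m_pageState.store(kDirty, std::memory_order_relaxed);
    }

    // cleaning reads in the opposite order, so a dirty group is never observed
    // without the dirty cards underneath it also being visible
    bool GroupNeedsCleaning(size_t group)
    {
        if (m_pageState.load(std::memory_order_acquire) != kDirty)
            return false;
        return m_cardGroups[group].load(std::memory_order_acquire) == kDirty;
    }
};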
@@ -34,6 +34,7 @@
class SatoriHeap;
class SatoriRegion;

// The Page is a memory reservation unit. Also manages cards.
class SatoriPage
{
public:
@@ -43,7 +44,7 @@ public:
static SatoriPage* InitializeAt(size_t address, size_t pageSize, SatoriHeap* heap);
SatoriRegion* MakeInitialRegion();

void RegionInitialized(SatoriRegion* region);
void OnRegionInitialized(SatoriRegion* region);

SatoriRegion* RegionForAddressChecked(size_t address);
SatoriRegion* RegionForCardGroup(size_t group);
@@ -150,6 +150,8 @@ public:
m_tail = item;
}

// does not take locks, does not update contsaining queue.
// only used for intermediate merging of queues before consuming.
void AppendUnsafe(SatoriQueue<T>* other)
{
size_t otherCount = other->Count();
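An unsafe append of one privately owned intrusive queue onto another, as the new comment describes, can be as simple as the sketch below; the node and queue layout are assumptions, not SatoriQueue's actual fields:

#include <cstddef>

template <typename T>
struct QueueSketch
{
    T* m_head = nullptr;
    T* m_tail = nullptr;     // T is expected to expose a 'm_next' pointer
    size_t m_count = 0;

    // No locks, no update to the items' notion of their containing queue;
    // only valid while both queues are privately owned by the caller.
    void AppendUnsafe(QueueSketch<T>* other)
    {
        if (other->m_count == 0)
            return;

        if (m_tail == nullptr)
            m_head = other->m_head;
        else
            m_tail->m_next = other->m_head;

        m_tail = other->m_tail;
        m_count += other->m_count;

        other->m_head = other->m_tail = nullptr;
        other->m_count = 0;
    }
};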
@@ -199,6 +199,7 @@ private:
int MaxHelpers();
int64_t HelpQuantum();
void AskForHelp();
void RunWithHelp(void(SatoriRecycler::* method)());
bool HelpOnceCore();

void PushToEphemeralQueues(SatoriRegion* region);
@@ -206,21 +207,25 @@ private:
void PushToEphemeralQueue(SatoriRegion* region);
void PushToTenuredQueues(SatoriRegion* region);

void AdjustHeuristics();
void DeactivateAllStacks();
void PushToMarkQueuesSlow(SatoriWorkChunk*& currentWorkChunk, SatoriObject* o);
bool MarkOwnStackAndDrainQueues(int64_t deadline = 0);
void MarkOwnStack(gc_alloc_context* aContext, MarkContext* mc);
void MarkDemoted(SatoriRegion* curRegion, MarkContext& c);
void MarkAllStacksFinalizationAndDemotedRoots();

void IncrementRootScanTicket();
void IncrementCardScanTicket();
uint8_t GetCardScanTicket();

void MarkOwnStack(gc_alloc_context* aContext, MarkContext* mc);
void MarkThroughCards();
bool MarkThroughCardsConcurrent(int64_t deadline);
void MarkDemoted(SatoriRegion* curRegion, MarkContext& c);
void MarkAllStacksFinalizationAndDemotedRoots();

void PushToMarkQueuesSlow(SatoriWorkChunk*& currentWorkChunk, SatoriObject* o);
void DrainMarkQueues(SatoriWorkChunk* srcChunk = nullptr);
bool DrainMarkQueuesConcurrent(SatoriWorkChunk* srcChunk = nullptr, int64_t deadline = 0);
void MarkThroughCards();
bool MarkOwnStackAndDrainQueues(int64_t deadline = 0);

bool HasDirtyCards();
bool MarkThroughCardsConcurrent(int64_t deadline);
bool ScanDirtyCardsConcurrent(int64_t deadline);
bool CleanCards();
bool MarkHandles(int64_t deadline = 0);
@@ -228,39 +233,46 @@ private:
void ShortWeakPtrScanWorker();
void LongWeakPtrScan();
void LongWeakPtrScanWorker();

void ScanFinalizables();
void ScanFinalizableRegions(SatoriRegionQueue* regions, MarkContext* c);
void ScanAllFinalizableRegionsWorker();
void QueueCriticalFinalizablesWorker();

void DependentHandlesScan();
void DependentHandlesInitialScan();
void DependentHandlesInitialScanWorker();
void DependentHandlesRescan();
void DependentHandlesRescanWorker();
void PromoteHandlesAndFreeRelocatedRegions();
void PromoteSurvivedHandlesAndFreeRelocatedRegionsWorker();

void AdjustHeuristics();
void BlockingCollect();
void RunWithHelp(void(SatoriRecycler::* method)());
void BlockingMark();
void MarkNewReachable();
void DependentHandlesScan();
void DrainAndCleanWorker();
void MarkStrongReferences();
void MarkStrongReferencesWorker();
void DrainAndCleanWorker();

void Plan();
void PlanWorker();
void PlanRegions(SatoriRegionQueue* regions);
bool IsRelocatable(SatoriRegion* region);
void DenyRelocation();
void AddTenuredRegionsToPlan(SatoriRegionQueue* regions);
void AddRelocationTarget(SatoriRegion* region);
SatoriRegion* TryGetRelocationTarget(size_t size, bool existingRegionOnly);

void Relocate();
void RelocateWorker();
void RelocateRegion(SatoriRegion* region);
void Update();

void FreeRelocatedRegionsWorker();

void PromoteHandlesAndFreeRelocatedRegions();
void PromoteSurvivedHandlesAndFreeRelocatedRegionsWorker();

void Update();
void UpdateRootsWorker();
void UpdateRegionsWorker();
void UpdatePointersThroughCards();
void UpdatePointersInObjectRanges();
void UpdatePointersInPromotedObjects();
void UpdateRegions(SatoriRegionQueue* queue);
@@ -270,13 +282,6 @@ private:
bool DrainDeferredSweepQueueConcurrent(int64_t deadline = 0);
void DrainDeferredSweepQueueHelp();
void SweepAndReturnRegion(SatoriRegion* curRegion);
void UpdatePointersThroughCards();
void PlanRegions(SatoriRegionQueue* regions);
void AddRelocationTarget(SatoriRegion* region);

void AddTenuredRegionsToPlan(SatoriRegionQueue* regions);

SatoriRegion* TryGetRelocationTarget(size_t size, bool existingRegionOnly);

void ASSERT_NO_WORK();
};
@@ -95,7 +95,7 @@ SatoriRegion* SatoriRegion::InitializeAt(SatoriPage* containingPage, size_t addr
result->m_escapeFunc = nullptr;
result->m_generation = -1;

result->m_containingPage->RegionInitialized(result);
result->m_containingPage->OnRegionInitialized(result);
return result;
}

@@ -109,6 +109,7 @@ SatoriRecycler* SatoriRegion::Recycler()
return m_containingPage->Heap()->Recycler();
}

// rearm cards for a tenured region (ex: after en-masse promotion)
void SatoriRegion::RearmCardsForTenured()
{
_ASSERTE(Generation() == 2);
@@ -129,6 +130,7 @@ void SatoriRegion::FreeDemotedTrackers()
}
}

// reset all cards when the region will no longer be tenured.
void SatoriRegion::ResetCardsForEphemeral()
{
_ASSERTE(Generation() == 2);
@@ -480,7 +482,7 @@ void SatoriRegion::Coalesce(SatoriRegion* next)
GCToOSInterface::VirtualDecommit(next, toDecommit);
}

m_containingPage->RegionInitialized(this);
m_containingPage->OnRegionInitialized(this);
}

bool SatoriRegion::CanDecommit()
@@ -565,7 +567,8 @@ size_t SatoriRegion::AllocateHuge(size_t size, bool zeroInitialize)
size_t chunkStart = m_allocStart;
size_t chunkEnd = chunkStart + size;

// in rare cases the object does not cross into the last granule. (when it needs extra because of free obj padding).
// in rare cases the object does not cross into the last granule.
// (when the allocation is huge only because of parseability padding).
// in such case pad it in front.
if (chunkEnd < End() - Satori::REGION_SIZE_GRANULARITY)
{
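A small arithmetic sketch of the front-padding case mentioned above: if the chunk would stop short of the last granule, its start is shifted forward so the gap becomes free space in front of the object. Constants and names are illustrative only:

#include <cstddef>

constexpr size_t kGranule = 2 * 1024 * 1024;   // stand-in for Satori::REGION_SIZE_GRANULARITY

// Pick the start for a huge allocation of 'size' bytes in [allocStart, end)
// so that the object is not left entirely short of the last granule.
size_t ChooseHugeChunkStart(size_t allocStart, size_t end, size_t size)
{
    size_t chunkStart = allocStart;
    size_t chunkEnd = chunkStart + size;

    if (chunkEnd < end - kGranule)
    {
        // pad in front: shift the object forward and leave the gap
        // before it to be formatted as a free object
        chunkStart += (end - kGranule) - chunkEnd;
    }

    return chunkStart;
}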
@@ -618,7 +621,7 @@ size_t SatoriRegion::AllocateHuge(size_t size, bool zeroInitialize)
return chunkStart;
}

// Finds an object that contains given location.
// Finds an object that contains the given location.
//
// Assumptions that caller must arrange or handle:
// - we may return a Free object here.
@@ -836,6 +839,8 @@ void SatoriRegion::EscsapeAll()
}
}

// do not recurse into children
// used when escaping all objects in the region anyways
void SatoriRegion::EscapeShallow(SatoriObject* o)
{
_ASSERTE(o->ContainingRegion() == this);
@@ -870,7 +875,7 @@ void SatoriRegion::SetOccupancy(size_t occupancy, size_t objCount)
m_objCount = objCount;
}

// NB: dst is unused, it is just to avoid arg shuffle in x64 barriers
// NB: dst is unused, it is just to avoid argument shuffle in x64 barriers
void SatoriRegion::EscapeFn(SatoriObject** dst, SatoriObject* src, SatoriRegion* region)
{
region->EscapeRecursively(src);
@@ -885,7 +890,7 @@ bool SatoriRegion::ThreadLocalCollect(size_t allocBytes)
// TUNING: 1/4 is not too greedy? maybe 1/8 ?
if (allocBytes - m_allocBytesAtCollect < Satori::REGION_SIZE_GRANULARITY / 4)
{
// this is too soon. last collection did not buy us as much as we wanted.
// this is too soon. last collection did not buy us as much as we wanted
// either due to fragmentation or unfortunate allocation pattern in this thread.
// we will not collect this region, lest it keeps coming back.
return false;
@@ -1000,7 +1005,7 @@ void SatoriRegion::ThreadLocalMark()
// - mark ref as finalizer pending to be queued after compaction.
// - mark obj as reachable
// - push to mark stack
// - re-trace to mark children that are now F-reachable
// - trace through reachable again to mark children that are now F-reachable
ForEachFinalizableThreadLocal(
[this](SatoriObject* finalizable)
{
@@ -1052,11 +1057,11 @@ void SatoriRegion::ThreadLocalPlan()
// - Movable
// marked, but not escaped or pinned
//
// we will shift left all movable objects - as long as they fit between unmovables
// we will slide all movable objects towards the region start - as long as they fit between unmovables
//

// planning will coalesce free objects.
// that alone does not invalidate the index, unless we fill free objects with junk.
// that does not invalidate the index, but it will if we fill free objects with junk.
#ifdef JUNK_FILL_FREE_SPACE
ClearIndex();
#endif
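The planning described above is a sliding scheme: walk objects in address order and give each movable object the lowest address still available, never disturbing unmovable ones. A compact sketch over a flat object list (simplified, hypothetical types):

#include <cstddef>
#include <vector>

struct PlannedObjSketch
{
    size_t start;        // current address
    size_t size;
    bool   movable;      // marked, not escaped, not pinned
    size_t newStart;     // filled in by planning
};

// Slide movable objects toward the region start; unmovable objects anchor the free pointer.
void PlanSlide(std::vector<PlannedObjSketch>& objs, size_t regionStart)
{
    size_t freePtr = regionStart;               // next address available for compaction
    for (PlannedObjSketch& o : objs)            // objs must be sorted by address
    {
        if (!o.movable)
        {
            o.newStart = o.start;               // pinned/escaped objects stay put
            freePtr = o.start + o.size;         // compaction resumes after them
        }
        else
        {
            o.newStart = freePtr;               // freePtr <= o.start, so objects only move down
            freePtr += o.size;
        }
    }
}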
@@ -1535,10 +1540,10 @@ bool SatoriRegion::RegisterForFinalization(SatoriObject* finalizable)
return true;
}

// Finalizable trackers are generally accessed exclusively when EE stopped
// Finalizable trackers are generally accessed exclusively, when EE is stopped.
// The only case where we can have contention is when a user thread re-registers
// concurrently with another thread doing the same or
// concurrently with thread local collection.
// concurrently with a thread local collection.
// It is extremely unlikely to have such contention, so a simplest spinlock is ok
void SatoriRegion::LockFinalizableTrackers()
{
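Since contention here is expected to be practically nonexistent, a "simplest spinlock" of the kind the comment mentions can be as small as this sketch (illustrative, not the region's actual lock):

#include <atomic>

struct SpinLockSketch
{
    std::atomic<int> m_state{0};   // 0 = free, 1 = held

    void Lock()
    {
        int expected = 0;
        while (!m_state.compare_exchange_weak(expected, 1, std::memory_order_acquire))
        {
            expected = 0;          // compare_exchange overwrites 'expected' on failure
        }
    }

    void Unlock()
    {
        m_state.store(0, std::memory_order_release);
    }
};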
@@ -1623,6 +1628,7 @@ SatoriObject* SatoriRegion::SkipUnmarked(SatoriObject* from)
return ObjectForMarkBit(bitmapIndex, markBitOffset);
}

// clears the mark bit
SatoriObject* SatoriRegion::SkipUnmarkedAndClear(SatoriObject* from)
{
_ASSERTE(from->Start() < End());
@@ -1697,7 +1703,7 @@ SatoriObject* SatoriRegion::SkipUnmarked(SatoriObject* from, size_t upTo)
void SatoriRegion::UpdateFinalizableTrackers()
{
// if any finalizable trackers point outside,
// their objects have been relocated to this region
// their objects have been relocated to this region, we need to update these trackers
if (m_finalizableTrackers)
{
ForEachFinalizable(
@@ -1781,9 +1787,9 @@ bool SatoriRegion::TryDemote()
_ASSERTE(ObjCount() != 0);

// TUNING: heuristic for demoting - could consider occupancy, pinning, etc...
// the cost here is increasing gen1, which is supposed to be small.
// the cost here is increasing gen1, which is supposed to be short as
// demoted objects will have to be marked regardless of cards.
// NOTE: caller requires 3rd bucket, so region is ~ 1/4 empty
// we can't have > MAX_DEMOTED_OBJECTS_IN_REGION objects, but they can be big

if (ObjCount() > Satori::MAX_DEMOTED_OBJECTS_IN_REGION)
{
@@ -38,6 +38,7 @@ class SatoriRegionQueue;
class SatoriObject;
class SatoriAllocationContext;

// The Region contains objects and their metadata.
class SatoriRegion
{
friend class SatoriObject;
@@ -48,21 +49,19 @@ public:
SatoriRegion() = delete;
~SatoriRegion() = delete;

static SatoriRegion* InitializeAt(SatoriPage* containingPage, size_t address, size_t regionSize, size_t committed, size_t used);

static const int MAX_LARGE_OBJ_SIZE;

static SatoriRegion* InitializeAt(SatoriPage* containingPage, size_t address, size_t regionSize, size_t committed, size_t used);
void MakeBlank();
bool ValidateBlank();

void FreeDemotedTrackers();

void RearmCardsForTenured();
void ResetCardsForEphemeral();

SatoriRegion* TrySplit(size_t regionSize);
bool CanDecommit();
bool TryDecommit();
bool CanCoalesceWithNext();
bool TryCoalesceWithNext();

static size_t RegionSizeForAlloc(size_t allocSize);
@@ -75,14 +74,13 @@ public:

size_t StartAllocating(size_t minSize);
void StopAllocating(size_t allocPtr);
bool IsAllocating();

void AddFreeSpace(SatoriObject* freeObj);

bool HasFreeSpaceInTopBucket();
bool HasFreeSpaceInTop4Buckets();

bool IsAllocating();

void StartEscapeTrackingRelease(size_t threadTag);
void StopEscapeTracking();
bool IsEscapeTracking();
@@ -96,8 +94,10 @@ public:

void ResetReusableForRelease();

bool TryDemote();
bool IsDemoted();
SatoriWorkChunk* &DemotedObjects();
void FreeDemotedTrackers();

int Generation();
int GenerationAcquire();
@@ -121,7 +121,6 @@ public:

void TakeFinalizerInfoFrom(SatoriRegion* other);
void UpdateFinalizableTrackers();
bool NothingMarked();
void UpdatePointers();
void UpdatePointersInObject(SatoriObject* o);

@@ -134,10 +133,8 @@ public:
bool IsExposed(SatoriObject** location);
bool AnyExposed(size_t from, size_t length);
void EscapeRecursively(SatoriObject* obj);

void EscsapeAll();
void EscapeShallow(SatoriObject* o);
void SetOccupancy(size_t occupancy, size_t objCount);

template <typename F>
void ForEachFinalizable(F lambda);
@@ -153,6 +150,7 @@ public:
bool HasFinalizables();
bool& HasPendingFinalizables();

void SetOccupancy(size_t occupancy, size_t objCount);
size_t Occupancy();
size_t& OccupancyAtReuse();
size_t ObjCount();
@@ -161,10 +159,6 @@ public:
bool& DoNotSweep();
bool& AcceptedPromotedObjects();

#if _DEBUG
bool& HasMarksSet();
#endif

enum class ReuseLevel : uint8_t
{
None,
@@ -177,6 +171,11 @@ public:

SatoriQueue<SatoriRegion>* ContainingQueue();

#if _DEBUG
bool& HasMarksSet();
#endif

bool NothingMarked();
void ClearMarks();
void ClearIndex();
void ClearFreeLists();
@@ -188,10 +187,6 @@ public:
SatoriPage* ContainingPage();
SatoriRegion* NextInPage();

bool CanCoalesceWithNext();

bool TryDemote();

void Verify(bool allowMarked = false);

private:
@@ -207,7 +202,7 @@ private:
// it may be possible to repurpose the bits for other needs as we see fit.
//
// we will overlap the map and the header for simplicity of map operations.
// it is ok because the first BITMAP_START elements of the map cover the header/map and thus will not be used.
// it is ok because the first BITMAP_START elements of the map cover the header/map itself and thus will not be used.
// +1 to include End(), it will always be 0, but it is conveninet to make it legal map index.
size_t m_bitmap[BITMAP_LENGTH + 1];
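The overlap described above works because the bitmap words that would describe the header/bitmap area itself can never correspond to objects, so they can hold the header instead. A schematic sketch with assumed sizes:

#include <cstddef>

constexpr size_t kRegionSize   = 2 * 1024 * 1024;                         // assumed region granule
constexpr size_t kBitmapLength = kRegionSize / (8 * sizeof(size_t) * 8);  // one mark bit per 8-byte granule
constexpr size_t kBitmapStart  = 64;                                      // leading bitmap words assumed covered by the header

struct RegionHeaderSketch
{
    size_t m_generation;
    size_t m_occupancy;
    // ... more header fields
};

union RegionStartSketch
{
    RegionHeaderSketch m_header;               // header fields live in the first words
    size_t m_bitmap[kBitmapLength + 1];        // +1 so End() is a legal map index
};

// The first kBitmapStart words of m_bitmap map the header/bitmap area itself,
// so no object can ever need them and the header can safely occupy that space.
static_assert(sizeof(RegionHeaderSketch) <= kBitmapStart * sizeof(size_t),
              "header must fit inside the bitmap words that cover the header itself");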
|
Loading…
Add table
Add a link
Reference in a new issue