Mirror of https://github.com/VSadov/Satori.git, synced 2025-06-09 09:34:49 +09:00

Commit 5b0908787b "Decommit" (parent ecc315e564)
15 changed files with 326 additions and 175 deletions

@@ -311,7 +311,7 @@ FORCEINLINE void InlinedMemmoveGCRefsHelper(void *dest, const void *src, size_t
     if (len >= sizeof(size_t))
     {
-        CheckEscapeSatoriRange(dest, (size_t)src, len);
+        CheckEscapeSatoriRange((size_t)dest, (size_t)src, len);
     }

     // To be able to copy forwards, the destination buffer cannot start inside the source buffer

@@ -56,7 +56,6 @@ void GCScan::GcDhInitialScan(promote_func* fn, int condemned, int max_gen, ScanC
     pDhContext->m_pfnPromoteFunction = fn;
     pDhContext->m_iCondemned = condemned;
     pDhContext->m_iMaxGen = max_gen;
-    pDhContext->m_pScanContext = sc;

     // Look for dependent handle whose primary has been promoted but whose secondary has not. Promote the
     // secondary in those cases. Additionally this scan sets the m_fUnpromotedPrimaries and m_fPromoted state

@@ -1182,7 +1182,9 @@ void Ref_CheckReachable(uint32_t condemned, uint32_t maxgen, uintptr_t lp1)
 DhContext *Ref_GetDependentHandleContext(ScanContext* sc)
 {
     WRAPPER_NO_CONTRACT;
-    return &g_pDependentHandleContexts[getSlotNumber(sc)];
+    DhContext* dhc = &g_pDependentHandleContexts[getSlotNumber(sc)];
+    dhc->m_pScanContext = sc;
+    return dhc;
 }

 // Scan the dependent handle table promoting any secondary object whose associated primary object is promoted.

@@ -102,11 +102,13 @@ tryAgain:
     return nullptr;
 }

 //TODO: VS when Return and when Add?
 void SatoriAllocator::ReturnRegion(SatoriRegion* region)
 {
-    //TUNING: is this too aggressive, or should coalesce with prev too?
+    //TUNING: is this too aggressive?
     region->TryCoalesceWithNext();
+    region->SetGeneration(-1);
+    region->TryDecommit();

     // TODO: VS select by current core
     m_queues[SizeToBucket(region->Size())]->Push(region);

@@ -114,9 +116,10 @@ void SatoriAllocator::ReturnRegion(SatoriRegion* region)

 void SatoriAllocator::AddRegion(SatoriRegion* region)
 {
-    //TUNING: is this too aggressive, or should coalesce with prev too?
+    //TUNING: is this too aggressive?
     region->TryCoalesceWithNext();
     region->SetGeneration(-1);
+    region->TryDecommit();

     // TODO: VS select by current core
     m_queues[SizeToBucket(region->Size())]->Enqueue(region);

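Taken together, the two hunks above give a region a uniform retirement path before it is queued for reuse: merge forward, detach from any generation, release committed pages. A minimal sketch of that flow; RetireRegionSketch is a hypothetical name, and the member access mirrors the code above rather than quoting it:

    // hypothetical member of SatoriAllocator, mirroring ReturnRegion above
    void SatoriAllocator::RetireRegionSketch(SatoriRegion* region)
    {
        region->TryCoalesceWithNext();  // absorb the adjacent free region, if any
        region->SetGeneration(-1);      // detach from gen0/1/2 accounting
        region->TryDecommit();          // hand most committed pages back to the OS
        // park it in a size-segregated free queue; note ReturnRegion
        // pushes (LIFO) while AddRegion enqueues (FIFO)
        m_queues[SizeToBucket(region->Size())]->Push(region);
    }
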
@@ -212,8 +215,7 @@ SatoriObject* SatoriAllocator::AllocRegular(SatoriAllocationContext* context, si
     }

     // try get from the free list
-    size_t desiredFreeSpace = max(size + Satori::MIN_FREE_SIZE, Satori::MIN_REGULAR_ALLOC);
-    if (region->StartAllocating(desiredFreeSpace))
+    if (region->StartAllocating(size))
     {
         // we have enough free space in the region to continue
         context->alloc_ptr = context->alloc_limit = (uint8_t*)region->AllocStart();

@@ -237,7 +239,7 @@ SatoriObject* SatoriAllocator::AllocRegular(SatoriAllocationContext* context, si
     {
         // perform thread local collection and see if we have enough space after that.
         region->ThreadLocalCollect();
-        if (region->StartAllocating(desiredFreeSpace))
+        if (region->StartAllocating(size))
         {
             // we have enough free space in the region to continue
             context->alloc_ptr = context->alloc_limit = (uint8_t*)region->AllocStart();

@@ -42,7 +42,7 @@ public:
     void ClearPinnedAndMarked();
     bool IsEscaped();
     bool IsEscapedOrPinned();
-    int GetMarkBitAndOffset(size_t* bitmapIndex);
+    int GetMarkBitAndWord(size_t* bitmapIndex);

     void SetPermanentlyPinned();
     bool IsPermanentlyPinned();

@@ -198,7 +198,7 @@ inline void SatoriObject::CleanSyncBlock()
     ((size_t*)this)[-1] = 0;
 }

-inline int SatoriObject::GetMarkBitAndOffset(size_t* bitmapIndex)
+inline int SatoriObject::GetMarkBitAndWord(size_t* bitmapIndex)
 {
     size_t start = Start();
     *bitmapIndex = (start >> 9) & (SatoriRegion::BITMAP_LENGTH - 1); // % words in the bitmap

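The rename from GetMarkBitAndOffset to GetMarkBitAndWord makes the contract explicit: the function stores the index of the 64-bit bitmap word and returns the bit position inside it. The arithmetic follows from the shifts visible in this commit (one mark bit per 8-byte granule, 64 granules per word); the worked numbers below are illustrative:

    // sketch of the mapping, per the shifts above
    inline int MarkBitAndWordSketch(size_t address, size_t* bitmapIndex)
    {
        *bitmapIndex = (address >> 9) & (SatoriRegion::BITMAP_LENGTH - 1); // which word
        return (int)((address >> 3) & 63);                                 // which bit in it
    }
    // e.g. region offset 0x1238: granule 0x1238 >> 3 = 583;
    //      word 583 / 64 = 9 (same as 0x1238 >> 9); bit 583 % 64 = 7
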
@@ -207,12 +207,12 @@ inline int SatoriObject::GetMarkBitAndOffset(size_t* bitmapIndex)

 inline void SatoriObject::SetPermanentlyPinned()
 {
-    GetHeader()->SetGCBit();
+    ((DWORD*)this)[-1] |= BIT_SBLK_GC_RESERVE;
 }

 inline bool SatoriObject::IsPermanentlyPinned()
 {
-    return GetHeader()->GetBits() & BIT_SBLK_GC_RESERVE;
+    return ((DWORD*)this)[-1] & BIT_SBLK_GC_RESERVE;
 }

 template<typename F>

@@ -76,19 +76,6 @@ void SatoriPage::RegionInitialized(SatoriRegion* region)
     }
 }

-void SatoriPage::RegionDestroyed(SatoriRegion* region)
-{
-    _ASSERTE((size_t)region > Start() && (size_t)region < End());
-    size_t startIndex = (region->Start() - Start()) >> Satori::REGION_BITS;
-    size_t mapCount = region->Size() >> Satori::REGION_BITS;
-    for (int i = 0; i < mapCount; i++)
-    {
-        DWORD log2;
-        BitScanReverse(&log2, i);
-        RegionMap()[startIndex + i] = (uint8_t)(log2 + 2);
-    }
-}
-
 SatoriRegion* SatoriPage::RegionForAddress(size_t address)
 {
     _ASSERTE(address >= Start() && address < End());

@@ -126,12 +113,6 @@ SatoriRegion* SatoriPage::NextInPage(SatoriRegion* region)
         return nullptr;
     }

-    size_t mapIndex = (address - Start()) >> Satori::REGION_BITS;
-    if (RegionMap()[mapIndex] == 0)
-    {
-        return nullptr;
-    }
-
     return (SatoriRegion*)address;
 }

@@ -25,7 +25,6 @@ public:
     SatoriRegion* MakeInitialRegion();

     void RegionInitialized(SatoriRegion* region);
-    void RegionDestroyed(SatoriRegion* region);

     SatoriRegion* RegionForAddress(size_t address);
     SatoriRegion* RegionForAddressChecked(size_t address);

@@ -55,6 +55,9 @@ void SatoriRecycler::Initialize(SatoriHeap* heap)

     m_gen1Count = m_gen2Count = 0;
     m_condemnedGeneration = 0;
+
+    m_gen1Threshold = 5;
+    m_gen1Budget = 0;
 }

 // not interlocked. this is not done concurrently.

@@ -89,10 +92,19 @@ int SatoriRecycler::CondemnedGeneration()
     return m_condemnedGeneration;
 }

+int SatoriRecycler::Gen1RegionCount()
+{
+    return m_ephemeralFinalizationTrackingRegions->Count() + m_ephemeralRegions->Count();
+}
+
+int SatoriRecycler::Gen2RegionCount()
+{
+    return m_tenuredFinalizationTrackingRegions->Count() + m_tenuredRegions->Count();
+}
+
 int SatoriRecycler::RegionCount()
 {
-    return m_ephemeralFinalizationTrackingRegions->Count() + m_ephemeralRegions->Count() +
-        m_tenuredFinalizationTrackingRegions->Count() + m_tenuredRegions->Count();
+    return Gen1RegionCount() + Gen2RegionCount();
 }

 void SatoriRecycler::AddEphemeralRegion(SatoriRegion* region)

@@ -116,9 +128,9 @@ void SatoriRecycler::AddEphemeralRegion(SatoriRegion* region)
 // TODO: VS this should be moved to heuristics.
 void SatoriRecycler::MaybeTriggerGC()
 {
-    int count = RegionCount();
+    int count1 = Gen1RegionCount();

-    if (count - m_prevRegionCount > 10)
+    if (count1 > m_gen1Threshold)
     {
         if (m_gcInProgress)
         {

@@ -126,13 +138,40 @@ void SatoriRecycler::MaybeTriggerGC()
         }
         else if (Interlocked::CompareExchange(&m_gcInProgress, 1, 0) == 0)
        {
-            // for now just do every 16th scan (every 8 global GCs)
-            int generation = (m_scanCount + 1) % 16 == 0 ? 2 : 1;
-            Collect(generation, /*force*/ false);
+            // for now just do every 16th Gen1
+            // int generation = (m_gen1Count + 1) % 16 == 0 ? 2 : 1;
+            // Collect(generation, /*force*/ false);
+
+            if (m_gen1Budget <= 0)
+            {
+                Collect2();
+            }
+            else
+            {
+                Collect1();
+            }
         }
     }
 }

+NOINLINE
+void SatoriRecycler::Collect1()
+{
+    Collect(1, false);
+
+    m_condemnedGeneration = 0;
+    m_gcInProgress = false;
+}
+
+NOINLINE
+void SatoriRecycler::Collect2()
+{
+    Collect(2, false);
+
+    m_condemnedGeneration = 0;
+    m_gcInProgress = false;
+}
+
 void SatoriRecycler::Collect(int generation, bool force)
 {
     bool wasCoop = GCToEEInterface::EnablePreemptiveGC();

@@ -144,24 +183,41 @@ void SatoriRecycler::Collect(int generation, bool force)
     // become coop again (it will not block since VM is done suspending)
     GCToEEInterface::DisablePreemptiveGC();

-    int count = RegionCount();
-    if (count - m_prevRegionCount > 10 || force)
+    int count1 = Gen1RegionCount();
+    if (count1 > m_gen1Threshold || force)
     {
         m_condemnedGeneration = generation;
+        m_isCompacting = true;

         DeactivateAllStacks();

+        m_condemnedRegionsCount = m_condemnedGeneration == 2 ?
+            RegionCount() :
+            Gen1RegionCount();
+
         Mark();
         Sweep();
         Compact();
         UpdatePointers();

+        m_gen1Count++;
+        if (m_condemnedGeneration == 2)
+        {
+            m_gen2Count++;
+        }
+
         // TODO: update stats and heuristics.
-        m_prevRegionCount = RegionCount();
+        if (m_condemnedGeneration == 2)
+        {
+            m_gen1Budget = Gen2RegionCount();
+        }
+        else
+        {
+            m_gen1Budget -= Gen1RegionCount();
+        }
+
+        m_gen1Threshold = Gen1RegionCount() + max(5, Gen2RegionCount() / 4);
     }

     m_condemnedGeneration = 0;

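Read together, the budget and threshold updates replace the old "10 new regions since last GC" trigger with a crude generational pacing scheme. A worked example under assumed counts (the numbers are illustrative, not from the commit):

    // after a gen2 GC that leaves 400 gen2 regions:
    //   m_gen1Budget    = Gen2RegionCount()                  -> 400
    //   m_gen1Threshold = Gen1RegionCount() + max(5, 400/4)  -> gen1 count + 100
    // each later gen1 GC pays its survivor count out of the budget:
    //   m_gen1Budget -= Gen1RegionCount();
    // once m_gen1Budget reaches 0 or below, MaybeTriggerGC calls Collect2()
    // instead of Collect1(), so roughly a quarter of the gen2 heap's worth of
    // gen1 survivors is allowed between full collections.
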
@@ -464,7 +520,7 @@ void SatoriRecycler::DrainMarkQueues()
             }
         },
         /* includeCollectibleAllocator */ true
-        );
+    );
 }

 // done with srcChunk

@@ -867,8 +923,6 @@ void SatoriRecycler::Sweep()

 void SatoriRecycler::SweepRegions(SatoriRegionQueue* regions)
 {
-    bool compacting = true;
-
     SatoriRegion* curRegion;
     while (curRegion = regions->TryPop())
     {

@@ -885,7 +939,7 @@ void SatoriRecycler::SweepRegions(SatoriRegionQueue* regions)
         {
             _ASSERTE(curRegion->Generation() != 2);
             // when not compacting, gen1 GC does not need to sweep.
-            canRecycle = compacting ?
+            canRecycle = m_isCompacting ?
                 curRegion->Sweep(/*turnMarkedIntoEscaped*/ false) :
                 curRegion->NothingMarked();
         }

@@ -898,7 +952,7 @@ void SatoriRecycler::SweepRegions(SatoriRegionQueue* regions)
         else
         {
             // if not compacting, we are done here
-            if (!compacting)
+            if (!m_isCompacting)
             {
                 m_stayingRegions->Push(curRegion);
                 continue;

@@ -937,25 +991,41 @@ void SatoriRecycler::AddRelocationTarget(SatoriRegion* region)

 void SatoriRecycler::Compact()
 {
+    if (m_relocatingRegions->Count() < m_condemnedRegionsCount / 2)
+    {
+        m_isCompacting = false;
+    }
+
     SatoriRegion* curRegion;
     while (curRegion = m_relocatingRegions->TryPop())
     {
-        RelocateRegion(curRegion);
+        if (m_isCompacting)
+        {
+            RelocateRegion(curRegion);
+        }
+        else
+        {
+            m_stayingRegions->Push(curRegion);
+        }
     }
 }

-SatoriRegion* SatoriRecycler::TryGetRelocationTarget(size_t minSize, bool existingRegionOnly)
+SatoriRegion* SatoriRecycler::TryGetRelocationTarget(size_t allocSize, bool existingRegionOnly)
 {
     //make this occasionally fail in debug to be sure we can handle low memory case.
 #if _DEBUG
-    if (minSize % 1024 == 0)
+    if (allocSize % 1024 == 0)
     {
         return nullptr;
     }
 #endif

     DWORD bucket;
-    BitScanReverse64(&bucket, minSize);
+    BitScanReverse64(&bucket, allocSize);

     // we could search through this bucket, which may have a large enough obj,
     // but we will just use the next queue, which guarantees it fits
     bucket++;

     bucket = bucket > Satori::MIN_FREELIST_SIZE_BITS ?
         bucket - Satori::MIN_FREELIST_SIZE_BITS :

|
|||
SatoriRegion* region = queue->TryPop();
|
||||
if (region)
|
||||
{
|
||||
region->StartAllocating(minSize);
|
||||
size_t allocStart = region->StartAllocating(allocSize);
|
||||
_ASSERTE(allocStart);
|
||||
return region;
|
||||
}
|
||||
}
|
||||
|
@@ -983,7 +1054,7 @@ SatoriRegion* SatoriRecycler::TryGetRelocationTarget(size_t minSize, bool existi
         return nullptr;
     }

-    SatoriRegion* newRegion = m_heap->Allocator()->GetRegion(ALIGN_UP(minSize, Satori::REGION_SIZE_GRANULARITY));
+    SatoriRegion* newRegion = m_heap->Allocator()->GetRegion(ALIGN_UP(allocSize, Satori::REGION_SIZE_GRANULARITY));
     if (newRegion)
     {
         newRegion->SetGeneration(m_condemnedGeneration);

@@ -1108,43 +1179,46 @@ void SatoriRecycler::UpdateFn(PTR_PTR_Object ppObject, ScanContext* sc, uint32_t

 void SatoriRecycler::UpdatePointers()
 {
-    ScanContext sc;
-    sc.promotion = FALSE;
-    MarkContext c = MarkContext(this);
-    sc._unused1 = &c;
-
-    //TODO: VS there should be only one thread with "thread_number == 0"
-    //TODO: VS implement two-pass scheme with preferred vs. any stacks
-    IncrementScanCount();
-
-    //generations are meaningless here, so we pass -1
-    GCToEEInterface::GcScanRoots(UpdateFn, -1, -1, &sc);
-
-    // concurrent, per thread/heap
-    // relies on thread_number to select handle buckets and specialcases #0
-    GCScan::GcScanHandles(UpdateFn, m_condemnedGeneration, 2, &sc);
-    _ASSERTE(c.m_markChunk == nullptr);
-
-    // update refs in finalization queue
-    if (m_heap->FinalizationQueue()->HasItems())
+    if (m_isCompacting)
     {
-        // add finalization queue to mark list
-        m_heap->FinalizationQueue()->ForEachObjectRef(
-            [&](SatoriObject** ppObject)
-            {
-                SatoriObject* o = *ppObject;
-                ptrdiff_t ptr = ((ptrdiff_t*)o)[-1];
-                if (ptr < 0)
-                {
-                    *ppObject = (SatoriObject*)-ptr;
-                }
-            }
-        );
-    }
-
-    if (m_condemnedGeneration != 2)
-    {
-        UpdatePointersThroughCards();
+        ScanContext sc;
+        sc.promotion = FALSE;
+        MarkContext c = MarkContext(this);
+        sc._unused1 = &c;
+
+        //TODO: VS there should be only one thread with "thread_number == 0"
+        //TODO: VS implement two-pass scheme with preferred vs. any stacks
+        IncrementScanCount();
+
+        //generations are meaningless here, so we pass -1
+        GCToEEInterface::GcScanRoots(UpdateFn, -1, -1, &sc);
+
+        // concurrent, per thread/heap
+        // relies on thread_number to select handle buckets and specialcases #0
+        GCScan::GcScanHandles(UpdateFn, m_condemnedGeneration, 2, &sc);
+        _ASSERTE(c.m_markChunk == nullptr);
+
+        // update refs in finalization queue
+        if (m_heap->FinalizationQueue()->HasItems())
+        {
+            // add finalization queue to mark list
+            m_heap->FinalizationQueue()->ForEachObjectRef(
+                [&](SatoriObject** ppObject)
+                {
+                    SatoriObject* o = *ppObject;
+                    ptrdiff_t ptr = ((ptrdiff_t*)o)[-1];
+                    if (ptr < 0)
+                    {
+                        *ppObject = (SatoriObject*)-ptr;
+                    }
+                }
+            );
+        }
+
+        if (m_condemnedGeneration != 2)
+        {
+            UpdatePointersThroughCards();
+        }
     }

     // return target regions

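The finalization-queue fix-up relies on how relocation appears to stash the forwarding address: a negative value in the word just before the object (the sync block slot) marks a relocated object, and negating it recovers the new location. That reading is inferred from this hunk rather than stated elsewhere in the diff. A sketch:

    // sketch, assuming the negative-sync-block-word convention seen above
    inline SatoriObject* FollowIfRelocated(SatoriObject* o)
    {
        ptrdiff_t ptr = ((ptrdiff_t*)o)[-1];   // word preceding the object
        return ptr < 0 ? (SatoriObject*)-ptr   // negative => forwarding pointer
                       : o;                    // otherwise the object did not move
    }
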
@@ -1171,7 +1245,11 @@ void SatoriRecycler::UpdatePointersInRegions(SatoriRegionQueue* queue)
     SatoriRegion* curRegion;
     while (curRegion = queue->TryPop())
     {
-        curRegion->UpdatePointers();
+        if (m_isCompacting)
+        {
+            curRegion->UpdatePointers();
+        }
+
         if (curRegion->Generation() == 0)
         {
             continue;

@@ -25,25 +25,29 @@ public:
     void AddEphemeralRegion(SatoriRegion* region);
     void MaybeTriggerGC();

+    void Collect1();
+    void Collect2();
+
     int GetScanCount();
     int64_t GetCollectionCount(int gen);
     int CondemnedGeneration();

+    int Gen1RegionCount();
+    int Gen2RegionCount();
+
     void Collect(int generation, bool force);
 private:
     SatoriHeap* m_heap;

     // used to ensure each thread is scanned once per scan round.
     int m_scanCount;

+    int64_t m_gen1Count;
+    int64_t m_gen2Count;
     int m_condemnedGeneration;

-    // region count at the end of last GC, used in a crude GC triggering heuristic.
-    int m_prevRegionCount;
+    bool m_isCompacting;
     int m_gcInProgress;

+    SatoriMarkChunkQueue* m_workList;
+
     // temporary store for Gen0 regions
     SatoriRegionQueue* m_nurseryRegions;
     SatoriRegionQueue* m_ephemeralRegions;

@@ -62,7 +66,12 @@ private:
     SatoriRegionQueue* m_relocationTargets[Satori::FREELIST_COUNT];
     SatoriRegionQueue* m_relocatedRegions;

-    SatoriMarkChunkQueue* m_workList;
-    int64_t m_gen1Count;
-    int64_t m_gen2Count;
+    int m_gen1Threshold;
+    int m_gen1Budget;
+    int m_condemnedRegionsCount;

     static void DeactivateFn(gc_alloc_context* context, void* param);
     static void MarkFn(PTR_PTR_Object ppObject, ScanContext* sc, uint32_t flags);

@@ -11,6 +11,10 @@
 #include "../env/gcenv.os.h"
 #include "../env/gcenv.ee.h"

+#if !defined(_DEBUG)
+//#pragma optimize("gty", on)
+#endif
+
 #include "SatoriGC.h"
 #include "SatoriAllocator.h"
 #include "SatoriRecycler.h"

@@ -24,10 +28,6 @@
 #include "SatoriQueue.h"
 #include "SatoriMarkChunk.h"

-#if !defined(_DEBUG)
-// #pragma optimize("gty", on)
-#endif
-
 SatoriRegion* SatoriRegion::InitializeAt(SatoriPage* containingPage, size_t address, size_t regionSize, size_t committed, size_t used)
 {
     _ASSERTE(used <= committed);

@@ -201,14 +201,16 @@ static const int FREE_LIST_NEXT_OFFSET = sizeof(ArrayBase);

 void SatoriRegion::AddFreeSpace(SatoriObject* freeObj)
 {
-    size_t size = freeObj->Size();
-    if (size < Satori::MIN_FREELIST_SIZE)
+    // allocSize is smaller than size to make sure the span can always be made parseable
+    // after allocating objects in it.
+    ptrdiff_t allocSize = freeObj->Size() - Satori::MIN_FREE_SIZE;
+    if (allocSize < Satori::MIN_FREELIST_SIZE)
     {
         return;
     }

     DWORD bucket;
-    BitScanReverse64(&bucket, size);
+    BitScanReverse64(&bucket, allocSize);
     bucket -= (Satori::MIN_FREELIST_SIZE_BITS);
     _ASSERTE(bucket >= 0);
     _ASSERTE(bucket < Satori::FREELIST_COUNT);

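The subtraction is what lets callers such as AllocRegular pass a raw size now: a span is filed under its usable capacity, Size() minus MIN_FREE_SIZE, so whatever is carved out of it always leaves room for a trailing free object that keeps the region walkable. An illustrative consequence, with byte values assumed rather than taken from the commit:

    // assuming MIN_FREE_SIZE = 24 and MIN_FREELIST_SIZE_BITS = 5 (illustrative):
    // a 56-byte free object has allocSize = 56 - 24 = 32;
    // BitScanReverse64(32) = 5, bucket = 5 - 5 = 0.
    // it is filed as "can satisfy a 32-byte allocation", because after
    // placing 32 bytes a 24-byte free object still fits in the tail,
    // keeping the span parseable.
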
@@ -217,17 +219,17 @@ void SatoriRegion::AddFreeSpace(SatoriObject* freeObj)
     m_freeLists[bucket] = freeObj;
 }

-size_t SatoriRegion::StartAllocating(size_t minSize)
+size_t SatoriRegion::StartAllocating(size_t minAllocSize)
 {
     _ASSERTE(!IsAllocating());

     DWORD bucket;
-    BitScanReverse64(&bucket, minSize);
+    BitScanReverse64(&bucket, minAllocSize);

-    // when minSize is not a power of two we could search through the current bucket,
+    // when minAllocSize is not a power of two we could search through the current bucket,
     // which may have a large enough obj,
     // but we will just use the next bucket, which guarantees it fits
-    if (minSize & (minSize - 1))
+    if (minAllocSize & (minAllocSize - 1))
     {
         bucket++;
     }

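The bucket bump only happens when it must: bucket b holds spans with usable size in [2^b, 2^(b+1)), so an exact power of two is guaranteed to fit anywhere in its own bucket, while any other size might exceed some entries and has to take the next bucket. For example:

    // minAllocSize = 64 (power of two): BitScanReverse64 -> 6; every span in
    // bucket 6 has usable size >= 64, so bucket 6 is safe as-is.
    // minAllocSize = 96: BitScanReverse64 -> 6, but bucket 6 also holds
    // 64..95-byte spans; (96 & 95) != 0 detects the non-power-of-two and
    // bumps to bucket 7, whose spans are all >= 128 >= 96.
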
@@ -244,6 +246,7 @@ size_t SatoriRegion::StartAllocating(size_t minSize)
             m_freeLists[bucket] = *(SatoriObject**)(freeObj->Start() + FREE_LIST_NEXT_OFFSET);
             m_allocStart = freeObj->Start();
             m_allocEnd = freeObj->End();
+            _ASSERTE(AllocRemaining() >= minAllocSize);
             return m_allocStart;
         }
     }

@@ -263,7 +266,7 @@ size_t SatoriRegion::MaxAllocEstimate()
     {
         if (m_freeLists[bucket])
         {
-            maxRemaining = max(maxRemaining, ((size_t)1 << (bucket + Satori::MIN_FREELIST_SIZE_BITS)) - Satori::MIN_FREE_SIZE);
+            maxRemaining = max(maxRemaining, ((size_t)1 << (bucket + Satori::MIN_FREELIST_SIZE_BITS)));
         }
     }

@@ -295,7 +298,7 @@ SatoriRegion* SatoriRegion::Split(size_t regionSize)
     size_t nextStart, nextCommitted, nextUsed;
     SplitCore(regionSize, nextStart, nextCommitted, nextUsed);

-     // format the rest as a new region
+    // format the rest as a new region
     SatoriRegion* result = InitializeAt(m_containingPage, nextStart, regionSize, nextCommitted, nextUsed);
     _ASSERTE(result->ValidateBlank());
     return result;

@@ -309,9 +312,9 @@ SatoriRegion* SatoriRegion::NextInPage()
 void SatoriRegion::TryCoalesceWithNext()
 {
     SatoriRegion* next = NextInPage();
-    if (next && CanCoalesce(next))
+    if (next)
     {
-        auto queue = next->m_containingQueue;
+        auto queue = VolatileLoadWithoutBarrier(&next->m_containingQueue);
         if (queue && queue->Kind() == QueueKind::Allocator)
         {
             if (queue->TryRemove(next))

@@ -322,28 +325,47 @@ void SatoriRegion::TryCoalesceWithNext()
     }
 }

-bool SatoriRegion::CanCoalesce(SatoriRegion* other)
-{
-    return m_committed == other->Start();
-}
-
 void SatoriRegion::Coalesce(SatoriRegion* next)
 {
-    _ASSERTE(ValidateBlank());
-    _ASSERTE(next->ValidateBlank());
-    _ASSERTE(next->m_prev == next->m_next);
+    _ASSERTE(next->m_containingQueue == nullptr);
+    _ASSERTE(next->m_containingPage == m_containingPage);
-    _ASSERTE(CanCoalesce(next));
+    _ASSERTE(next->m_prev == next->m_next);
+    _ASSERTE(m_end == next->Start());
+    _ASSERTE(ValidateBlank());
+    _ASSERTE(next->ValidateBlank());

     m_end = next->m_end;
-    m_committed = next->m_committed;
-    m_used = next->m_used;
     m_allocEnd = next->m_allocEnd;
+
+    if (m_committed == next->Start())
+    {
+        m_committed = next->m_committed;
+        m_used = next->m_used;
+    }
+    else
+    {
+        size_t toDecommit = next->m_committed - next->Start();
+        _ASSERTE(toDecommit > 0);
+        _ASSERTE(toDecommit % Satori::CommitGranularity() == 0);
+        GCToOSInterface::VirtualDecommit(next, toDecommit);
+    }

     m_containingPage->RegionInitialized(this);
 }

+void SatoriRegion::TryDecommit()
+{
+    size_t decommitStart = ALIGN_UP((size_t)&m_syncBlock, Satori::CommitGranularity());
+    _ASSERTE(m_committed >= decommitStart);
+
+    size_t decommitSize = m_committed - decommitStart;
+    if (decommitSize > Satori::REGION_SIZE_GRANULARITY / 8)
+    {
+        GCToOSInterface::VirtualDecommit((void*)decommitStart, decommitSize);
+        m_committed = m_used = decommitStart;
+    }
+}
+
 size_t SatoriRegion::Allocate(size_t size, bool zeroInitialize)
 {
     _ASSERTE(m_containingQueue == nullptr);

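TryDecommit keeps only the pages holding the region's header and metadata (everything up to m_syncBlock, rounded up to the commit granularity) and returns the rest to the OS, but only when the payoff clears a threshold. Assuming 2 MB regions (consistent with the >> 21 region arithmetic later in this commit) and 4 KB commit granularity, the arithmetic works out as:

    // decommitStart = ALIGN_UP(&m_syncBlock, 4K)  -> first page past the header
    // decommitSize  = m_committed - decommitStart
    // threshold     = REGION_SIZE_GRANULARITY / 8 -> 2MB / 8 = 256KB (assumed sizes)
    // a region with 1.5MB committed releases ~1.5MB and keeps the header pages;
    // a region with only 100KB committed is left alone, the syscall isn't worth it.
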
@@ -624,10 +646,10 @@ void SatoriRegion::SetExposed(SatoriObject** location)
     // set the mark bit corresponding to the location to indicate that it is globally exposed
     // same as: ((SatoriObject*)location)->SetMarked();

-    size_t word = (size_t)location;
-    size_t bitmapIndex = (word >> 9) & (SatoriRegion::BITMAP_LENGTH - 1);
-    size_t mask = (size_t)1 << ((word >> 3) & 63);
+    size_t bitmapIndex;
+    int offset = ((SatoriObject*)location)->GetMarkBitAndWord(&bitmapIndex);
+
+    size_t mask = (size_t)1 << offset;
     m_bitmap[bitmapIndex] |= mask;
 }

@@ -638,13 +660,48 @@ bool SatoriRegion::IsExposed(SatoriObject** location)
     // check the mark bit corresponding to the location
     //same as: return ((SatoriObject*)location)->IsMarked();

-    size_t word = (size_t)location;
-    size_t bitmapIndex = (word >> 9) & (SatoriRegion::BITMAP_LENGTH - 1);
-    size_t mask = (size_t)1 << ((word >> 3) & 63);
+    size_t bitmapIndex;
+    int offset = ((SatoriObject*)location)->GetMarkBitAndWord(&bitmapIndex);
+
+    size_t mask = (size_t)1 << offset;
     return m_bitmap[bitmapIndex] & mask;
 }

+bool SatoriRegion::AnyExposed(size_t first, size_t length)
+{
+    _ASSERTE(length % 8 == 0);
+
+    size_t last = first + length - sizeof(size_t);
+    _ASSERTE(((SatoriObject*)first)->ContainingRegion() == this);
+    _ASSERTE(((SatoriObject*)last)->ContainingRegion() == this);
+
+    size_t bitmapIndexF;
+    size_t maskF = (size_t)-1 << ((SatoriObject*)first)->GetMarkBitAndWord(&bitmapIndexF);
+
+    size_t bitmapIndexL;
+    size_t maskL = (size_t)-1 >> (63 - ((SatoriObject*)last)->GetMarkBitAndWord(&bitmapIndexL));
+
+    if (bitmapIndexF == bitmapIndexL)
+    {
+        return m_bitmap[bitmapIndexF] & maskF & maskL;
+    }
+
+    if (m_bitmap[bitmapIndexF] & maskF)
+    {
+        return true;
+    }
+
+    for (size_t i = bitmapIndexF + 1; i < bitmapIndexL; i++)
+    {
+        if (m_bitmap[i])
+        {
+            return true;
+        }
+    }
+
+    return m_bitmap[bitmapIndexL] & maskL;
+}
+
 void SatoriRegion::EscapeRecursively(SatoriObject* o)
 {
     _ASSERTE(this->OwnedByCurrentThread());

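AnyExposed tests a whole range of mark bits with word-level probes: a left-trimmed mask for the first word, whole words in between, and a right-trimmed mask for the last word. A worked example (bit positions illustrative):

    // suppose first maps to word 10, bit 5 and last maps to word 12, bit 9:
    //   maskF = ~0 << 5        -> selects bits 5..63 of word 10
    //   maskL = ~0 >> (63 - 9) -> selects bits 0..9  of word 12
    // the scan is then:
    //   word 10 & maskF, word 11 in full, word 12 & maskL
    // if first and last land in the same word, the two masks are simply ANDed.
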
@@ -779,7 +836,7 @@ void SatoriRegion::ThreadLocalMark()
             obj->Validate();

             // skip the object
-            markBitOffset = obj->Next()->GetMarkBitAndOffset(&bitmapIndex);
+            markBitOffset = obj->Next()->GetMarkBitAndWord(&bitmapIndex);
         }
         else
         {

@@ -1093,7 +1150,7 @@ void SatoriRegion::ThreadLocalUpdatePointers()
         }

         // skip the object
-        markBitOffset = obj->Next()->GetMarkBitAndOffset(&bitmapIndex);
+        markBitOffset = obj->Next()->GetMarkBitAndWord(&bitmapIndex);
     }
     else
     {

@@ -1363,12 +1420,12 @@ SatoriObject* SatoriRegion::SkipUnmarked(SatoriObject* from)
 {
     _ASSERTE(from->Start() < End());
     size_t bitmapIndex;
-    int markBitOffset = from->GetMarkBitAndOffset(&bitmapIndex);
+    int markBitOffset = from->GetMarkBitAndWord(&bitmapIndex);

     DWORD offset;
     if (BitScanForward64(&offset, m_bitmap[bitmapIndex] >> markBitOffset))
     {
-        // got reachable object.
+        // got mark bit
         markBitOffset += offset;
     }
     else

@@ -1379,7 +1436,7 @@ SatoriObject* SatoriRegion::SkipUnmarked(SatoriObject* from)
             bitmapIndex++;
             if (BitScanForward64(&offset, m_bitmap[bitmapIndex]))
             {
-                // got reachable object.
+                // got mark bit
                 markBitOffset = offset;
                 break;
             }

@@ -1393,12 +1450,12 @@ SatoriObject* SatoriRegion::SkipUnmarked(SatoriObject* from, size_t upTo)
 {
     _ASSERTE(from->Start() < End());
     size_t bitmapIndex;
-    int markBitOffset = from->GetMarkBitAndOffset(&bitmapIndex);
+    int markBitOffset = from->GetMarkBitAndWord(&bitmapIndex);

     DWORD offset;
     if (BitScanForward64(&offset, m_bitmap[bitmapIndex] >> markBitOffset))
     {
-        // got reachable object.
+        // got mark bit
         markBitOffset += offset;
     }
     else

@@ -1411,7 +1468,7 @@ SatoriObject* SatoriRegion::SkipUnmarked(SatoriObject* from, size_t upTo)
             bitmapIndex++;
             if (BitScanForward64(&offset, m_bitmap[bitmapIndex]))
             {
-                // got reachable object.
+                // got mark bit
                 markBitOffset = offset;
                 break;
             }

@@ -33,10 +33,11 @@ public:
     void WipeCards();

     SatoriRegion* Split(size_t regionSize);
-    bool CanCoalesce(SatoriRegion* other);
     void TryCoalesceWithNext();
     void Coalesce(SatoriRegion* next);

+    void TryDecommit();
+
     size_t AllocStart();
     size_t AllocRemaining();
     size_t MaxAllocEstimate();

@@ -74,6 +75,7 @@ public:
     void UpdatePointers();

     bool IsExposed(SatoriObject** location);
+    bool AnyExposed(size_t from, size_t length);
     void EscapeRecursively(SatoriObject* obj);

     void EscapeShallow(SatoriObject* o);

@@ -58,14 +58,15 @@ SatoriRegion* SatoriRegionQueue::TryPopWithSize(size_t regionSize, SatoriRegion*

         m_lock.Leave();

-        if (result->Size() > regionSize)
-        {
-            // if there is a diff split it off and put back to appropriate queue.
-            putBack = result->Split(result->Size() - regionSize);
-        }
-
         _ASSERTE(result->m_prev == nullptr);
         result->m_next = nullptr;
+
+        if (result->Size() > regionSize)
+        {
+            // if there is a diff split what is needed and put the rest back to appropriate queue.
+            putBack = result;
+            result = putBack->Split(regionSize);
+        }
     }

     return result;

@@ -131,14 +132,15 @@ SatoriRegion* SatoriRegionQueue::TryRemoveWithSize(size_t regionSize, SatoriRegi

         m_lock.Leave();

-        if (result->Size() > regionSize)
-        {
-            // if there is a diff split it off and put back to appropriate queue.
-            putBack = result->Split(result->Size() - regionSize);
-        }
-
         result->m_prev = nullptr;
         result->m_next = nullptr;
+
+        if (result->Size() > regionSize)
+        {
+            // if there is a diff split what is needed and put the rest back to appropriate queue.
+            putBack = result;
+            result = putBack->Split(regionSize);
+        }
     }

     return result;

@@ -1324,50 +1324,70 @@ void CheckEscapeSatori(Object** dst, Object* ref)
     }
 }

-void CheckEscapeSatoriRange(void* dst, size_t src, size_t len)
+void CheckEscapeSatoriRange(size_t dst, size_t src, size_t len)
 {
-    SatoriPage* page = PageForAddressCheckedSatori((void*)src);
-    if (!page)
+    SatoriRegion* curRegion = (SatoriRegion*)GCToEEInterface::GetAllocContext()->gc_reserved_1;
+    if (!curRegion || !curRegion->IsThreadLocal())
     {
+        // not tracking escapes
         return;
     }

-    SatoriRegion* srcRegion = page->RegionForAddressChecked((size_t)src);
-    if (!srcRegion->OwnedByCurrentThread())
-    {
-        return;
-    }
+    _ASSERTE(curRegion->OwnedByCurrentThread());

     // TODO: VS the following IsEscaped checks could be done faster by scanning bitmaps

-    // if move is within a region, check if the dest is escaped.
-    if ((((size_t)dst ^ (size_t)src) >> 21) == 0)
+    // if dst is within the current region and is not exposed, we are done
+    if (((dst ^ curRegion->Start()) >> 21) == 0)
     {
-        SatoriObject* containingDstObj = srcRegion->FindObject((size_t)dst);
-        if (!containingDstObj->IsEscaped())
+        if (!curRegion->AnyExposed(dst, len))
         {
             return;
         }
     }

-    SatoriObject* containingSrcObj = srcRegion->FindObject((size_t)src);
-    if (containingSrcObj->IsEscaped())
+    if (!PageForAddressCheckedSatori((void*)dst))
     {
+        // dest not in heap
         return;
     }

-    containingSrcObj->ForEachObjectRef(
-        [&](SatoriObject** ref)
+    if (((src ^ curRegion->Start()) >> 21) == 0)
+    {
+        // if src is already escaped, we are done
+        if (!curRegion->AnyExposed(src, len))
         {
-            SatoriObject* child = *ref;
-            if (child->ContainingRegion() == srcRegion)
-            {
-                srcRegion->EscapeRecursively(child);
-            }
-        },
-        src,
-        src + len
-    );
+            SatoriObject* containingSrcObj = curRegion->FindObject(src);
+            containingSrcObj->ForEachObjectRef(
+                [&](SatoriObject** ref)
+                {
+                    SatoriObject* child = *ref;
+                    if (child->ContainingRegion() == curRegion)
+                    {
+                        curRegion->EscapeRecursively(child);
+                    }
+                },
+                src,
+                src + len
+            );
+        }
+        return;
+    }
+
+    if (PageForAddressCheckedSatori((void*)src))
+    {
+        // src is not in current region but in heap, it can't escape anything that belong to current thread
+        return;
+    }
+
+    // very rare case where we are copying refs out of non-heap area like stack or native heap.
+    // we do not have a containing type and that would be somewhat inconvenient.
+    // one way to handle this is by concervatively escaping any value that matches an unescaped pointer in curRegion.
+    //
+    // in practice, while theoretically possible, I do not know a code path that could lead here.
+    // as a particular concern, boxing copy typically uses a newly allocated and not yet escaped target.
+    //
+    // in case if this is reachable we will simply stop tracking if this ever occurs.
+    _ASSERTE(!"escaping by copying from outside of heap, we can handle this, but it is unexpected");
+    curRegion->StopEscapeTracking();
 }
 #endif

@@ -70,7 +70,7 @@ void ErectWriteBarrier(OBJECTREF* dst, OBJECTREF ref);
 #if FEATURE_SATORI_GC
 bool IsInHeapSatori(void* ptr);
 void CheckEscapeSatori(Object** dst, Object* ref);
-void CheckEscapeSatoriRange(void* dst, size_t src, size_t len);
+void CheckEscapeSatoriRange(size_t dst, size_t src, size_t len);
 #endif

 void SetCardsAfterBulkCopy(Object** dst, Object **src, size_t len);

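For context on how the new signature is used: the first hunk in this commit shows the bulk-copy helper handing raw addresses straight through, so the escape check can do its region math without casting back and forth. A sketch of the call site, following that hunk (the surrounding helper body is assumed):

    // from InlinedMemmoveGCRefsHelper, per the first hunk above
    if (len >= sizeof(size_t))
    {
        // inside, the check compares (dst ^ curRegion->Start()) >> 21 to
        // decide whether an address falls in the current thread-local region
        // (a 2MB region size is assumed from the shift)
        CheckEscapeSatoriRange((size_t)dest, (size_t)src, len);
    }
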