mirror of https://github.com/VSadov/Satori.git
vsadov 2020-12-18 12:40:23 -08:00
parent ecc315e564
commit 5b0908787b
15 changed files with 326 additions and 175 deletions

View file

@@ -311,7 +311,7 @@ FORCEINLINE void InlinedMemmoveGCRefsHelper(void *dest, const void *src, size_t
     if (len >= sizeof(size_t))
     {
-        CheckEscapeSatoriRange(dest, (size_t)src, len);
+        CheckEscapeSatoriRange((size_t)dest, (size_t)src, len);
     }

     // To be able to copy forwards, the destination buffer cannot start inside the source buffer

View file

@ -56,7 +56,6 @@ void GCScan::GcDhInitialScan(promote_func* fn, int condemned, int max_gen, ScanC
pDhContext->m_pfnPromoteFunction = fn; pDhContext->m_pfnPromoteFunction = fn;
pDhContext->m_iCondemned = condemned; pDhContext->m_iCondemned = condemned;
pDhContext->m_iMaxGen = max_gen; pDhContext->m_iMaxGen = max_gen;
pDhContext->m_pScanContext = sc;
// Look for dependent handle whose primary has been promoted but whose secondary has not. Promote the // Look for dependent handle whose primary has been promoted but whose secondary has not. Promote the
// secondary in those cases. Additionally this scan sets the m_fUnpromotedPrimaries and m_fPromoted state // secondary in those cases. Additionally this scan sets the m_fUnpromotedPrimaries and m_fPromoted state

View file

@@ -1182,7 +1182,9 @@ void Ref_CheckReachable(uint32_t condemned, uint32_t maxgen, uintptr_t lp1)
 DhContext *Ref_GetDependentHandleContext(ScanContext* sc)
 {
     WRAPPER_NO_CONTRACT;
-    return &g_pDependentHandleContexts[getSlotNumber(sc)];
+    DhContext* dhc = &g_pDependentHandleContexts[getSlotNumber(sc)];
+    dhc->m_pScanContext = sc;
+    return dhc;
 }

 // Scan the dependent handle table promoting any secondary object whose associated primary object is promoted.

View file

@@ -102,11 +102,13 @@ tryAgain:
     return nullptr;
 }

+//TODO: VS when Return and when Add?
 void SatoriAllocator::ReturnRegion(SatoriRegion* region)
 {
-    //TUNING: is this too aggressive, or should coalesce with prev too?
+    //TUNING: is this too aggressive?
     region->TryCoalesceWithNext();
     region->SetGeneration(-1);
+    region->TryDecommit();

     // TODO: VS select by current core
     m_queues[SizeToBucket(region->Size())]->Push(region);
@@ -114,9 +116,10 @@ void SatoriAllocator::ReturnRegion(SatoriRegion* region)
 void SatoriAllocator::AddRegion(SatoriRegion* region)
 {
-    //TUNING: is this too aggressive, or should coalesce with prev too?
+    //TUNING: is this too aggressive?
     region->TryCoalesceWithNext();
     region->SetGeneration(-1);
+    region->TryDecommit();

     // TODO: VS select by current core
     m_queues[SizeToBucket(region->Size())]->Enqueue(region);
@@ -212,8 +215,7 @@ SatoriObject* SatoriAllocator::AllocRegular(SatoriAllocationContext* context, si
     }

     // try get from the free list
-    size_t desiredFreeSpace = max(size + Satori::MIN_FREE_SIZE, Satori::MIN_REGULAR_ALLOC);
-    if (region->StartAllocating(desiredFreeSpace))
+    if (region->StartAllocating(size))
     {
         // we have enough free space in the region to continue
         context->alloc_ptr = context->alloc_limit = (uint8_t*)region->AllocStart();
@@ -237,7 +239,7 @@ SatoriObject* SatoriAllocator::AllocRegular(SatoriAllocationContext* context, si
     {
         // perform thread local collection and see if we have enough space after that.
         region->ThreadLocalCollect();
-        if (region->StartAllocating(desiredFreeSpace))
+        if (region->StartAllocating(size))
         {
             // we have enough free space in the region to continue
             context->alloc_ptr = context->alloc_limit = (uint8_t*)region->AllocStart();
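
AllocRegular now hands the raw request size to SatoriRegion::StartAllocating and relies on the power-of-two free-list buckets changed later in this commit. A minimal standalone sketch of that bucket math, assuming MIN_FREELIST_SIZE_BITS is 5 and using GCC/Clang intrinsics in place of BitScanReverse64 (names and constants here are illustrative, not the commit's code):

// Illustrative sketch, not the commit's code. Sizes are assumed > 0.
#include <cstdint>

const int MIN_FREELIST_SIZE_BITS = 5;            // assumed value for the sketch

static int FloorLog2(uint64_t v)                 // what BitScanReverse64 computes
{
    return 63 - __builtin_clzll(v);
}

// bucket a free chunk of allocSize bytes is filed under (the AddFreeSpace side)
int BucketFor(uint64_t allocSize)
{
    return FloorLog2(allocSize) - MIN_FREELIST_SIZE_BITS;
}

// bucket StartAllocating searches for a request: round non-powers-of-two up one
// bucket so any chunk found there is guaranteed to be large enough
int BucketToSearch(uint64_t request)
{
    int bucket = FloorLog2(request);
    if (request & (request - 1))
    {
        bucket++;
    }
    return bucket > MIN_FREELIST_SIZE_BITS ? bucket - MIN_FREELIST_SIZE_BITS : 0;
}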

View file

@@ -42,7 +42,7 @@ public:
     void ClearPinnedAndMarked();
     bool IsEscaped();
     bool IsEscapedOrPinned();
-    int GetMarkBitAndOffset(size_t* bitmapIndex);
+    int GetMarkBitAndWord(size_t* bitmapIndex);
     void SetPermanentlyPinned();
     bool IsPermanentlyPinned();

View file

@@ -198,7 +198,7 @@ inline void SatoriObject::CleanSyncBlock()
     ((size_t*)this)[-1] = 0;
 }

-inline int SatoriObject::GetMarkBitAndOffset(size_t* bitmapIndex)
+inline int SatoriObject::GetMarkBitAndWord(size_t* bitmapIndex)
 {
     size_t start = Start();
     *bitmapIndex = (start >> 9) & (SatoriRegion::BITMAP_LENGTH - 1); // % words in the bitmap
@@ -207,12 +207,12 @@ inline int SatoriObject::GetMarkBitAndOffset(size_t* bitmapIndex)
 inline void SatoriObject::SetPermanentlyPinned()
 {
-    GetHeader()->SetGCBit();
+    ((DWORD*)this)[-1] |= BIT_SBLK_GC_RESERVE;
 }

 inline bool SatoriObject::IsPermanentlyPinned()
 {
-    return GetHeader()->GetBits() & BIT_SBLK_GC_RESERVE;
+    return ((DWORD*)this)[-1] & BIT_SBLK_GC_RESERVE;
 }

 template<typename F>
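
For readers tracing the renamed GetMarkBitAndWord: a standalone sketch of how an address maps to a bitmap word and bit under the shifts used above, one bit per 8-byte granule and 64 bits per word, so one word covers 512 bytes (BITMAP_LENGTH here is a placeholder for the real constant in SatoriRegion; this is illustrative, not the commit's code):

#include <cstddef>
#include <cstdint>

const size_t BITMAP_LENGTH = 1 << 12;   // assumed power of two for the sketch

int GetMarkBitAndWord(size_t start, size_t* bitmapIndex)
{
    *bitmapIndex = (start >> 9) & (BITMAP_LENGTH - 1);   // which 64-bit word, modulo words in the bitmap
    return (int)((start >> 3) & 63);                     // which bit within that word
}

// usage, mirroring what SetExposed/SetMarked do with the result
void SetBit(uint64_t* bitmap, size_t start)
{
    size_t word;
    int bit = GetMarkBitAndWord(start, &word);
    bitmap[word] |= (uint64_t)1 << bit;
}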

View file

@@ -76,19 +76,6 @@ void SatoriPage::RegionInitialized(SatoriRegion* region)
     }
 }

-void SatoriPage::RegionDestroyed(SatoriRegion* region)
-{
-    _ASSERTE((size_t)region > Start() && (size_t)region < End());
-    size_t startIndex = (region->Start() - Start()) >> Satori::REGION_BITS;
-    size_t mapCount = region->Size() >> Satori::REGION_BITS;
-    for (int i = 0; i < mapCount; i++)
-    {
-        DWORD log2;
-        BitScanReverse(&log2, i);
-        RegionMap()[startIndex + i] = (uint8_t)(log2 + 2);
-    }
-}
-
 SatoriRegion* SatoriPage::RegionForAddress(size_t address)
 {
     _ASSERTE(address >= Start() && address < End());
@@ -126,12 +113,6 @@ SatoriRegion* SatoriPage::NextInPage(SatoriRegion* region)
         return nullptr;
     }

-    size_t mapIndex = (address - Start()) >> Satori::REGION_BITS;
-    if (RegionMap()[mapIndex] == 0)
-    {
-        return nullptr;
-    }
-
     return (SatoriRegion*)address;
 }

View file

@ -25,7 +25,6 @@ public:
SatoriRegion* MakeInitialRegion(); SatoriRegion* MakeInitialRegion();
void RegionInitialized(SatoriRegion* region); void RegionInitialized(SatoriRegion* region);
void RegionDestroyed(SatoriRegion* region);
SatoriRegion* RegionForAddress(size_t address); SatoriRegion* RegionForAddress(size_t address);
SatoriRegion* RegionForAddressChecked(size_t address); SatoriRegion* RegionForAddressChecked(size_t address);

View file

@@ -55,6 +55,9 @@ void SatoriRecycler::Initialize(SatoriHeap* heap)
     m_gen1Count = m_gen2Count = 0;
     m_condemnedGeneration = 0;
+
+    m_gen1Threshold = 5;
+    m_gen1Budget = 0;
 }

 // not interlocked. this is not done concurrently.
@@ -89,10 +92,19 @@ int SatoriRecycler::CondemnedGeneration()
     return m_condemnedGeneration;
 }

+int SatoriRecycler::Gen1RegionCount()
+{
+    return m_ephemeralFinalizationTrackingRegions->Count() + m_ephemeralRegions->Count();
+}
+
+int SatoriRecycler::Gen2RegionCount()
+{
+    return m_tenuredFinalizationTrackingRegions->Count() + m_tenuredRegions->Count();
+}
+
 int SatoriRecycler::RegionCount()
 {
-    return m_ephemeralFinalizationTrackingRegions->Count() + m_ephemeralRegions->Count() +
-        m_tenuredFinalizationTrackingRegions->Count() + m_tenuredRegions->Count();
+    return Gen1RegionCount() + Gen2RegionCount();
 }

 void SatoriRecycler::AddEphemeralRegion(SatoriRegion* region)
@@ -116,9 +128,9 @@ void SatoriRecycler::AddEphemeralRegion(SatoriRegion* region)
 // TODO: VS this should be moved to heuristics.
 void SatoriRecycler::MaybeTriggerGC()
 {
-    int count = RegionCount();
-    if (count - m_prevRegionCount > 10)
+    int count1 = Gen1RegionCount();
+    if (count1 > m_gen1Threshold)
     {
         if (m_gcInProgress)
         {
@@ -126,13 +138,40 @@ void SatoriRecycler::MaybeTriggerGC()
         }
         else if (Interlocked::CompareExchange(&m_gcInProgress, 1, 0) == 0)
         {
-            // for now just do every 16th scan (every 8 global GCs)
-            int generation = (m_scanCount + 1) % 16 == 0 ? 2 : 1;
-            Collect(generation, /*force*/ false);
+            // for now just do every 16th Gen1
+            // int generation = (m_gen1Count + 1) % 16 == 0 ? 2 : 1;
+            // Collect(generation, /*force*/ false);
+
+            if (m_gen1Budget <= 0)
+            {
+                Collect2();
+            }
+            else
+            {
+                Collect1();
+            }
         }
     }
 }

+NOINLINE
+void SatoriRecycler::Collect1()
+{
+    Collect(1, false);
+    m_condemnedGeneration = 0;
+    m_gcInProgress = false;
+}
+
+NOINLINE
+void SatoriRecycler::Collect2()
+{
+    Collect(2, false);
+    m_condemnedGeneration = 0;
+    m_gcInProgress = false;
+}
+
 void SatoriRecycler::Collect(int generation, bool force)
 {
     bool wasCoop = GCToEEInterface::EnablePreemptiveGC();
@@ -144,24 +183,41 @@ void SatoriRecycler::Collect(int generation, bool force)
     // become coop again (it will not block since VM is done suspending)
     GCToEEInterface::DisablePreemptiveGC();

-    int count = RegionCount();
-    if (count - m_prevRegionCount > 10 || force)
+    int count1 = Gen1RegionCount();
+    if (count1 > m_gen1Threshold || force)
     {
         m_condemnedGeneration = generation;
+        m_isCompacting = true;

         DeactivateAllStacks();
+
+        m_condemnedRegionsCount = m_condemnedGeneration == 2 ?
+            RegionCount() :
+            Gen1RegionCount();
+
         Mark();
         Sweep();
         Compact();
         UpdatePointers();

         m_gen1Count++;
         if (m_condemnedGeneration == 2)
         {
             m_gen2Count++;
         }

         // TODO: update stats and heuristics.
-        m_prevRegionCount = RegionCount();
+        if (m_condemnedGeneration == 2)
+        {
+            m_gen1Budget = Gen2RegionCount();
+        }
+        else
+        {
+            m_gen1Budget -= Gen1RegionCount();
+        }
+
+        m_gen1Threshold = Gen1RegionCount() + max(5, Gen2RegionCount() / 4);
     }

     m_condemnedGeneration = 0;
@@ -464,7 +520,7 @@ void SatoriRecycler::DrainMarkQueues()
                 }
             },
             /* includeCollectibleAllocator */ true
         );
     }
-
+    // done with srcChunk
@@ -867,8 +923,6 @@ void SatoriRecycler::Sweep()
 void SatoriRecycler::SweepRegions(SatoriRegionQueue* regions)
 {
-    bool compacting = true;
-
     SatoriRegion* curRegion;
     while (curRegion = regions->TryPop())
     {
@@ -885,7 +939,7 @@ void SatoriRecycler::SweepRegions(SatoriRegionQueue* regions)
         {
             _ASSERTE(curRegion->Generation() != 2);
             // when not compacting, gen1 GC does not need to sweep.
-            canRecycle = compacting ?
+            canRecycle = m_isCompacting ?
                 curRegion->Sweep(/*turnMarkedIntoEscaped*/ false) :
                 curRegion->NothingMarked();
         }
@@ -898,7 +952,7 @@ void SatoriRecycler::SweepRegions(SatoriRegionQueue* regions)
         else
         {
             // if not compacting, we are done here
-            if (!compacting)
+            if (!m_isCompacting)
             {
                 m_stayingRegions->Push(curRegion);
                 continue;
@@ -937,25 +991,41 @@ void SatoriRecycler::AddRelocationTarget(SatoriRegion* region)
 void SatoriRecycler::Compact()
 {
+    if (m_relocatingRegions->Count() < m_condemnedRegionsCount / 2)
+    {
+        m_isCompacting = false;
+    }
+
     SatoriRegion* curRegion;
     while (curRegion = m_relocatingRegions->TryPop())
     {
-        RelocateRegion(curRegion);
+        if (m_isCompacting)
+        {
+            RelocateRegion(curRegion);
+        }
+        else
+        {
+            m_stayingRegions->Push(curRegion);
+        }
     }
 }

-SatoriRegion* SatoriRecycler::TryGetRelocationTarget(size_t minSize, bool existingRegionOnly)
+SatoriRegion* SatoriRecycler::TryGetRelocationTarget(size_t allocSize, bool existingRegionOnly)
 {
     //make this occasionally fail in debug to be sure we can handle low memory case.
 #if _DEBUG
-    if (minSize % 1024 == 0)
+    if (allocSize % 1024 == 0)
     {
         return nullptr;
     }
 #endif

     DWORD bucket;
-    BitScanReverse64(&bucket, minSize);
+    BitScanReverse64(&bucket, allocSize);
+
+    // we could search through this bucket, which may have a large enough obj,
+    // but we will just use the next queue, which guarantees it fits
+    bucket++;

     bucket = bucket > Satori::MIN_FREELIST_SIZE_BITS ?
         bucket - Satori::MIN_FREELIST_SIZE_BITS :
@@ -972,7 +1042,8 @@ SatoriRegion* SatoriRecycler::TryGetRelocationTarget(size_t minSize, bool existi
         SatoriRegion* region = queue->TryPop();
         if (region)
         {
-            region->StartAllocating(minSize);
+            size_t allocStart = region->StartAllocating(allocSize);
+            _ASSERTE(allocStart);
             return region;
         }
     }
@@ -983,7 +1054,7 @@ SatoriRegion* SatoriRecycler::TryGetRelocationTarget(size_t minSize, bool existi
         return nullptr;
     }

-    SatoriRegion* newRegion = m_heap->Allocator()->GetRegion(ALIGN_UP(minSize, Satori::REGION_SIZE_GRANULARITY));
+    SatoriRegion* newRegion = m_heap->Allocator()->GetRegion(ALIGN_UP(allocSize, Satori::REGION_SIZE_GRANULARITY));
     if (newRegion)
     {
         newRegion->SetGeneration(m_condemnedGeneration);
@@ -1108,43 +1179,46 @@ void SatoriRecycler::UpdateFn(PTR_PTR_Object ppObject, ScanContext* sc, uint32_t
 void SatoriRecycler::UpdatePointers()
 {
-    ScanContext sc;
-    sc.promotion = FALSE;
-    MarkContext c = MarkContext(this);
-    sc._unused1 = &c;
-
-    //TODO: VS there should be only one thread with "thread_number == 0"
-    //TODO: VS implement two-pass scheme with preferred vs. any stacks
-    IncrementScanCount();
-
-    //generations are meaningless here, so we pass -1
-    GCToEEInterface::GcScanRoots(UpdateFn, -1, -1, &sc);
-
-    // concurrent, per thread/heap
-    // relies on thread_number to select handle buckets and specialcases #0
-    GCScan::GcScanHandles(UpdateFn, m_condemnedGeneration, 2, &sc);
-
-    _ASSERTE(c.m_markChunk == nullptr);
-
-    // update refs in finalization queue
-    if (m_heap->FinalizationQueue()->HasItems())
-    {
-        // add finalization queue to mark list
-        m_heap->FinalizationQueue()->ForEachObjectRef(
-            [&](SatoriObject** ppObject)
-            {
-                SatoriObject* o = *ppObject;
-                ptrdiff_t ptr = ((ptrdiff_t*)o)[-1];
-                if (ptr < 0)
-                {
-                    *ppObject = (SatoriObject*)-ptr;
-                }
-            }
-        );
-    }
-
-    if (m_condemnedGeneration != 2)
-    {
-        UpdatePointersThroughCards();
-    }
+    if (m_isCompacting)
+    {
+        ScanContext sc;
+        sc.promotion = FALSE;
+        MarkContext c = MarkContext(this);
+        sc._unused1 = &c;
+
+        //TODO: VS there should be only one thread with "thread_number == 0"
+        //TODO: VS implement two-pass scheme with preferred vs. any stacks
+        IncrementScanCount();
+
+        //generations are meaningless here, so we pass -1
+        GCToEEInterface::GcScanRoots(UpdateFn, -1, -1, &sc);
+
+        // concurrent, per thread/heap
+        // relies on thread_number to select handle buckets and specialcases #0
+        GCScan::GcScanHandles(UpdateFn, m_condemnedGeneration, 2, &sc);
+
+        _ASSERTE(c.m_markChunk == nullptr);
+
+        // update refs in finalization queue
+        if (m_heap->FinalizationQueue()->HasItems())
+        {
+            // add finalization queue to mark list
+            m_heap->FinalizationQueue()->ForEachObjectRef(
+                [&](SatoriObject** ppObject)
+                {
+                    SatoriObject* o = *ppObject;
+                    ptrdiff_t ptr = ((ptrdiff_t*)o)[-1];
+                    if (ptr < 0)
+                    {
+                        *ppObject = (SatoriObject*)-ptr;
+                    }
+                }
+            );
+        }
+
+        if (m_condemnedGeneration != 2)
+        {
+            UpdatePointersThroughCards();
+        }
+    }

     // return target regions
@@ -1171,7 +1245,11 @@ void SatoriRecycler::UpdatePointersInRegions(SatoriRegionQueue* queue)
     SatoriRegion* curRegion;
     while (curRegion = queue->TryPop())
     {
-        curRegion->UpdatePointers();
+        if (m_isCompacting)
+        {
+            curRegion->UpdatePointers();
+        }
+
         if (curRegion->Generation() == 0)
         {
             continue;
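
The new trigger logic above amounts to a small budget scheme. A rough standalone sketch of just that heuristic, under the assumption that it is driven purely by the region counts shown in this diff (the struct and names are illustrative, not the commit's code):

struct RecyclerHeuristic
{
    int gen1Threshold = 5;   // initial values match Initialize() above
    int gen1Budget = 0;

    // 0 = no GC, 1 = gen1, 2 = gen2
    int ShouldCollect(int gen1Regions) const
    {
        if (gen1Regions <= gen1Threshold)
        {
            return 0;
        }
        return (gen1Budget <= 0) ? 2 : 1;    // spend the budget on gen1 GCs, then do a full GC
    }

    void AfterCollect(int condemned, int gen1Regions, int gen2Regions)
    {
        if (condemned == 2)
        {
            gen1Budget = gen2Regions;        // a larger tenured heap earns more gen1 GCs
        }
        else
        {
            gen1Budget -= gen1Regions;       // each gen1 GC spends part of the budget
        }

        // allow roughly gen2/4 (at least 5) more ephemeral regions before the next trigger
        int step = gen2Regions / 4 > 5 ? gen2Regions / 4 : 5;
        gen1Threshold = gen1Regions + step;
    }
};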

View file

@@ -25,25 +25,29 @@ public:
     void AddEphemeralRegion(SatoriRegion* region);
     void MaybeTriggerGC();
+    void Collect1();
+    void Collect2();

     int GetScanCount();
     int64_t GetCollectionCount(int gen);
     int CondemnedGeneration();
+    int Gen1RegionCount();
+    int Gen2RegionCount();

     void Collect(int generation, bool force);

 private:
     SatoriHeap* m_heap;

     // used to ensure each thread is scanned once per scan round.
     int m_scanCount;
-    int64_t m_gen1Count;
-    int64_t m_gen2Count;

     int m_condemnedGeneration;
+    bool m_isCompacting;

-    // region count at the end of last GC, used in a crude GC triggering heuristic.
-    int m_prevRegionCount;
     int m_gcInProgress;

+    SatoriMarkChunkQueue* m_workList;
+
     // temporary store for Gen0 regions
     SatoriRegionQueue* m_nurseryRegions;
     SatoriRegionQueue* m_ephemeralRegions;
@@ -62,7 +66,12 @@ private:
     SatoriRegionQueue* m_relocationTargets[Satori::FREELIST_COUNT];
     SatoriRegionQueue* m_relocatedRegions;

-    SatoriMarkChunkQueue* m_workList;
+    int64_t m_gen1Count;
+    int64_t m_gen2Count;
+
+    int m_gen1Threshold;
+    int m_gen1Budget;
+    int m_condemnedRegionsCount;

     static void DeactivateFn(gc_alloc_context* context, void* param);
     static void MarkFn(PTR_PTR_Object ppObject, ScanContext* sc, uint32_t flags);

View file

@@ -11,6 +11,10 @@
 #include "../env/gcenv.os.h"
 #include "../env/gcenv.ee.h"

+#if !defined(_DEBUG)
+//#pragma optimize("gty", on)
+#endif
+
 #include "SatoriGC.h"
 #include "SatoriAllocator.h"
 #include "SatoriRecycler.h"
@@ -24,10 +28,6 @@
 #include "SatoriQueue.h"
 #include "SatoriMarkChunk.h"

-#if !defined(_DEBUG)
-// #pragma optimize("gty", on)
-#endif
-
 SatoriRegion* SatoriRegion::InitializeAt(SatoriPage* containingPage, size_t address, size_t regionSize, size_t committed, size_t used)
 {
     _ASSERTE(used <= committed);
@@ -201,14 +201,16 @@ static const int FREE_LIST_NEXT_OFFSET = sizeof(ArrayBase);
 void SatoriRegion::AddFreeSpace(SatoriObject* freeObj)
 {
-    size_t size = freeObj->Size();
-    if (size < Satori::MIN_FREELIST_SIZE)
+    // allocSize is smaller than size to make sure the span can always be made parseable
+    // after allocating objects in it.
+    ptrdiff_t allocSize = freeObj->Size() - Satori::MIN_FREE_SIZE;
+    if (allocSize < Satori::MIN_FREELIST_SIZE)
     {
         return;
     }

     DWORD bucket;
-    BitScanReverse64(&bucket, size);
+    BitScanReverse64(&bucket, allocSize);
     bucket -= (Satori::MIN_FREELIST_SIZE_BITS);
     _ASSERTE(bucket >= 0);
     _ASSERTE(bucket < Satori::FREELIST_COUNT);
@@ -217,17 +219,17 @@ void SatoriRegion::AddFreeSpace(SatoriObject* freeObj)
     m_freeLists[bucket] = freeObj;
 }

-size_t SatoriRegion::StartAllocating(size_t minSize)
+size_t SatoriRegion::StartAllocating(size_t minAllocSize)
 {
     _ASSERTE(!IsAllocating());

     DWORD bucket;
-    BitScanReverse64(&bucket, minSize);
+    BitScanReverse64(&bucket, minAllocSize);

-    // when minSize is not a power of two we could search through the current bucket,
+    // when minAllocSize is not a power of two we could search through the current bucket,
     // which may have a large enough obj,
     // but we will just use the next bucket, which guarantees it fits
-    if (minSize & (minSize - 1))
+    if (minAllocSize & (minAllocSize - 1))
     {
         bucket++;
     }
@@ -244,6 +246,7 @@ size_t SatoriRegion::StartAllocating(size_t minSize)
             m_freeLists[bucket] = *(SatoriObject**)(freeObj->Start() + FREE_LIST_NEXT_OFFSET);
             m_allocStart = freeObj->Start();
             m_allocEnd = freeObj->End();
+            _ASSERTE(AllocRemaining() >= minAllocSize);
             return m_allocStart;
         }
     }
@@ -263,7 +266,7 @@ size_t SatoriRegion::MaxAllocEstimate()
     {
         if (m_freeLists[bucket])
         {
-            maxRemaining = max(maxRemaining, ((size_t)1 << (bucket + Satori::MIN_FREELIST_SIZE_BITS)) - Satori::MIN_FREE_SIZE);
+            maxRemaining = max(maxRemaining, ((size_t)1 << (bucket + Satori::MIN_FREELIST_SIZE_BITS)));
         }
     }
@@ -295,7 +298,7 @@ SatoriRegion* SatoriRegion::Split(size_t regionSize)
     size_t nextStart, nextCommitted, nextUsed;
     SplitCore(regionSize, nextStart, nextCommitted, nextUsed);
-
+    // format the rest as a new region
     SatoriRegion* result = InitializeAt(m_containingPage, nextStart, regionSize, nextCommitted, nextUsed);
     _ASSERTE(result->ValidateBlank());
     return result;
@@ -309,9 +312,9 @@ SatoriRegion* SatoriRegion::NextInPage()
 void SatoriRegion::TryCoalesceWithNext()
 {
     SatoriRegion* next = NextInPage();
-    if (next && CanCoalesce(next))
+    if (next)
     {
-        auto queue = next->m_containingQueue;
+        auto queue = VolatileLoadWithoutBarrier(&next->m_containingQueue);
         if (queue && queue->Kind() == QueueKind::Allocator)
         {
             if (queue->TryRemove(next))
@@ -322,28 +325,47 @@ void SatoriRegion::TryCoalesceWithNext()
     }
 }

-bool SatoriRegion::CanCoalesce(SatoriRegion* other)
-{
-    return m_committed == other->Start();
-}
-
 void SatoriRegion::Coalesce(SatoriRegion* next)
 {
-    _ASSERTE(ValidateBlank());
-    _ASSERTE(next->ValidateBlank());
-    _ASSERTE(next->m_prev == next->m_next);
     _ASSERTE(next->m_containingQueue == nullptr);
     _ASSERTE(next->m_containingPage == m_containingPage);
-    _ASSERTE(CanCoalesce(next));
+    _ASSERTE(next->m_prev == next->m_next);
+    _ASSERTE(m_end == next->Start());
+    _ASSERTE(ValidateBlank());
+    _ASSERTE(next->ValidateBlank());

     m_end = next->m_end;
-    m_committed = next->m_committed;
-    m_used = next->m_used;
     m_allocEnd = next->m_allocEnd;

+    if (m_committed == next->Start())
+    {
+        m_committed = next->m_committed;
+        m_used = next->m_used;
+    }
+    else
+    {
+        size_t toDecommit = next->m_committed - next->Start();
+        _ASSERTE(toDecommit > 0);
+        _ASSERTE(toDecommit % Satori::CommitGranularity() == 0);
+        GCToOSInterface::VirtualDecommit(next, toDecommit);
+    }
+
     m_containingPage->RegionInitialized(this);
 }

+void SatoriRegion::TryDecommit()
+{
+    size_t decommitStart = ALIGN_UP((size_t)&m_syncBlock, Satori::CommitGranularity());
+    _ASSERTE(m_committed >= decommitStart);
+
+    size_t decommitSize = m_committed - decommitStart;
+    if (decommitSize > Satori::REGION_SIZE_GRANULARITY / 8)
+    {
+        GCToOSInterface::VirtualDecommit((void*)decommitStart, decommitSize);
+        m_committed = m_used = decommitStart;
+    }
+}
+
 size_t SatoriRegion::Allocate(size_t size, bool zeroInitialize)
 {
     _ASSERTE(m_containingQueue == nullptr);
@@ -624,10 +646,10 @@ void SatoriRegion::SetExposed(SatoriObject** location)
     // set the mark bit corresponding to the location to indicate that it is globally exposed
     // same as: ((SatoriObject*)location)->SetMarked();
-    size_t word = (size_t)location;
-    size_t bitmapIndex = (word >> 9) & (SatoriRegion::BITMAP_LENGTH - 1);
-    size_t mask = (size_t)1 << ((word >> 3) & 63);
+    size_t bitmapIndex;
+    int offset = ((SatoriObject*)location)->GetMarkBitAndWord(&bitmapIndex);
+    size_t mask = (size_t)1 << offset;
     m_bitmap[bitmapIndex] |= mask;
 }
@@ -638,13 +660,48 @@ bool SatoriRegion::IsExposed(SatoriObject** location)
     // check the mark bit corresponding to the location
     //same as: return ((SatoriObject*)location)->IsMarked();
-    size_t word = (size_t)location;
-    size_t bitmapIndex = (word >> 9) & (SatoriRegion::BITMAP_LENGTH - 1);
-    size_t mask = (size_t)1 << ((word >> 3) & 63);
+    size_t bitmapIndex;
+    int offset = ((SatoriObject*)location)->GetMarkBitAndWord(&bitmapIndex);
+    size_t mask = (size_t)1 << offset;
     return m_bitmap[bitmapIndex] & mask;
 }

+bool SatoriRegion::AnyExposed(size_t first, size_t length)
+{
+    _ASSERTE(length % 8 == 0);
+    size_t last = first + length - sizeof(size_t);
+    _ASSERTE(((SatoriObject*)first)->ContainingRegion() == this);
+    _ASSERTE(((SatoriObject*)last)->ContainingRegion() == this);
+
+    size_t bitmapIndexF;
+    size_t maskF = (size_t)-1 << ((SatoriObject*)first)->GetMarkBitAndWord(&bitmapIndexF);
+
+    size_t bitmapIndexL;
+    size_t maskL = (size_t)-1 >> (63 - ((SatoriObject*)last)->GetMarkBitAndWord(&bitmapIndexL));
+
+    if (bitmapIndexF == bitmapIndexL)
+    {
+        return m_bitmap[bitmapIndexF] & maskF & maskL;
+    }
+
+    if (m_bitmap[bitmapIndexF] & maskF)
+    {
+        return true;
+    }
+
+    for (size_t i = bitmapIndexF + 1; i < bitmapIndexL; i++)
+    {
+        if (m_bitmap[i])
+        {
+            return true;
+        }
+    }
+
+    return m_bitmap[bitmapIndexL] & maskL;
+}
+
 void SatoriRegion::EscapeRecursively(SatoriObject* o)
 {
     _ASSERTE(this->OwnedByCurrentThread());
@@ -779,7 +836,7 @@ void SatoriRegion::ThreadLocalMark()
             obj->Validate();

             // skip the object
-            markBitOffset = obj->Next()->GetMarkBitAndOffset(&bitmapIndex);
+            markBitOffset = obj->Next()->GetMarkBitAndWord(&bitmapIndex);
         }
         else
         {
@@ -1093,7 +1150,7 @@ void SatoriRegion::ThreadLocalUpdatePointers()
             }

             // skip the object
-            markBitOffset = obj->Next()->GetMarkBitAndOffset(&bitmapIndex);
+            markBitOffset = obj->Next()->GetMarkBitAndWord(&bitmapIndex);
         }
         else
         {
@@ -1363,12 +1420,12 @@ SatoriObject* SatoriRegion::SkipUnmarked(SatoriObject* from)
 {
     _ASSERTE(from->Start() < End());
     size_t bitmapIndex;
-    int markBitOffset = from->GetMarkBitAndOffset(&bitmapIndex);
+    int markBitOffset = from->GetMarkBitAndWord(&bitmapIndex);

     DWORD offset;
     if (BitScanForward64(&offset, m_bitmap[bitmapIndex] >> markBitOffset))
     {
-        // got reachable object.
+        // got mark bit
         markBitOffset += offset;
     }
     else
@@ -1379,7 +1436,7 @@ SatoriObject* SatoriRegion::SkipUnmarked(SatoriObject* from)
         bitmapIndex++;
         if (BitScanForward64(&offset, m_bitmap[bitmapIndex]))
         {
-            // got reachable object.
+            // got mark bit
             markBitOffset = offset;
             break;
         }
@@ -1393,12 +1450,12 @@ SatoriObject* SatoriRegion::SkipUnmarked(SatoriObject* from, size_t upTo)
 {
     _ASSERTE(from->Start() < End());
     size_t bitmapIndex;
-    int markBitOffset = from->GetMarkBitAndOffset(&bitmapIndex);
+    int markBitOffset = from->GetMarkBitAndWord(&bitmapIndex);

     DWORD offset;
     if (BitScanForward64(&offset, m_bitmap[bitmapIndex] >> markBitOffset))
     {
-        // got reachable object.
+        // got mark bit
         markBitOffset += offset;
     }
     else
@@ -1411,7 +1468,7 @@ SatoriObject* SatoriRegion::SkipUnmarked(SatoriObject* from, size_t upTo)
         bitmapIndex++;
         if (BitScanForward64(&offset, m_bitmap[bitmapIndex]))
         {
-            // got reachable object.
+            // got mark bit
             markBitOffset = offset;
             break;
         }
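
AnyExposed above checks a whole address range against the mark bitmap at once instead of testing locations one by one: the first and last 64-bit words are trimmed by masks and any full word in between only needs a non-zero test. A self-contained sketch of that same walk (illustrative, not the commit's code):

#include <cstdint>
#include <cstddef>

bool AnyBitInRange(const uint64_t* bitmap, size_t wordF, int bitF, size_t wordL, int bitL)
{
    uint64_t maskF = ~0ull << bitF;           // keep bits bitF..63 of the first word
    uint64_t maskL = ~0ull >> (63 - bitL);    // keep bits 0..bitL of the last word

    if (wordF == wordL)
    {
        return (bitmap[wordF] & maskF & maskL) != 0;
    }

    if (bitmap[wordF] & maskF)
    {
        return true;
    }

    for (size_t i = wordF + 1; i < wordL; i++)
    {
        if (bitmap[i])
        {
            return true;
        }
    }

    return (bitmap[wordL] & maskL) != 0;
}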

View file

@@ -33,10 +33,11 @@ public:
     void WipeCards();

     SatoriRegion* Split(size_t regionSize);
-    bool CanCoalesce(SatoriRegion* other);
     void TryCoalesceWithNext();
     void Coalesce(SatoriRegion* next);
+    void TryDecommit();

     size_t AllocStart();
     size_t AllocRemaining();
     size_t MaxAllocEstimate();
@@ -74,6 +75,7 @@ public:
     void UpdatePointers();

     bool IsExposed(SatoriObject** location);
+    bool AnyExposed(size_t from, size_t length);
     void EscapeRecursively(SatoriObject* obj);
     void EscapeShallow(SatoriObject* o);

View file

@@ -58,14 +58,15 @@ SatoriRegion* SatoriRegionQueue::TryPopWithSize(size_t regionSize, SatoriRegion*
         m_lock.Leave();

-        if (result->Size() > regionSize)
-        {
-            // if there is a diff split it off and put back to appropriate queue.
-            putBack = result->Split(result->Size() - regionSize);
-        }
-
         _ASSERTE(result->m_prev == nullptr);
         result->m_next = nullptr;
+
+        if (result->Size() > regionSize)
+        {
+            // if there is a diff split what is needed and put the rest back to appropriate queue.
+            putBack = result;
+            result = putBack->Split(regionSize);
+        }
     }

     return result;
@@ -131,14 +132,15 @@ SatoriRegion* SatoriRegionQueue::TryRemoveWithSize(size_t regionSize, SatoriRegi
         m_lock.Leave();

-        if (result->Size() > regionSize)
-        {
-            // if there is a diff split it off and put back to appropriate queue.
-            putBack = result->Split(result->Size() - regionSize);
-        }
-
         result->m_prev = nullptr;
         result->m_next = nullptr;
+
+        if (result->Size() > regionSize)
+        {
+            // if there is a diff split what is needed and put the rest back to appropriate queue.
+            putBack = result;
+            result = putBack->Split(regionSize);
+        }
     }

     return result;

View file

@@ -1324,50 +1324,70 @@ void CheckEscapeSatori(Object** dst, Object* ref)
     }
 }

-void CheckEscapeSatoriRange(void* dst, size_t src, size_t len)
-{
-    SatoriPage* page = PageForAddressCheckedSatori((void*)src);
-    if (!page)
-    {
-        return;
-    }
-
-    SatoriRegion* srcRegion = page->RegionForAddressChecked((size_t)src);
-    if (!srcRegion->OwnedByCurrentThread())
-    {
-        return;
-    }
-
-    // TODO: VS the following IsEscaped checks could be done faster by scanning bitmaps
-
-    // if move is within a region, check if the dest is escaped.
-    if ((((size_t)dst ^ (size_t)src) >> 21) == 0)
-    {
-        SatoriObject* containingDstObj = srcRegion->FindObject((size_t)dst);
-        if (!containingDstObj->IsEscaped())
-        {
-            return;
-        }
-    }
-
-    SatoriObject* containingSrcObj = srcRegion->FindObject((size_t)src);
-    if (containingSrcObj->IsEscaped())
-    {
-        return;
-    }
-
-    containingSrcObj->ForEachObjectRef(
-        [&](SatoriObject** ref)
-        {
-            SatoriObject* child = *ref;
-            if (child->ContainingRegion() == srcRegion)
-            {
-                srcRegion->EscapeRecursively(child);
-            }
-        },
-        src,
-        src + len
-    );
+void CheckEscapeSatoriRange(size_t dst, size_t src, size_t len)
+{
+    SatoriRegion* curRegion = (SatoriRegion*)GCToEEInterface::GetAllocContext()->gc_reserved_1;
+    if (!curRegion || !curRegion->IsThreadLocal())
+    {
+        // not tracking escapes
+        return;
+    }
+
+    _ASSERTE(curRegion->OwnedByCurrentThread());
+
+    // if dst is within the current region and is not exposed, we are done
+    if (((dst ^ curRegion->Start()) >> 21) == 0)
+    {
+        if (!curRegion->AnyExposed(dst, len))
+        {
+            return;
+        }
+    }
+
+    if (!PageForAddressCheckedSatori((void*)dst))
+    {
+        // dest not in heap
+        return;
+    }
+
+    if (((src ^ curRegion->Start()) >> 21) == 0)
+    {
+        // if src is already escaped, we are done
+        if (!curRegion->AnyExposed(src, len))
+        {
+            SatoriObject* containingSrcObj = curRegion->FindObject(src);
+            containingSrcObj->ForEachObjectRef(
+                [&](SatoriObject** ref)
+                {
+                    SatoriObject* child = *ref;
+                    if (child->ContainingRegion() == curRegion)
+                    {
+                        curRegion->EscapeRecursively(child);
+                    }
+                },
+                src,
+                src + len
+            );
+        }
+
+        return;
+    }
+
+    if (PageForAddressCheckedSatori((void*)src))
+    {
+        // src is not in current region but in heap, it can't escape anything that belong to current thread
+        return;
+    }
+
+    // very rare case where we are copying refs out of non-heap area like stack or native heap.
+    // we do not have a containing type and that would be somewhat inconvenient.
+    // one way to handle this is by concervatively escaping any value that matches an unescaped pointer in curRegion.
+    //
+    // in practice, while theoretically possible, I do not know a code path that could lead here.
+    // as a particular concern, boxing copy typically uses a newly allocated and not yet escaped target.
+    //
+    // in case if this is reachable we will simply stop tracking if this ever occurs.
+    _ASSERTE(!"escaping by copying from outside of heap, we can handle this, but it is unexpected");
+    curRegion->StopEscapeTracking();
 }

 #endif
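
The ((dst ^ curRegion->Start()) >> 21) == 0 tests above are a cheap containment check: with 2 MB region granules, two addresses fall in the same granule exactly when xor-ing them leaves nothing at or above bit 21. A tiny sketch, assuming the thread-local region occupies a single 2 MB granule (the constant name is illustrative, not the commit's code):

#include <cstddef>

const size_t REGION_GRANULE_BITS = 21;   // 2 MB, as implied by ">> 21" in the diff

bool InSameGranule(size_t addr, size_t regionStart)
{
    return ((addr ^ regionStart) >> REGION_GRANULE_BITS) == 0;
}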

View file

@@ -70,7 +70,7 @@ void ErectWriteBarrier(OBJECTREF* dst, OBJECTREF ref);
 #if FEATURE_SATORI_GC
 bool IsInHeapSatori(void* ptr);
 void CheckEscapeSatori(Object** dst, Object* ref);
-void CheckEscapeSatoriRange(void* dst, size_t src, size_t len);
+void CheckEscapeSatoriRange(size_t dst, size_t src, size_t len);
 #endif

 void SetCardsAfterBulkCopy(Object** dst, Object **src, size_t len);