Mirror of https://github.com/VSadov/Satori.git, synced 2025-06-09 09:34:49 +09:00
Gen1 compaction (ephemeral only)
parent 5a89274949
commit 7468b0e049
8 changed files with 98 additions and 68 deletions
@@ -106,6 +106,7 @@ void SatoriAllocator::ReturnRegion(SatoriRegion* region)
{
//TUNING: is this too aggressive, or should coalesce with prev too?
region->TryCoalesceWithNext();
region->SetGeneration(-1);

// TODO: VS select by current core
m_queues[SizeToBucket(region->Size())]->Push(region);

@@ -115,6 +116,7 @@ void SatoriAllocator::AddRegion(SatoriRegion* region)
{
//TUNING: is this too aggressive, or should coalesce with prev too?
region->TryCoalesceWithNext();
region->SetGeneration(-1);

// TODO: VS select by current core
m_queues[SizeToBucket(region->Size())]->Enqueue(region);

@@ -126,7 +128,6 @@ Object* SatoriAllocator::Alloc(SatoriAllocationContext* context, size_t size, ui

SatoriObject* result;

// TODO: VS special region for POH, AllocLarge is ok for now
if (flags & GC_ALLOC_PINNED_OBJECT_HEAP)
{
result = AllocLarge(context, size, flags);

@@ -226,19 +227,22 @@ SatoriObject* SatoriAllocator::AllocRegular(SatoriAllocationContext* context, si
GCToEEInterface::GcPoll();

// if full GC happened, we could have lost the region ownership, check for that.
if (!region->OwnedByCurrentThread())
if (context->RegularRegion() == nullptr)
{
region = nullptr;
continue;
}

// perform thread local collection and see if we have enough space after that.
region->ThreadLocalCollect();
if (region->StartAllocating(desiredFreeSpace))
if (region->OwnedByCurrentThread())
{
// we have enough free space in the region to continue
context->alloc_ptr = context->alloc_limit = (uint8_t*)region->AllocStart();
continue;
// perform thread local collection and see if we have enough space after that.
region->ThreadLocalCollect();
if (region->StartAllocating(desiredFreeSpace))
{
// we have enough free space in the region to continue
context->alloc_ptr = context->alloc_limit = (uint8_t*)region->AllocStart();
continue;
}
}
}

@@ -259,6 +263,7 @@ SatoriObject* SatoriAllocator::AllocRegular(SatoriAllocationContext* context, si

_ASSERTE(region->NothingMarked());
context->alloc_ptr = context->alloc_limit = (uint8_t*)region->AllocStart();
region->SetGeneration(0);
region->m_ownerThreadTag = SatoriUtil::GetCurrentThreadTag();
context->RegularRegion() = region;
}

@@ -291,7 +296,7 @@ SatoriObject* SatoriAllocator::AllocLarge(SatoriAllocationContext* context, size
}
}

// handle huge allocation. It can't be form the current region.
// handle huge allocation. It can't be from the current region.
size_t regionSize = ALIGN_UP(size + sizeof(SatoriRegion) + Satori::MIN_FREE_SIZE, Satori::REGION_SIZE_GRANULARITY);
if (regionSize > Satori::REGION_SIZE_GRANULARITY)
{

@@ -329,6 +334,7 @@ SatoriObject* SatoriAllocator::AllocLarge(SatoriAllocationContext* context, size
}

_ASSERTE(region->NothingMarked());
region->SetGeneration(0);
context->LargeRegion() = region;
}
}

@@ -344,6 +350,7 @@ SatoriObject* SatoriAllocator::AllocHuge(SatoriAllocationContext* context, size_
return nullptr;
}

hugeRegion->SetGeneration(0);
bool zeroInitialize = !(flags & GC_ALLOC_ZEROING_OPTIONAL);
SatoriObject* result = SatoriObject::At(hugeRegion->AllocateHuge(size, zeroInitialize));
if (!result)

@@ -37,6 +37,11 @@ public:
return m_top;
}

size_t FreeSpace()
{
return (Satori::MARK_CHUNK_SIZE - sizeof(SatoriMarkChunk)) / sizeof(SatoriObject*) - m_top;
}

bool HasSpace()
{
return m_top < (Satori::MARK_CHUNK_SIZE - sizeof(SatoriMarkChunk)) / sizeof(SatoriObject*);

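The new FreeSpace/HasSpace helpers derive the chunk's slot capacity from the chunk size minus its header, divided by the pointer size. A minimal standalone sketch of that capacity arithmetic, with illustrative stand-ins for Satori::MARK_CHUNK_SIZE and the SatoriMarkChunk header (the real constants live in the Satori sources):

#include <cstddef>
#include <cstdio>

// Illustrative stand-ins; Satori's real values come from Satori::MARK_CHUNK_SIZE
// and sizeof(SatoriMarkChunk).
constexpr std::size_t kChunkSize  = 4096;               // assumed chunk size in bytes
constexpr std::size_t kHeaderSize = 2 * sizeof(void*);  // assumed header (next + m_top)

// Number of object-pointer slots one chunk can hold.
constexpr std::size_t kCapacity = (kChunkSize - kHeaderSize) / sizeof(void*);

int main()
{
    std::size_t top = 100;                      // slots already pushed
    std::size_t freeSlots = kCapacity - top;    // what FreeSpace() reports
    bool hasSpace = top < kCapacity;            // what HasSpace() reports
    std::printf("capacity=%zu free=%zu hasSpace=%d\n", kCapacity, freeSlots, (int)hasSpace);
    return 0;
}
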
@@ -47,14 +47,18 @@ SatoriObject* SatoriObject::FormatAsFree(size_t location, size_t size)
_ASSERTE(size < Satori::REGION_SIZE_GRANULARITY);

SatoriObject* obj = SatoriObject::At(location);
// memset((void*)(location - sizeof(size_t)), 0xac, min(size, obj->ContainingRegion()->m_used - location));
_ASSERTE(obj->ContainingRegion()->m_used > location + ArrayBase::GetOffsetOfNumComponents() + sizeof(size_t));

#if _DEBUG
size_t dirtySize = min(size, obj->ContainingRegion()->m_used - location);
memset((void*)(location - sizeof(size_t)), 0xac, dirtySize);
#endif

obj->CleanSyncBlock();
obj->RawSetMethodTable(s_emptyObjectMt);

// deduct the size of Array header + syncblock
// deduct the size of Array header + syncblock and set the size of the free bytes.
((DWORD*)obj)[ArrayBase::GetOffsetOfNumComponents() / sizeof(DWORD)] = (DWORD)(size - (sizeof(ArrayBase) + sizeof(size_t)));

_ASSERTE(obj->ContainingRegion()->m_used > location + ArrayBase::GetOffsetOfNumComponents() + sizeof(size_t));
return obj;
}

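FormatAsFree turns a free gap into a dummy array object; the element count it stores is the gap size minus the array header and the syncblock word, so a heap walker can skip the gap by reading that count back. A small sketch of the size bookkeeping, with an assumed header size (the runtime's real value is sizeof(ArrayBase)):

#include <cassert>
#include <cstddef>

// Assumed sizes for a 64-bit target; the runtime's real values come from
// sizeof(ArrayBase) and the syncblock word that precedes every object.
constexpr std::size_t kArrayHeaderSize = 24;
constexpr std::size_t kSyncBlockSize   = sizeof(std::size_t);

// Element count recorded in the dummy free object covering `gapSize` bytes.
std::size_t NumComponentsForFreeGap(std::size_t gapSize)
{
    assert(gapSize >= kArrayHeaderSize + kSyncBlockSize);
    return gapSize - (kArrayHeaderSize + kSyncBlockSize);
}

// Inverse direction: how many bytes a free object with `numComponents`
// byte-sized elements spans, as a heap walker would compute it.
std::size_t FreeGapSize(std::size_t numComponents)
{
    return numComponents + kArrayHeaderSize + kSyncBlockSize;
}
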
@@ -91,7 +91,7 @@ void SatoriPage::RegionDestroyed(SatoriRegion* region)

SatoriRegion* SatoriPage::RegionForAddress(size_t address)
{
_ASSERTE(address > Start() && address < End());
_ASSERTE(address >= Start() && address < End());
size_t mapIndex = (address - Start()) >> Satori::REGION_BITS;
while (RegionMap()[mapIndex] > 1)
{

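RegionForAddress maps an address to a slot in the page's region map by shifting off the region granularity bits, then walks the map back to the entry where the owning region starts. The exact map encoding is not shown in this hunk; the sketch below uses a simplified encoding (each slot stores how many granules back its region begins) purely to illustrate the lookup shape:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified region-map sketch; this is NOT Satori's actual encoding.
constexpr std::size_t kRegionBits = 21;                        // assumed 2 MB granule
constexpr std::size_t kGranule    = std::size_t{1} << kRegionBits;

struct PageSketch
{
    std::size_t start = 0;                 // first address covered by the page
    std::vector<uint32_t> backSkip;        // per-granule distance to the region's first granule

    std::size_t RegionStartForAddress(std::size_t address) const
    {
        assert(address >= start && address < start + backSkip.size() * kGranule);
        std::size_t mapIndex = (address - start) >> kRegionBits;
        mapIndex -= backSkip[mapIndex];    // hop back to where the region begins
        return start + mapIndex * kGranule;
    }
};
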
@@ -126,7 +126,8 @@ void SatoriRecycler::MaybeTriggerGC()
}
else if (Interlocked::CompareExchange(&m_gcInProgress, 1, 0) == 0)
{
// for now just do every 16th
//TODO: VS start with gen1? (though first gen2 is probably cheap)
// for now just do every 16th scan (every 8 global GCs)
int generation = m_scanCount % 16 == 0 ? 2 : 1;
Collect(generation, /*force*/ false);
}

@@ -135,9 +136,6 @@ void SatoriRecycler::MaybeTriggerGC()

void SatoriRecycler::Collect(int generation, bool force)
{
// TODO: VS hack
generation = 2;

bool wasCoop = GCToEEInterface::EnablePreemptiveGC();
_ASSERTE(wasCoop);

@@ -301,7 +299,7 @@ private:
int m_condemnedGeneration;
};

void SatoriRecycler::PushToMarkQueuesSlow(SatoriMarkChunk* &currentMarkChunk, SatoriObject* o)
void SatoriRecycler::PushToMarkQueuesSlow(SatoriMarkChunk*& currentMarkChunk, SatoriObject* o)
{
if (currentMarkChunk)
{

@@ -347,7 +345,7 @@ void SatoriRecycler::MarkFn(PTR_PTR_Object ppObject, ScanContext* sc, uint32_t f
{
MarkContext* context = (MarkContext*)sc->_unused1;

//TODO: VS why checked? can this fail?
// byrefs may point to stack, use checked here
o = context->m_heap->ObjectForAddressChecked(location);
if (o == nullptr)
{

@@ -464,7 +462,7 @@ void SatoriRecycler::DrainMarkQueues()
}
},
/* includeCollectibleAllocator */ true
);
);
}

// done with srcChunk

@@ -847,6 +845,7 @@ void SatoriRecycler::SweepNurseryRegions()
}

curRegion->Verify();
m_stayingRegions->Push(curRegion);
}
}

@@ -935,6 +934,14 @@ void SatoriRecycler::AddRelocationTarget(SatoriRegion* region)

SatoriRegion* SatoriRecycler::TryGetRelocationTarget(size_t minSize)
{
//make this occasionally fail in debug to be sure we can handle low memory case.
#if _DEBUG
if (minSize % 1024 == 0)
{
return nullptr;
}
#endif

DWORD bucket;
BitScanReverse64(&bucket, minSize);

@@ -953,12 +960,19 @@ SatoriRegion* SatoriRecycler::TryGetRelocationTarget(size_t minSize)
SatoriRegion* region = queue->TryPop();
if (region)
{
region->StartAllocating(minSize);
return region;
}
}
}

return m_heap->Allocator()->GetRegion(ALIGN_UP(minSize, Satori::REGION_SIZE_GRANULARITY));
SatoriRegion* newRegion = m_heap->Allocator()->GetRegion(ALIGN_UP(minSize, Satori::REGION_SIZE_GRANULARITY));
if (newRegion)
{
newRegion->SetGeneration(m_condemnedGeneration);
}

return newRegion;
}

void SatoriRecycler::Compact()

@@ -975,20 +989,16 @@ void SatoriRecycler::RelocateRegion(SatoriRegion* relocationSource)
relocationSource->Verify(true);

size_t copySize = relocationSource->Occupancy();
//TODO: VS do we need + Satori::MIN_FREE_SIZE ???
SatoriRegion* relocationTarget = TryGetRelocationTarget(copySize + Satori::MIN_FREE_SIZE);
SatoriRegion* relocationTarget = TryGetRelocationTarget(copySize);

// could not get a region. we must be low on available memory.
// we can try using the source as a target for others.
if (!relocationTarget)
{
m_stayingRegions->Push(relocationSource);
AddRelocationTarget(relocationSource);
return;
}

if (!relocationTarget->IsAllocating())
{
relocationTarget->StartAllocating(copySize + Satori::MIN_FREE_SIZE);
}

size_t dstPtr = relocationTarget->Allocate(copySize, /*zeroInitialize*/ false);
size_t dstPtrOrig = dstPtr;

@@ -1012,19 +1022,17 @@ void SatoriRecycler::RelocateRegion(SatoriRegion* relocationSource)
} while (obj->Start() < objLimit);

_ASSERTE(dstPtr - dstPtrOrig == copySize);
relocationSource->Verify(true);
relocationTarget->StopAllocating(/*allocPtr*/ 0);

// transfer finalization trackers if any
relocationTarget->TakeFinalizerInfoFrom(relocationSource);

// the target may yet have more space. put it back.
relocationTarget->Verify(true);
AddRelocationTarget(relocationTarget);

// the region is now relocated.
m_relocatedRegions->Push(relocationSource);

// put the target region back. It may have more space.
relocationTarget->StopAllocating(dstPtr);
relocationTarget->Verify(true);

AddRelocationTarget(relocationTarget);
}

void SatoriRecycler::UpdateFn(PTR_PTR_Object ppObject, ScanContext* sc, uint32_t flags)

@@ -1046,7 +1054,7 @@ void SatoriRecycler::UpdateFn(PTR_PTR_Object ppObject, ScanContext* sc, uint32_t
{
MarkContext* context = (MarkContext*)sc->_unused1;

//TODO: VS why checked? can this fail?
// byrefs may point to stack, use checked here
o = context->m_heap->ObjectForAddressChecked(location);
if (o == nullptr)
{

@@ -1144,8 +1152,12 @@ void SatoriRecycler::UpdatePointersInRegions(SatoriRegionQueue* queue)
while (curRegion = queue->TryPop())
{
curRegion->UpdateReferences();
curRegion->ClearMarks();
if (curRegion->Generation() == 0)
{
continue;
}

curRegion->ClearMarks();
if (m_condemnedGeneration == 2)
{
curRegion->SetGeneration(2);

@@ -1213,8 +1225,8 @@ void SatoriRecycler::UpdatePointersThroughCards()
size_t start = page->LocationForCard(&cards[j]);
do
{
//TODO: VS no need to reset. assert that has this state already
cards[j++] = resetValue;
_ASSERTE(cards[j] <= Satori::CARD_HAS_REFERENCES);
j++;
} while (j < Satori::CARD_BYTES_IN_CARD_GROUP && cards[j] >= Satori::CARD_HAS_REFERENCES);

size_t end = page->LocationForCard(&cards[j]);

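In UpdatePointersThroughCards, a run of consecutive cards that may contain references is coalesced into one [start, end) address range before the objects under it are visited. A standalone sketch of that coalescing loop, with assumed card constants (not Satori's actual values):

#include <cstddef>
#include <cstdint>
#include <vector>

// Assumed card constants, for illustration only.
constexpr std::size_t kBytesPerCard      = 512;  // heap bytes covered by one card byte
constexpr uint8_t     kCardHasReferences = 1;    // card state meaning "may contain refs"

struct CardRange { std::size_t begin; std::size_t end; };

// Starting at card j, extend over the run of interesting cards and return the
// heap address range it covers; j is left one past the run.
CardRange CoalesceDirtyCards(const std::vector<uint8_t>& cards, std::size_t& j, std::size_t cardGroupBase)
{
    std::size_t first = j;
    while (j < cards.size() && cards[j] >= kCardHasReferences)
    {
        j++;
    }
    return { cardGroupBase + first * kBytesPerCard, cardGroupBase + j * kBytesPerCard };
}
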
@@ -225,7 +225,7 @@ size_t SatoriRegion::StartAllocating(size_t minSize)

// when minSize is not a power of two we could search through the current bucket,
// which may have a large enough obj,
// but we will just use the next bucket, which guarantees that any obj will fit
// but we will just use the next bucket, which guarantees it fits
if (minSize & (minSize - 1))
{
bucket++;

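The bucket choice above relies on the free lists being power-of-two sized: when minSize is not an exact power of two, moving up one bucket guarantees that anything on that list fits. A minimal sketch of the rule (the highest-set-bit scan is an assumption about how the initial bucket is picked; the hunk only shows the bump):

#include <cassert>
#include <cstddef>

// Pick a free-list bucket that is guaranteed to satisfy minSize.
unsigned BucketForMinSize(std::size_t minSize)
{
    assert(minSize != 0);

    // floor(log2(minSize)) -- assumed initial bucket selection
    unsigned bucket = 0;
    std::size_t v = minSize;
    while (v >>= 1)
    {
        bucket++;
    }

    // minSize is not a power of two: the current bucket may hold entries
    // smaller than minSize, so use the next bucket, which cannot.
    if (minSize & (minSize - 1))
    {
        bucket++;
    }
    return bucket;
}
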
@@ -514,24 +514,16 @@ void SatoriRegion::MarkFn(PTR_PTR_Object ppObject, ScanContext* sc, uint32_t fla
size_t location = (size_t)*ppObject;

// ignore objects outside of the current region.
// this also rejects nulls.
// this also rejects nulls and byrefs pointing to stack.
if (location < (size_t)region->FirstObject() || location > region->End())
{
return;
}

SatoriObject* o;
SatoriObject* o = SatoriObject::At(location);
if (flags & GC_CALL_INTERIOR)
{
o = region->FindObject(location);
if (o == nullptr)
{
return;
}
}
else
{
o = SatoriObject::At(location);
}

if (!o->IsMarked())

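For GC_CALL_INTERIOR references the reported location may point into the middle of an object, so it has to be resolved back to the containing object's start before marking. The sketch below shows one conventional way to do that resolution over a sorted list of object start addresses; it illustrates the idea only and is not how SatoriRegion::FindObject is implemented:

#include <algorithm>
#include <cstddef>
#include <vector>

// Resolve an interior pointer to the start of its containing object,
// given the sorted start addresses and sizes of all objects in a region.
// Returns 0 when the location does not fall inside any object.
std::size_t FindContainingObject(const std::vector<std::size_t>& starts,
                                 const std::vector<std::size_t>& sizes,
                                 std::size_t location)
{
    // first start strictly greater than location, then step back one object
    auto it = std::upper_bound(starts.begin(), starts.end(), location);
    if (it == starts.begin())
    {
        return 0;
    }
    std::size_t idx = static_cast<std::size_t>(it - starts.begin()) - 1;
    return (location < starts[idx] + sizes[idx]) ? starts[idx] : 0;
}
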
@@ -846,6 +838,13 @@ size_t SatoriRegion::ThreadLocalPlan()
// we will shift left all movable objects - as long as they fit between unmovables
//

// planning also trashes these, since free spaces get reformatted.
// we will do more of that in Compact, but before that Update may use the index.
// TODO: VS consider updating index in FormatAsFree
// it should be ok to fill index with the free, since even if
// something is allocated at start, it would be a valid obj to start
memset(&m_index, 0, sizeof(m_index));

// what we want to move
SatoriObject* moveable;
size_t moveableSize = 0;

@@ -1088,9 +1087,6 @@ void SatoriRegion::ThreadLocalUpdatePointers()

void SatoriRegion::ThreadLocalCompact()
{
memset(m_freeLists, 0, sizeof(m_freeLists));
memset(&m_index, 0, sizeof(m_index));

SatoriObject* s1;
SatoriObject* s2 = FirstObject();
SatoriObject* d1 = FirstObject();

@@ -1098,6 +1094,13 @@ void SatoriRegion::ThreadLocalCompact()

size_t foundFree = 0;

// compacting also trashes these, since free spaces get reformatted.
// TODO: VS consider updating index in FormatAsFree
memset(&m_index, 0, sizeof(m_index));

// we will be building new free lists
memset(m_freeLists, 0, sizeof(m_freeLists));

// when d1 reaches the end everything is copied or swept
while (d1->Start() < End())
{

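ThreadLocalCompact slides live objects toward the start of the region: a destination cursor (d1) and a source cursor (s2) walk forward, live data is copied down, and the tail becomes free space. A toy sketch of that sliding-compaction shape over a flat buffer, purely to illustrate the cursor discipline (it ignores marking, pinned objects, and reference updating):

#include <cstddef>
#include <cstring>
#include <vector>

struct ToyObject
{
    std::size_t size;   // bytes, including this header
    bool        live;   // survived marking
};

// Slide all live objects in `heap` toward offset 0 and return the new used size.
std::size_t SlideCompact(std::vector<unsigned char>& heap, std::size_t used)
{
    std::size_t dst = 0;   // where the next live object lands (plays the role of d1)
    std::size_t src = 0;   // object currently being examined (plays the role of s2)
    while (src < used)
    {
        ToyObject hdr;
        std::memcpy(&hdr, &heap[src], sizeof(hdr));
        if (hdr.live)
        {
            if (dst != src)
            {
                std::memmove(&heap[dst], &heap[src], hdr.size);
            }
            dst += hdr.size;
        }
        src += hdr.size;   // dead objects are simply skipped; their space is reclaimed
    }
    return dst;            // everything past dst is now free
}
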
@@ -1407,18 +1410,13 @@ bool SatoriRegion::Sweep(bool turnMarkedIntoEscaped)
void SatoriRegion::UpdateReferences()
{
size_t objLimit = Start() + Satori::REGION_SIZE_GRANULARITY;

// TODO: VS assert these
// memset(&m_freeLists, 0, sizeof(m_freeLists));
// memset(&m_index, 0, sizeof(m_index));

SatoriObject* obj = FirstObject();
do
{
//TODO: VS walking objects without bitmap.
// - we need to get MTs for all live objects anyways
// - free objects are near optimal since we have just swept
// - besides, relocated objects are not marked (we could, but there is little point if we do not use that here)
//NB: walking objects without bitmap.
// - we need to get MTs for all live objects anyways
// - free objects are near optimal since we have just swept the region
// - besides, relocated objects would have to be marked, which is extra work.
obj->ForEachObjectRef(
[](SatoriObject** ppObject)
{

@@ -150,7 +150,7 @@ private:
int32_t m_markStack;

// counting escaped objects
// when number goes to high, we stop escaping and do not do local GC.
// when number goes too high, we stop escaping and do not do local GC.
int32_t m_escapeCounter;

size_t m_occupancy;

@@ -107,14 +107,18 @@ void SatoriRegion::ForEachFinalizable(F& lambda)
}
}

//TODO: VS in non-last chunck unfilled slots should count as nulls
// unfilled slots in the tail chunks count as nulls
if (chunk != m_finalizables)
{
nulls += chunk->FreeSpace();
}

chunk = chunk->Next();
}

// typically the list is short, so having some dead entries is not a big deal.
// compacting at 1/2 occupancy should be good enough (
// (50% overhead limit at O(N) maintenance cost)
// compacting at 1/2 occupancy should be good enough.
// (50% max overhead at O(N) maintenance cost)
if (nulls * 2 > items)
{
CompactFinalizables();

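The compaction heuristic for the finalizable list is "compact once more than half of the counted entries are dead" (nulls * 2 > items), which bounds the wasted slots at roughly 50% while keeping maintenance linear. A tiny worked sketch of applying that threshold to a chunked list (the chunk layout here is illustrative, not Satori's):

#include <cstddef>
#include <vector>

// One chunk of finalization trackers; nullptr entries are dead. As in the
// commit, unfilled slots in chunks other than the head (the one still being
// filled) are counted as dead too.
struct FinalizableChunkSketch
{
    std::vector<void*> slots;    // filled entries (nullptr == dead)
    std::size_t        capacity; // total slots the chunk can hold
};

// Decide whether to compact: more than half of all counted entries are dead.
bool ShouldCompact(const std::vector<FinalizableChunkSketch>& chunks)
{
    std::size_t items = 0;
    std::size_t nulls = 0;
    for (std::size_t i = 0; i < chunks.size(); ++i)
    {
        const FinalizableChunkSketch& c = chunks[i];
        std::size_t deadFilled = 0;
        for (void* slot : c.slots)
        {
            if (slot == nullptr)
            {
                deadFilled++;
            }
        }
        bool isHead = (i == 0);  // head chunk is still being filled
        items += isHead ? c.slots.size() : c.capacity;
        nulls += deadFilled + (isHead ? 0 : c.capacity - c.slots.size());
    }
    return nulls * 2 > items;
}
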