Mirror of https://github.com/VSadov/Satori.git (synced 2025-06-08 03:27:04 +09:00)
Immortal Allocations (#30)
* can allocate
* recycler aware of 3rd gen
* barriers are gen3 aware
* zeroing and linking
* some tweaks to immortal alloc
* Update README.md
parent 338834902b
commit 9991147ab5
14 changed files with 210 additions and 69 deletions
@@ -1057,6 +1057,7 @@ enum GC_ALLOC_FLAGS
     GC_ALLOC_ZEROING_OPTIONAL = 16,
     GC_ALLOC_LARGE_OBJECT_HEAP = 32,
     GC_ALLOC_PINNED_OBJECT_HEAP = 64,
+    GC_ALLOC_IMMORTAL = 128,
     GC_ALLOC_USER_OLD_HEAP = GC_ALLOC_LARGE_OBJECT_HEAP | GC_ALLOC_PINNED_OBJECT_HEAP,
 };
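The new flag takes the next free bit in the enum. A minimal sketch of how the bit is composed and tested (GC_ALLOC_CONTAINS_REF is an existing flag, used here purely for illustration):

    uint32_t flags = GC_ALLOC_IMMORTAL;
    if (containsPointers)
        flags |= GC_ALLOC_CONTAINS_REF;  // flags combine with bitwise-or

    if (flags & GC_ALLOC_IMMORTAL)       // and are tested with a mask,
    {                                    // as SatoriAllocator::Alloc does below
        // take the immortal allocation path
    }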
@@ -44,7 +44,7 @@ A simple garbage collector that incorporates various ideas that I had over time.
 
 ### Roadmap: ###
 - [ ] explicit memory limits
-- [ ] immortal allocations
+- [x] immortal allocations
 - [ ] preallocated objects
 - [ ] perf tuning (possibly a lot of opportunities)
 - [ ] more and better diagnostics (support for debuggers and profilers)
@@ -51,7 +51,7 @@ void SatoriAllocationContext::Deactivate(SatoriRecycler* recycler, bool detach)
 
         if (detach)
         {
-            region->DetachFromContextRelease();
+            region->DetachFromAlocatingOwnerRelease();
         }
 
         recycler->AddEphemeralRegion(region);
@@ -72,7 +72,7 @@ void SatoriAllocationContext::Deactivate(SatoriRecycler* recycler, bool detach)
 
         if (detach)
        {
-            region->DetachFromContextRelease();
+            region->DetachFromAlocatingOwnerRelease();
         }
 
         recycler->AddEphemeralRegion(region);
@@ -51,7 +51,10 @@ void SatoriAllocator::Initialize(SatoriHeap* heap)
         m_queues[i] = new SatoriRegionQueue(QueueKind::Allocator);
     }
 
-    m_WorkChunks = new SatoriWorkList();
+    m_workChunks = new SatoriWorkList();
+
+    m_immortalRegion = nullptr;
+    m_immortalAlocLock.Initialize();
 }
 
 SatoriRegion* SatoriAllocator::GetRegion(size_t regionSize)
@@ -162,9 +165,17 @@ Object* SatoriAllocator::Alloc(SatoriAllocationContext* context, size_t size, ui
     SatoriObject* result;
 
-    if (flags & GC_ALLOC_PINNED_OBJECT_HEAP)
+    if (flags & GC_ALLOC_IMMORTAL)
+    {
+        return AllocImmortal(context, size, flags);
+    }
+    else if (flags & GC_ALLOC_PINNED_OBJECT_HEAP)
     {
         result = AllocLarge(context, size, flags);
+        if (result != nullptr)
+        {
+            result->SetUnmovable();
+        }
     }
     else if (context->alloc_ptr + size <= context->alloc_limit)
     {
@@ -180,7 +191,7 @@ Object* SatoriAllocator::Alloc(SatoriAllocationContext* context, size_t size, ui
         result = AllocLarge(context, size, flags);
     }
 
-    if (flags & GC_ALLOC_FINALIZE)
+    if (result != nullptr && flags & GC_ALLOC_FINALIZE)
     {
         if (!result->ContainingRegion()->RegisterForFinalization(result))
         {
@@ -188,11 +199,6 @@ Object* SatoriAllocator::Alloc(SatoriAllocationContext* context, size_t size, ui
         }
     }
 
-    if (flags & GC_ALLOC_PINNED_OBJECT_HEAP)
-    {
-        result->SetUnmovable();
-    }
-
     return result;
 }
@@ -201,7 +207,7 @@ SatoriObject* SatoriAllocator::AllocRegular(SatoriAllocationContext* context, si
     m_heap->Recycler()->HelpOnce();
     SatoriRegion* region = context->RegularRegion();
 
-    _ASSERTE(region == nullptr || region->IsAttachedToContext());
+    _ASSERTE(region == nullptr || region->IsAttachedToAllocatingOwner());
 
     while (true)
     {
@@ -280,7 +286,7 @@ SatoriObject* SatoriAllocator::AllocRegular(SatoriAllocationContext* context, si
         }
 
         context->alloc_ptr = context->alloc_limit = nullptr;
-        region->DetachFromContextRelease();
+        region->DetachFromAlocatingOwnerRelease();
         m_heap->Recycler()->AddEphemeralRegion(region);
     }
@@ -299,7 +305,7 @@ SatoriObject* SatoriAllocator::AllocRegular(SatoriAllocationContext* context, si
     // <objects allocated>
     // 4) (optional: clear escape tag) Detach
 
-    region->AttachToContext(&context->RegularRegion());
+    region->AttachToAllocatingOwner(&context->RegularRegion());
     if (SatoriUtil::IsThreadLocalGCEnabled())
     {
         switch (region->ReusableFor())
@@ -434,7 +440,7 @@ SatoriObject* SatoriAllocator::AllocLarge(SatoriAllocationContext* context, size
             continue;
         }
 
-        region->DetachFromContextRelease();
+        region->DetachFromAlocatingOwnerRelease();
         m_heap->Recycler()->AddEphemeralRegion(region);
     }
@@ -460,7 +466,7 @@ SatoriObject* SatoriAllocator::AllocLarge(SatoriAllocationContext* context, size
             _ASSERTE(region->NothingMarked());
         }
 
-        region->AttachToContext(&context->LargeRegion());
+        region->AttachToAllocatingOwner(&context->LargeRegion());
         region->SetGenerationRelease(1);
         region->ResetReusableForRelease();
     }
@@ -507,6 +513,84 @@ SatoriObject* SatoriAllocator::AllocHuge(SatoriAllocationContext* context, size_
     return result;
 }
 
+SatoriObject* SatoriAllocator::AllocImmortal(SatoriAllocationContext* context, size_t size, uint32_t flags)
+{
+    // immortal allocs should be way less than region size.
+    _ASSERTE(size < Satori::REGION_SIZE_GRANULARITY / 2);
+
+    SatoriLockHolder<SatoriLock> holder(&m_immortalAlocLock);
+    SatoriRegion* region = m_immortalRegion;
+
+    while (true)
+    {
+        if (region)
+        {
+            size_t allocRemaining = region->GetAllocRemaining();
+            if (allocRemaining >= size)
+            {
+                // we ensure that region is zero-inited
+                // so we do not need to clear here for typically small objects
+                SatoriObject* result = (SatoriObject*)region->Allocate(size, /*zeroInitialize*/ false);
+                if (result)
+                {
+                    context->alloc_bytes_uoh += size;
+                    region->SetIndicesForObject(result, result->Start() + size);
+                }
+                else
+                {
+                    // OOM
+                }
+
+                return result;
+            }
+
+            if (region->IsAllocating())
+            {
+                region->StopAllocating(/* allocPtr */ 0);
+            }
+            else
+            {
+                size_t desiredFreeSpace = size + Satori::MIN_FREE_SIZE;
+                if (region->StartAllocating(desiredFreeSpace))
+                {
+                    // we have enough free space in the region to continue
+                    continue;
+                }
+            }
+        }
+
+        // get a new regular region.
+        region = GetRegion(Satori::REGION_SIZE_GRANULARITY);
+        if (!region)
+        {
+            //OOM
+            return nullptr;
+        }
+
+        _ASSERTE(region->NothingMarked());
+        // Ensure the region is zeroed, to not clear for each allocated (and typically small) object.
+        // And, while at that, link the previous one in case we have a reason to iterate old regions.
+        region->ZeroInitAndLink(m_immortalRegion);
+        if (m_immortalRegion)
+        {
+            m_immortalRegion->DetachFromAlocatingOwnerRelease();
+        }
+
+        region->AttachToAllocatingOwner(&m_immortalRegion);
+        region->SetGenerationRelease(3);
+    }
+
+    return nullptr;
+}
+
+void SatoriAllocator::DeactivateImmortalRegion()
+{
+    if (m_immortalRegion && m_immortalRegion->IsAllocating())
+    {
+        m_immortalRegion->StopAllocating(/* allocPtr */ 0);
+    }
+}
+
 SatoriWorkChunk* SatoriAllocator::TryGetWorkChunk()
 {
 #if _DEBUG
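AllocImmortal is, at its core, a lock-guarded bump allocator over a chain of pre-zeroed regions. A self-contained sketch of the same shape, with hypothetical names throughout (this illustrates the pattern; it is not Satori's code):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <mutex>
    #include <new>

    // Hypothetical stand-ins for SatoriRegion / SatoriLock.
    struct Region
    {
        std::uint8_t* cursor; // next free byte
        std::uint8_t* end;    // one past the last usable byte
        Region* next;         // previously filled region, kept linked for iteration
    };

    class ImmortalAllocator
    {
    public:
        void* Alloc(std::size_t size)
        {
            std::lock_guard<std::mutex> hold(m_lock); // plays the role of m_immortalAlocLock
            while (true)
            {
                if (m_current && static_cast<std::size_t>(m_current->end - m_current->cursor) >= size)
                {
                    void* result = m_current->cursor; // memory is pre-zeroed, so no
                    m_current->cursor += size;        // per-object clearing is needed
                    return result;
                }

                // current region exhausted (or none yet): chain a fresh zeroed one
                Region* r = NewRegion();
                if (!r)
                    return nullptr; // OOM
                r->next = m_current; // like ZeroInitAndLink linking the previous region
                m_current = r;
            }
        }

    private:
        static constexpr std::size_t kRegionSize = 1 << 20; // hypothetical granularity

        Region* NewRegion()
        {
            // calloc mirrors the "region is zero-inited up front" guarantee
            void* mem = std::calloc(1, kRegionSize);
            if (!mem)
                return nullptr;
            std::uint8_t* base = static_cast<std::uint8_t*>(mem);
            return new (mem) Region{ base + sizeof(Region), base + kRegionSize, nullptr };
        }

        std::mutex m_lock;
        Region* m_current = nullptr;
    };

The real code differs where it matters to the GC: regions come from GetRegion, are stamped generation 3 so the recycler and barriers treat them as tenured-or-older, and the previous region is detached from its allocating owner once a replacement takes over.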
@@ -518,11 +602,11 @@ SatoriWorkChunk* SatoriAllocator::TryGetWorkChunk()
         return nullptr;
     }
 #endif
-    SatoriWorkChunk* chunk = m_WorkChunks->TryPop();
+    SatoriWorkChunk* chunk = m_workChunks->TryPop();
 
     while (!chunk && AddMoreWorkChunks())
     {
-        chunk = m_WorkChunks->TryPop();
+        chunk = m_workChunks->TryPop();
     }
 
     _ASSERTE(chunk->Count() == 0);
@@ -532,10 +616,10 @@ SatoriWorkChunk* SatoriAllocator::TryGetWorkChunk()
 // returns NULL only in OOM case
 SatoriWorkChunk* SatoriAllocator::GetWorkChunk()
 {
-    SatoriWorkChunk* chunk = m_WorkChunks->TryPop();
+    SatoriWorkChunk* chunk = m_workChunks->TryPop();
     while (!chunk && AddMoreWorkChunks())
     {
-        chunk = m_WorkChunks->TryPop();
+        chunk = m_workChunks->TryPop();
     }
 
     _ASSERTE(chunk->Count() == 0);
@@ -559,7 +643,7 @@ bool SatoriAllocator::AddMoreWorkChunks()
         }
 
         SatoriWorkChunk* chunk = SatoriWorkChunk::InitializeAt(mem);
-        m_WorkChunks->Push(chunk);
+        m_workChunks->Push(chunk);
     }
 
     return true;
@@ -568,5 +652,5 @@ bool SatoriAllocator::AddMoreWorkChunks()
 void SatoriAllocator::ReturnWorkChunk(SatoriWorkChunk* chunk)
 {
     _ASSERTE(chunk->Count() == 0);
-    m_WorkChunks->Push(chunk);
+    m_workChunks->Push(chunk);
 }
@@ -54,18 +54,22 @@ public:
     SatoriWorkChunk* GetWorkChunk();
     void ReturnWorkChunk(SatoriWorkChunk* chunk);
 
+    void DeactivateImmortalRegion();
+
 private:
     SatoriHeap* m_heap;
     SatoriRegionQueue* m_queues[Satori::ALLOCATOR_BUCKET_COUNT];
+    SatoriWorkList* m_workChunks;
 
-    SatoriWorkList* m_WorkChunks;
+    SatoriRegion* m_immortalRegion;
+    SatoriLock m_immortalAlocLock;
 
     SatoriObject* AllocRegular(SatoriAllocationContext* context, size_t size, uint32_t flags);
     SatoriObject* AllocLarge(SatoriAllocationContext* context, size_t size, uint32_t flags);
     SatoriObject* AllocHuge(SatoriAllocationContext* context, size_t size, uint32_t flags);
+    SatoriObject* AllocImmortal(SatoriAllocationContext* context, size_t size, uint32_t flags);
 
     void TryGetRegularRegion(SatoriRegion*& region);
 
     bool AddMoreWorkChunks();
 
     static int SizeToBucket(size_t size)
@@ -450,7 +450,7 @@ void SatoriGC::PublishObject(uint8_t* obj)
     // but the region is not parseable until the object has a MethodTable,
     // so we delay taking the region out of generation -1 and passing to recycler
     // until we get here.
-    if (!region->IsAttachedToContext())
+    if (!region->IsAttachedToAllocatingOwner() && region->Generation() < 2)
     {
         _ASSERTE(region->Size() > Satori::REGION_SIZE_GRANULARITY);
         region->SetGenerationRelease(1);
@@ -582,8 +582,7 @@ void SatoriGC::UnregisterFrozenSegment(segment_handle seg)
 
 bool SatoriGC::IsInFrozenSegment(Object* object)
 {
-    // TODO: VS implement actual frozen objects.
-    return ((SatoriObject*)object)->IsUnmovable();
+    return ((SatoriObject*)object)->ContainingRegion()->Generation() == 3;
 }
 
 void SatoriGC::ControlEvents(GCEventKeyword keyword, GCEventLevel level)
@@ -330,7 +330,7 @@ void SatoriRecycler::AddEphemeralRegion(SatoriRegion* region)
     // Demoted regions could be pre-marked
     region->Verify(/* allowMarked */ region->IsDemoted() || SatoriUtil::IsConcurrent());
 
-    if (region->IsAttachedToContext())
+    if (region->IsAttachedToAllocatingOwner())
     {
         _ASSERTE(IsBlockingPhase());
         m_condemnedNurseryRegionsCount++;
@@ -1134,6 +1134,9 @@ void SatoriRecycler::DeactivateAllStacks()
     m_currentAllocBytesLiveThreads = 0;
     GCToEEInterface::GcEnumAllocContexts(DeactivateFn, m_heap->Recycler());
     m_totalAllocBytes = m_currentAllocBytesLiveThreads + m_currentAllocBytesDeadThreads;
+
+    // make immortal region parseable, in case we have byrefs pointing to it.
+    m_heap->Allocator()->DeactivateImmortalRegion();
 }
 
 void SatoriRecycler::PushToMarkQueuesSlow(SatoriWorkChunk*& currentWorkChunk, SatoriObject* o)
@@ -1296,7 +1299,7 @@ void SatoriRecycler::MarkFnConcurrent(PTR_PTR_Object ppObject, ScanContext* sc,
     // Concurrent FindObject is unsafe in active regions. While ref may be in a real obj,
     // the path to it from the first obj or prev indexed may cross unparsable ranges.
     // The check must acquire to be sure we check before actually doing FindObject.
-    if (containingRegion->MaybeAttachedToContextAcquire())
+    if (containingRegion->MaybeAttachedToAllocatingOwnerAcquire())
     {
         return;
     }
@@ -1862,7 +1865,7 @@ bool SatoriRecycler::MarkThroughCardsConcurrent(int64_t deadline)
     _ASSERTE(!region->HasMarksSet());
 
     // sometimes we set cards without checking dst generation, but REMEMBERED only has meaning in tenured
-    if (region->Generation() != 2)
+    if (region->Generation() < 2)
     {
         // This is optimization. Not needed for correctness.
         // If not dirty, we wipe the group, to not look at this again in the next scans.
@@ -2021,7 +2024,7 @@ bool SatoriRecycler::ScanDirtyCardsConcurrent(int64_t deadline)
     _ASSERTE(!region->HasMarksSet());
 
     // allocating region is not parseable.
-    if (region->MaybeAttachedToContextAcquire())
+    if (region->MaybeAttachedToAllocatingOwnerAcquire())
     {
         continue;
     }
@@ -2066,7 +2069,7 @@ bool SatoriRecycler::ScanDirtyCardsConcurrent(int64_t deadline)
     // cannot mark stuff in thread local regions. just mark as dirty to visit later.
     if (!childRegion->MaybeEscapeTrackingAcquire())
     {
-        if (!child->IsMarked())
+        if (!child->IsMarkedOrOlderThan(2))
         {
             child->SetMarkedAtomic();
             if (!dstChunk || !dstChunk->TryPush(child))
@@ -2154,7 +2157,7 @@ void SatoriRecycler::MarkThroughCards()
     _ASSERTE(!region->HasMarksSet());
 
     // sometimes we set cards without checking dst generation, but REMEMBERED only has meaning in tenured
-    if (region->Generation() != 2)
+    if (region->Generation() < 2)
     {
         // This is optimization. Not needed for correctness.
         // If not dirty, we wipe the group, to not look at this again in the next scans.
@@ -2286,7 +2289,7 @@ void SatoriRecycler::CleanCards()
     if (groupState == Satori::CardState::DIRTY)
     {
         SatoriRegion* region = page->RegionForCardGroup(i);
-        const int8_t resetValue = region->Generation() == 2 ? Satori::CardState::REMEMBERED : Satori::CardState::EPHEMERAL;
+        const int8_t resetValue = region->Generation() >= 2 ? Satori::CardState::REMEMBERED : Satori::CardState::EPHEMERAL;
 
         // clean the group, but must do that before reading the cards.
         if (Interlocked::CompareExchange(&page->CardGroupState(i), resetValue, Satori::CardState::DIRTY) != Satori::CardState::DIRTY)
@@ -2298,7 +2301,7 @@ void SatoriRecycler::CleanCards()
     bool considerAllMarked = region->Generation() > m_condemnedGeneration;
 
     _ASSERTE(Satori::CardState::EPHEMERAL == -1);
-    const size_t unsetValue = region->Generation() == 2 ? 0 : -1;
+    const size_t unsetValue = region->Generation() >= 2 ? 0 : -1;
 
     int8_t* cards = page->CardsForGroup(i);
     for (size_t j = 0; j < Satori::CARD_BYTES_IN_CARD_GROUP; j++)
@@ -2409,7 +2412,7 @@ void SatoriRecycler::UpdatePointersThroughCards()
     page->CardGroupScanTicket(i) = currentScanTicket;
 
     SatoriRegion* region = page->RegionForCardGroup(i);
-    _ASSERTE(region->Generation() == 2);
+    _ASSERTE(region->Generation() >= 2);
 
     _ASSERTE(groupTicket == 0 || currentScanTicket - groupTicket <= 2);
     int8_t* cards = page->CardsForGroup(i);
@@ -2835,7 +2838,7 @@ bool SatoriRecycler::IsRelocatable(SatoriRegion* region)
 {
     if (region->Occupancy() > Satori::REGION_SIZE_GRANULARITY / 2 || // too full
         region->HasPinnedObjects() || // pinned cannot be evacuated
-        region->IsAttachedToContext() // nursery regions do not participate in relocations
+        region->IsAttachedToAllocatingOwner() // nursery regions do not participate in relocations
        )
     {
         return false;
@@ -2953,7 +2956,7 @@ void SatoriRecycler::PlanRegions(SatoriRegionQueue* regions)
     _ASSERTE(curRegion->Generation() <= m_condemnedGeneration);
 
     // nursery regions do not participate in relocations
-    if (curRegion->IsAttachedToContext())
+    if (curRegion->IsAttachedToAllocatingOwner())
     {
         m_stayingRegions->Push(curRegion);
         continue;
@@ -3428,7 +3431,7 @@ void SatoriRecycler::UpdateRegions(SatoriRegionQueue* queue)
     }
 
     // recycler owns nursery regions only temporarily, we should not keep them.
-    if (curRegion->IsAttachedToContext())
+    if (curRegion->IsAttachedToAllocatingOwner())
     {
         // when promoting, all nursery regions should be detached
         _ASSERTE(!m_promoteAllRegions);
@@ -3439,7 +3442,7 @@ void SatoriRecycler::UpdateRegions(SatoriRegionQueue* queue)
 
     if (curRegion->Occupancy() == 0)
     {
-        curRegion->DetachFromContextRelease();
+        curRegion->DetachFromAlocatingOwnerRelease();
         curRegion->MakeBlank();
         m_heap->Allocator()->ReturnRegion(curRegion);
     }
@@ -3536,7 +3539,7 @@ void SatoriRecycler::KeepRegion(SatoriRegion* curRegion)
     //
 
     RecordOccupancy(curRegion->Generation(), curRegion->Occupancy());
-    if (curRegion->Generation() == 2)
+    if (curRegion->Generation() >= 2)
     {
         PushToTenuredQueues(curRegion);
     }
@@ -519,6 +519,20 @@ bool SatoriRegion::TryDecommit()
     return false;
 }
 
+void SatoriRegion::ZeroInitAndLink(SatoriRegion* prev)
+{
+    if (m_used > (size_t)&m_syncBlock)
+    {
+        memset(&m_syncBlock, 0, m_used - (size_t)&m_syncBlock);
+    }
+
+    m_next = prev;
+    if (prev)
+    {
+        prev->m_prev = this;
+    }
+}
+
 size_t SatoriRegion::Allocate(size_t size, bool zeroInitialize)
 {
     _ASSERTE(m_containingQueue == nullptr);
@@ -1516,7 +1530,7 @@ tryAgain:
 bool SatoriRegion::RegisterForFinalization(SatoriObject* finalizable)
 {
     _ASSERTE(finalizable->ContainingRegion() == this);
-    _ASSERTE(this->m_hasFinalizables || this->IsAttachedToContext());
+    _ASSERTE(this->m_hasFinalizables || this->IsAttachedToAllocatingOwner());
 
     LockFinalizableTrackers();
@@ -64,6 +64,8 @@ public:
     bool CanCoalesceWithNext();
     bool TryCoalesceWithNext();
 
+    void ZeroInitAndLink(SatoriRegion* prev);
+
     static size_t RegionSizeForAlloc(size_t allocSize);
 
     size_t GetAllocStart();
@@ -87,10 +89,10 @@ public:
     bool MaybeEscapeTrackingAcquire();
     bool IsEscapeTrackedByCurrentThread();
 
-    void AttachToContext(SatoriRegion** attachementPoint);
-    void DetachFromContextRelease();
-    bool IsAttachedToContext();
-    bool MaybeAttachedToContextAcquire();
+    void AttachToAllocatingOwner(SatoriRegion** attachementPoint);
+    void DetachFromAlocatingOwnerRelease();
+    bool IsAttachedToAllocatingOwner();
+    bool MaybeAttachedToAllocatingOwnerAcquire();
 
     void ResetReusableForRelease();
@@ -215,7 +217,7 @@ private:
     void (*m_escapeFunc)(SatoriObject**, SatoriObject*, SatoriRegion*);
     int m_generation;
     ReuseLevel m_reusableFor;
-    SatoriRegion** m_allocationContextAttachmentPoint;
+    SatoriRegion** m_allocatingOwnerAttachmentPoint;
 
     size_t m_end;
     size_t m_committed;
@@ -247,7 +247,7 @@ bool SatoriRegion::Sweep()
     }
 
     m_escapedSize = 0;
-    bool cannotRecycle = this->IsAttachedToContext();
+    bool cannotRecycle = this->IsAttachedToAllocatingOwner();
     bool isEscapeTracking = this->IsEscapeTracking();
     size_t occupancy = 0;
     size_t objCount = 0;
@@ -373,39 +373,39 @@ inline SatoriQueue<SatoriRegion>* SatoriRegion::ContainingQueue()
     return VolatileLoadWithoutBarrier(&m_containingQueue);
 }
 
-inline void SatoriRegion::AttachToContext(SatoriRegion** attachementPoint)
+inline void SatoriRegion::AttachToAllocatingOwner(SatoriRegion** attachementPoint)
 {
-    _ASSERTE(!m_allocationContextAttachmentPoint);
+    _ASSERTE(!m_allocatingOwnerAttachmentPoint);
     _ASSERTE(!*attachementPoint);
 
     *attachementPoint = this;
-    m_allocationContextAttachmentPoint = attachementPoint;
+    m_allocatingOwnerAttachmentPoint = attachementPoint;
 }
 
-inline void SatoriRegion::DetachFromContextRelease()
+inline void SatoriRegion::DetachFromAlocatingOwnerRelease()
 {
-    _ASSERTE(*m_allocationContextAttachmentPoint == this);
+    _ASSERTE(*m_allocatingOwnerAttachmentPoint == this);
 
     if (IsEscapeTracking())
     {
        StopEscapeTracking();
     }
 
-    *m_allocationContextAttachmentPoint = nullptr;
+    *m_allocatingOwnerAttachmentPoint = nullptr;
     // all allocations must be committed prior to detachement.
-    VolatileStore(&m_allocationContextAttachmentPoint, (SatoriRegion**)nullptr);
+    VolatileStore(&m_allocatingOwnerAttachmentPoint, (SatoriRegion**)nullptr);
 }
 
-inline bool SatoriRegion::IsAttachedToContext()
+inline bool SatoriRegion::IsAttachedToAllocatingOwner()
 {
-    return m_allocationContextAttachmentPoint;
+    return m_allocatingOwnerAttachmentPoint;
 }
 
-inline bool SatoriRegion::MaybeAttachedToContextAcquire()
+inline bool SatoriRegion::MaybeAttachedToAllocatingOwnerAcquire()
 {
     // must check reusable level before the attach point, before doing whatever follows
     return VolatileLoad((uint8_t*)&m_reusableFor) ||
-        VolatileLoad(&m_allocationContextAttachmentPoint);
+        VolatileLoad(&m_allocatingOwnerAttachmentPoint);
 }
 
 inline void SatoriRegion::ResetReusableForRelease()
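The Release/Acquire suffixes on these helpers encode a publication protocol: the detaching thread must make the region fully parseable before other threads can observe it as detached, and a scanning thread must complete its check before it starts parsing. A sketch of that pairing using std::atomic (illustrative only; the real code uses VolatileStore/VolatileLoad):

    #include <atomic>

    struct Region; // stand-in for SatoriRegion

    std::atomic<Region*> g_attachmentPoint{ nullptr };

    void DetachRelease()
    {
        // release: every write that made the region parseable happens-before
        // any thread observing the detached (nullptr) state
        g_attachmentPoint.store(nullptr, std::memory_order_release);
    }

    bool MaybeAttachedAcquire()
    {
        // acquire: reads that follow (e.g., FindObject) cannot be reordered
        // ahead of this load
        return g_attachmentPoint.load(std::memory_order_acquire) != nullptr;
    }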
@@ -393,9 +393,9 @@ LEAF_ENTRY JIT_WriteBarrier, _TEXT
     ; TUNING: nonconcurrent and concurrent barriers could be separate pieces of code, but to switch
     ; need to suspend EE, not sure if skipping concurrent check would worth that much.
 
-    ; if src is in gen2 and the barrier is not concurrent we do not need to mark cards
+    ; if src is in gen2/3 and the barrier is not concurrent we do not need to mark cards
     cmp     dword ptr [r8 + 16], 2
-    jne     MarkCards
+    jl      MarkCards
 
 CheckConcurrent:
     cmp     byte ptr [g_sw_ww_enabled_for_gc_heap], 0h
@@ -300,9 +300,9 @@ LEAF_ENTRY JIT_WriteBarrier, _TEXT
 
     jz      CheckConcurrent    // same region, just check if barrier is not concurrent
 
-    // if src is in gen2 and the barrier is not concurrent we do not need to mark cards
+    // if src is in gen2/3 and the barrier is not concurrent we do not need to mark cards
     cmp     dword ptr [rdx + 16], 2
-    jne     MarkCards
+    jl      MarkCards
 
 CheckConcurrent:
     cmp     byte ptr [r11], 0
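Both barrier hunks make the same change: the generation value at offset 16 of the source region is now compared with "jump if less than 2" instead of "jump if not equal to 2", so gen3 (immortal) sources skip card marking exactly like gen2. The control flow in C-like form (a sketch with hypothetical names; only the offset-16 generation read is taken from the asm):

    struct Region { int generation; };

    static void MarkCards(Region*) { /* set card bytes for the source location */ }
    static void RecordForConcurrentScan(Region*) { /* dirty cards for the concurrent scan */ }

    void PostWriteBarrierCheck(Region* srcRegion, bool concurrentMarking)
    {
        if (srcRegion->generation < 2) // was != 2, which would send gen3 to MarkCards
        {
            MarkCards(srcRegion);      // young sources still need card marking
            return;
        }
        // gen2 and gen3 sources fall through to the concurrent check only
        if (concurrentMarking)
            RecordForConcurrentScan(srcRegion);
    }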
@@ -991,7 +991,6 @@ OBJECTREF AllocateObject(MethodTable *pMT
 #ifdef FEATURE_COMINTEROP
     , bool fHandleCom
 #endif
-    , bool fUnmovable
     )
 {
     CONTRACTL {
@@ -1043,9 +1042,6 @@ OBJECTREF AllocateObject(MethodTable *pMT
     if (totalSize >= LARGE_OBJECT_SIZE && totalSize >= GCHeapUtilities::GetGCHeap()->GetLOHThreshold())
         flags |= GC_ALLOC_LARGE_OBJECT_HEAP;
 
-    if (fUnmovable)
-        flags |= GC_ALLOC_PINNED_OBJECT_HEAP;
-
 #ifdef FEATURE_64BIT_ALIGNMENT
     if (pMT->RequiresAlign8())
     {
@@ -1111,6 +1107,44 @@ OBJECTREF TryAllocateFrozenObject(MethodTable* pObjMT)
     return ObjectToOBJECTREF(orObject);
 }
 
+Object* AllocateImmortalObject(MethodTable* pMT, size_t objectSize)
+{
+    CONTRACTL{
+        THROWS;
+        GC_TRIGGERS;
+        MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
+        PRECONDITION(CheckPointer(pMT));
+        PRECONDITION(pMT->CheckInstanceActivated());
+    } CONTRACTL_END;
+
+    SetTypeHandleOnThreadForAlloc(TypeHandle(pMT));
+
+    GC_ALLOC_FLAGS flags = GC_ALLOC_IMMORTAL;
+    if (pMT->ContainsPointers())
+        flags |= GC_ALLOC_CONTAINS_REF;
+
+#ifdef FEATURE_64BIT_ALIGNMENT
+    if (pMT->RequiresAlign8())
+    {
+        // The last argument to the allocation, indicates whether the alignment should be "biased". This
+        // means that the object is allocated so that its header lies exactly between two 8-byte
+        // boundaries. This is required in cases where we need to mis-align the header in order to align
+        // the actual payload. Currently this is false for classes (where we apply padding to ensure the
+        // first field is aligned relative to the header) and true for boxed value types (where we can't
+        // do the same padding without introducing more complexity in type layout and unboxing stubs).
+        _ASSERTE(sizeof(Object) == 4);
+        flags |= GC_ALLOC_ALIGN8;
+        if (pMT->IsValueType())
+            flags |= GC_ALLOC_ALIGN8_BIAS;
+    }
+#endif // FEATURE_64BIT_ALIGNMENT
+
+    Object* orObject = (Object*)Alloc(objectSize, flags);
+    orObject->SetMethodTable(pMT);
+
+    return orObject;
+}
+
 //========================================================================
 //
 // WRITE BARRIER HELPERS
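A hypothetical call site for the new helper (an assumption for illustration; it is not part of this diff). For a non-array instance, GetBaseSize gives the object size:

    // Hypothetical usage: allocate an instance the GC will never move or
    // reclaim; it lives in a gen3 (immortal) region.
    Object* AllocateImmortalInstance(MethodTable* pMT)
    {
        size_t size = pMT->GetBaseSize(); // non-array instance size
        return AllocateImmortalObject(pMT, size);
    }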
@@ -57,8 +57,6 @@ OBJECTREF AllocateObject(MethodTable *pMT
 #ifdef FEATURE_COMINTEROP
     , bool fHandleCom = true
 #endif
-    // TODO: VS this may be unused after immortal objects are implemented.
-    , bool fUnmovable = false
     );
 
 inline OBJECTREF AllocateObject(MethodTable *pMT
@@ -74,6 +72,8 @@ inline OBJECTREF AllocateObject(MethodTable *pMT
     );
 }
 
+Object* AllocateImmortalObject(MethodTable* pMT, size_t objectSize);
+
 extern int StompWriteBarrierEphemeral(bool isRuntimeSuspended);
 extern int StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck);
 extern int SwitchToWriteWatchBarrier(bool isRuntimeSuspended);