Mirror of https://github.com/VSadov/Satori.git (synced 2025-06-11 02:13:38 +09:00)
Generational GC works
parent 2b3f940e4b
commit 21f3dfd634
14 changed files with 193 additions and 134 deletions
src/coreclr/gc/env/volatile.h (vendored): 21 changed lines
@@ -196,6 +196,27 @@ T VolatileLoadWithoutBarrier(T const * pt)
    return val;
}

//
// Memory ordering barrier that waits for stores in progress to complete.
// Any effects of stores that appear before, in program order, will "happen before" relative to this.
// Other operations such as computation, instruction prefetch or loads !!! are not guaranteed to be ordered.
//
// Architectural mapping:
// arm64  : dmb ishst
// arm    : dmb ish
// x86/64 : compiler fence
inline
void VolatileStoreBarrier()
{
#if defined(HOST_ARM64) && defined(__GNUC__)
    asm volatile ("dmb ishst" : : : "memory");
#elif defined(HOST_ARM64) && defined(_MSC_VER)
    __dmb(_ARM64_BARRIER_ISHST);
#else
    VOLATILE_MEMORY_BARRIER();
#endif
}

template <typename T> class Volatile;

template<typename T>

@@ -306,6 +306,27 @@ void VolatileLoadBarrier()
#endif
}

//
// Memory ordering barrier that waits for stores in progress to complete.
// Any effects of stores that appear before, in program order, will "happen before" relative to this.
// Other operations such as computation, instruction prefetch or loads !!! are not guaranteed to be ordered.
//
// Architectural mapping:
// arm64  : dmb ishst
// arm    : dmb ish
// x86/64 : compiler fence
inline
void VolatileStoreBarrier()
{
#if defined(HOST_ARM64) && defined(__GNUC__)
    asm volatile ("dmb ishst" : : : "memory");
#elif defined(HOST_ARM64) && defined(_MSC_VER)
    __dmb(_ARM64_BARRIER_ISHST);
#else
    VOLATILE_MEMORY_BARRIER();
#endif
}

//
// Volatile<T> implements accesses with our volatile semantics over a variable of type T.
// Wherever you would have used a "volatile Foo" or, equivalently, "Foo volatile", use Volatile<Foo>
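Note: VolatileStoreBarrier() only orders stores against later stores; it says nothing about loads. A minimal sketch of the intended publish pattern (hypothetical type and field names, not part of this commit):

    // Sketch: publish a payload, then a flag, so a reader that sees the flag also sees the payload.
    struct PublishExample
    {
        int8_t m_payload;
        int8_t m_ready;

        void Publish(int8_t v)
        {
            m_payload = v;           // plain store
            VolatileStoreBarrier();  // stores above become visible before the store below
            m_ready = 1;             // readers test this flag
        }
    };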
@@ -167,9 +167,9 @@ uint64_t SatoriGC::GetTotalAllocatedBytes()

HRESULT SatoriGC::GarbageCollect(int generation, bool low_memory_p, int mode)
{
    // TODO: VS we do full GC for now.
    // we do either Gen1 or Gen2 for now.
    generation = (generation < 0) ? 2 : min(generation, 2);
    generation = max(1, generation);
    generation = min(2, generation);

    m_heap->Recycler()->Collect(generation, /*force*/ true);
    return S_OK;

@@ -235,8 +235,7 @@ HRESULT SatoriGC::Initialize()
    return S_OK;
}

// checks if obj is marked.
// makes sense only during marking phases.
// actually checks if object is considered reachable as a result of a marking phase.
bool SatoriGC::IsPromoted(Object* object)
{
    _ASSERTE(object == nullptr || m_heap->IsHeapAddress((size_t)object));

@@ -245,10 +244,7 @@ bool SatoriGC::IsPromoted(Object* object)
    // objects outside of the collected generation (including null) are considered marked.
    // (existing behavior)
    return o == nullptr ||
        o->IsMarked()
        // TODO: VS enable when truly generational
        // || o->ContainingRegion()->Generation() > m_heap->Recycler()->CondemnedGeneration()
        ;
        o->IsMarkedOrOlderThan(m_heap->Recycler()->CondemnedGeneration());
}

bool SatoriGC::IsHeapPointer(void* object, bool small_heap_only)

@@ -259,9 +255,7 @@ bool SatoriGC::IsHeapPointer(void* object, bool small_heap_only)

unsigned SatoriGC::GetCondemnedGeneration()
{
    return 2;
    // TODO: VS enable when truly generational
    // return m_heap->Recycler()->CondemnedGeneration();
    return m_heap->Recycler()->CondemnedGeneration();
}

bool SatoriGC::IsGCInProgressHelper(bool bConsiderGCStart)
@@ -36,6 +36,7 @@ public:
    bool IsFree();

    bool IsMarked();
    bool IsMarkedOrOlderThan(int generation);
    void SetMarked();
    bool IsPinned();
    void SetPinned();

@@ -103,6 +103,11 @@ inline bool SatoriObject::IsMarked()
    return CheckBit(0);
}

inline bool SatoriObject::IsMarkedOrOlderThan(int generation)
{
    return ContainingRegion()->Generation() > generation || IsMarked();
}

inline void SatoriObject::SetMarked()
{
    SetBit(0);
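Note: IsMarkedOrOlderThan() is the predicate that makes the collection generational: an object counts as live either because it was marked, or because its containing region belongs to a generation older than the one being collected. Illustration (assuming obj is a SatoriObject* in a gen2 region; not part of the diff):

    // During a gen1 GC (condemned generation == 1), a gen2 object is reported live
    // without consulting its mark bit; a gen0/gen1 object must actually be marked.
    bool treatedAsLive = obj->IsMarkedOrOlderThan(/* generation */ 1);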
@@ -129,22 +129,24 @@ void SatoriPage::SetCardForAddress(size_t address)
    if (!m_cardTable[cardByteOffset])
    {
        m_cardTable[cardByteOffset] = Satori::CARD_HAS_REFERENCES;
    }

    size_t cardGroupOffset = offset / Satori::REGION_SIZE_GRANULARITY;
    if (m_cardGroups[cardGroupOffset])
    {
        m_cardGroups[cardGroupOffset] = Satori::CARD_HAS_REFERENCES;
    }
    size_t cardGroupOffset = offset / Satori::REGION_SIZE_GRANULARITY;
    if (!m_cardGroups[cardGroupOffset])
    {
        m_cardGroups[cardGroupOffset] = Satori::CARD_HAS_REFERENCES;

        if (!m_cardState)
        {
            m_cardState = Satori::CARD_HAS_REFERENCES;
        if (!m_cardState)
        {
            m_cardState = Satori::CARD_HAS_REFERENCES;
        }
        }
    }
}

void SatoriPage::SetCardsForRange(size_t start, size_t end)
{
    _ASSERTE(end > start);

    size_t firstByteOffset = start - Start();
    size_t lastByteOffset = end - Start() - 1;

@@ -157,12 +159,12 @@ void SatoriPage::SetCardsForRange(size_t start, size_t end)
    _ASSERTE(lastCard < m_cardTableSize);

    memset((void*)(m_cardTable + firstCard), Satori::CARD_HAS_REFERENCES, lastCard - firstCard + 1);

    size_t firstGroup = firstByteOffset / Satori::REGION_SIZE_GRANULARITY;
    size_t lastGroup = lastByteOffset / Satori::REGION_SIZE_GRANULARITY;
    for (size_t i = firstGroup; i <= lastGroup; i++)
    {
        if (m_cardGroups[i])
        if (!m_cardGroups[i])
        {
            m_cardGroups[i] = Satori::CARD_HAS_REFERENCES;
        }

@@ -174,7 +176,6 @@ void SatoriPage::SetCardsForRange(size_t start, size_t end)
    }
}

//TODO: VS fences.
void SatoriPage::DirtyCardForAddress(size_t address)
{
    size_t offset = address - Start();

@@ -186,12 +187,10 @@ void SatoriPage::DirtyCardForAddress(size_t address)
    m_cardTable[cardByteOffset] = Satori::CARD_DIRTY;

    size_t cardGroupOffset = offset / Satori::REGION_SIZE_GRANULARITY;
    this->m_cardGroups[cardGroupOffset] = Satori::CARD_DIRTY;

    this->m_cardState = Satori::CARD_DIRTY;
    VolatileStore(&this->m_cardGroups[cardGroupOffset], Satori::CARD_DIRTY);
    VolatileStore(&this->m_cardState, Satori::CARD_DIRTY);
}

//TODO: VS fences.
void SatoriPage::DirtyCardsForRange(size_t start, size_t end)
{
    size_t firstByteOffset = start - Start();

@@ -210,6 +209,11 @@ void SatoriPage::DirtyCardsForRange(size_t start, size_t end)
        m_cardTable[i] = Satori::CARD_DIRTY;
    }

    // dirtying can be concurrent with cleaning, so we must ensure order
    // of writes - cards, then groups, then page
    // cleaning will read in the opposite order
    VolatileStoreBarrier();

    size_t firstGroup = firstByteOffset / Satori::REGION_SIZE_GRANULARITY;
    size_t lastGroup = lastByteOffset / Satori::REGION_SIZE_GRANULARITY;
    for (size_t i = firstGroup; i <= lastGroup; i++)

@@ -217,6 +221,8 @@ void SatoriPage::DirtyCardsForRange(size_t start, size_t end)
        this->m_cardGroups[i] = Satori::CARD_DIRTY;
    }

    VolatileStoreBarrier();

    this->m_cardState = Satori::CARD_DIRTY;
}
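Note: the write order in DirtyCardsForRange (cards, barrier, groups, barrier, page) only works if the cleaning side reads in the opposite order. A reader-side sketch mirroring MarkThroughCards later in this diff (the function below is illustrative, not part of the commit):

    // Sketch: read page state first, then groups, then cards, so a dirty page observed
    // here implies the card bytes written before the barriers are visible as well.
    void VisitDirtyCardsSketch(SatoriPage* page, int8_t minState)
    {
        if (VolatileLoad(&page->CardState()) >= minState)
        {
            page->CardState() = Satori::CARD_PROCESSING;
            for (size_t i = 0; i < page->CardGroupCount(); i++)
            {
                if (VolatileLoad(&page->CardGroup(i)) >= minState)
                {
                    int8_t* cards = page->CardsForGroup(i);
                    // ... visit the set cards and reset them ...
                }
            }
        }
    }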
@@ -43,18 +43,11 @@ public:
    void DirtyCardsForRange(size_t start, size_t length);
    void WipeCardsForRange(size_t start, size_t end);

    int8_t CardState()
    int8_t& CardState()
    {
        // TODO: VS should this be VolatileLoad when we have concurrency?
        return m_cardState;
    }

    void SetProcessing()
    {
        // TODO: VS should this be VolatileStore when we have concurrency? (same for the groups)
        m_cardState = Satori::CARD_PROCESSING;
    }

    bool TrySetClean()
    {
        return Interlocked::CompareExchange(&m_cardState, Satori::CARD_HAS_REFERENCES, Satori::CARD_PROCESSING) != Satori::CARD_DIRTY;
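Note: TrySetClean() attempts to move the page from CARD_PROCESSING back to CARD_HAS_REFERENCES and reports failure if a concurrent write barrier dirtied the page while it was being scanned. A sketch of the intended handshake (the exact call site is not shown in this diff):

    // Sketch: scan under PROCESSING, then try to hand the page back to the clean state.
    page->CardState() = Satori::CARD_PROCESSING;
    // ... mark through this page's interesting cards ...
    bool needsRevisit = !page->TrySetClean(); // true if something set CARD_DIRTY meanwhile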
@@ -106,21 +106,13 @@ void SatoriRecycler::MaybeTriggerGC()
    }
    else if (Interlocked::CompareExchange(&m_gcInProgress, 1, 0) == 0)
    {
        // for now just do 1, 2, 1, 2, ...
        int generation = m_scanCount % 2 == 0 ? 1 : 2;
        // for now just do every 16th
        int generation = m_scanCount % 16 == 0 ? 2 : 1;
        Collect(generation, /*force*/ false);
    }
    }
}

// TODO: VS gen1
// clean all cards after gen2
// Volatile for dirty (or put a comment)
// pass generation to EE helpers
// barriers
// do not mark or trace into gen2 regions when in gen1
// GcPromotionsGranted

void SatoriRecycler::Collect(int generation, bool force)
{
    bool wasCoop = GCToEEInterface::EnablePreemptiveGC();

@@ -148,7 +140,7 @@ void SatoriRecycler::Collect(int generation, bool force)
    MarkHandles();

    // mark through all cards that have interesting refs (remembered set).
    bool revisitCards = generation == 1 ?
    bool revisitCards = m_condemnedGeneration == 1 ?
        MarkThroughCards(/* minState */ Satori::CARD_HAS_REFERENCES) :
        false;
@@ -226,11 +218,18 @@ void SatoriRecycler::Collect(int generation, bool force)
    // we must sweep in gen2, since unloadable types may invalidate method tables and make
    // unreachable objects unwalkable.
    // we do not sweep gen1 though. without compaction there is no benefit, just forcing index rebuilding.
    bool nothingMarked = generation == 2 ?
        curRegion->Sweep() :
        curRegion->NothingMarked();
    bool canRecycle = false;
    if (m_condemnedGeneration == 2)
    {
        // we must sweep in gen2 since everything will be gen2
        canRecycle = curRegion->Sweep();
    }
    else if (curRegion->Generation() != 2)
    {
        canRecycle = curRegion->NothingMarked();
    }

    if (curRegion->Generation() <= generation && nothingMarked)
    if (canRecycle)
    {
        // TODO: VS wipe cards should be a part of return and MakeBlank too, but make it minimal
        curRegion->WipeCards();

@@ -239,7 +238,7 @@ void SatoriRecycler::Collect(int generation, bool force)
    }
    else
    {
        if (generation == 2)
        if (m_condemnedGeneration == 2)
        {
            // everything is Gen2 now.
            curRegion->SetGeneration(2);

@@ -259,10 +258,18 @@ void SatoriRecycler::Collect(int generation, bool force)

    SweepRegions(m_regularRegions);
    SweepRegions(m_finalizationTrackingRegions);

    // TODO: VS relocation - this could be done right after marking now,
    // but will have to be after reference updating.
    if (m_condemnedGeneration == 2)
    {
        PromoteSurvivedHandles();
    }

    m_prevRegionCount = m_finalizationTrackingRegions->Count() + m_regularRegions->Count();

    m_gen1Count++;
    if (generation == 2)
    if (m_condemnedGeneration == 2)
    {
        m_gen2Count++;
    }
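Note: the region disposition above is a two-way decision: a full (gen2) GC sweeps every region and recycles the ones that come back empty, while a gen1 GC never touches gen2 regions and recycles only young regions in which nothing was marked. Condensed restatement of the code above (illustrative, not new logic):

    // Sketch: when can a region be returned to the allocator after marking?
    bool canRecycleSketch = (m_condemnedGeneration == 2)
        ? curRegion->Sweep()                                            // full GC: sweep everything
        : (curRegion->Generation() != 2 && curRegion->NothingMarked()); // gen1 GC: only empty young regions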
@@ -307,6 +314,8 @@ public:
        : m_markChunk()
    {
        m_recycler = recycler;
        m_condemnedGeneration = recycler->m_condemnedGeneration;
        m_heap = recycler->m_heap;
    }

    void PushToMarkQueues(SatoriObject* o)

@@ -323,6 +332,8 @@ public:
private:
    SatoriRecycler* m_recycler;
    SatoriMarkChunk* m_markChunk;
    SatoriHeap* m_heap;
    int m_condemnedGeneration;
};

void SatoriRecycler::PushToMarkQueuesSlow(SatoriMarkChunk* &currentMarkChunk, SatoriObject* o)

@@ -333,10 +344,10 @@ void SatoriRecycler::PushToMarkQueuesSlow(SatoriMarkChunk* &currentMarkChunk, Sa
}

#ifdef _DEBUG
    // Limit worklist to one item in debug/chk.
    // Limit worklist in debug/chk.
    // This is just to force more overflows. Otherwise they are rather rare.
    currentMarkChunk = nullptr;
    if (m_workList->Count() == 0)
    if (m_workList->Count() < 3)
#endif
    {
        currentMarkChunk = m_heap->Allocator()->TryGetMarkChunk();
@@ -371,8 +382,7 @@ void SatoriRecycler::MarkFn(PTR_PTR_Object ppObject, ScanContext* sc, uint32_t f
{
    MarkContext* context = (MarkContext*)sc->_unused1;

    //TODO: VS put heap directly on context.
    o = context->m_recycler->m_heap->ObjectForAddressChecked(location);
    o = context->m_heap->ObjectForAddressChecked(location);
    if (o == nullptr)
    {
        return;

@@ -386,16 +396,17 @@ void SatoriRecycler::MarkFn(PTR_PTR_Object ppObject, ScanContext* sc, uint32_t f
        return;
    }

    if (!o->IsMarked())
    MarkContext* context = (MarkContext*)sc->_unused1;
    if (!o->IsMarkedOrOlderThan(context->m_condemnedGeneration))
    {
        // TODO: VS should use threadsafe variant
        o->SetMarked();
        MarkContext* context = (MarkContext*)sc->_unused1;
        context->PushToMarkQueues(o);
    }

    if (flags & GC_CALL_PINNED)
    {
        // TODO: VS should use threadsafe variant
        o->SetPinned();
    }
};
@@ -470,8 +481,9 @@ void SatoriRecycler::DrainMarkQueues()
    [&](SatoriObject** ref)
    {
        SatoriObject* child = *ref;
        if (child && !child->IsMarked())
        if (child && !child->IsMarkedOrOlderThan(m_condemnedGeneration))
        {
            _ASSERTE(child->ContainingRegion()->Generation() > 0);
            child->SetMarked();
            child->Validate();
            if (!dstChunk || !dstChunk->TryPush(child))

@@ -507,6 +519,14 @@ void SatoriRecycler::DrainMarkQueues()
    }
}

//TODO: VS Re: concurrency
// Card Marking will be done with EE suspended, so IU barriers do not need
// to order card writes.
// However marking/clearing itself may cause overflows and that could happen concurrently, thus:
// - IU barriers can use regular writes to dirty cards/groups/pages
// - Ovf dirtying must use write fences, but those should be very rare
// - card marking/clearing must use read fences, not a lot though - per page and per group.

bool SatoriRecycler::MarkThroughCards(int8_t minState)
{
    SatoriMarkChunk* dstChunk = nullptr;
@@ -515,27 +535,44 @@ bool SatoriRecycler::MarkThroughCards(int8_t minState)
    m_heap->ForEachPage(
        [&](SatoriPage* page)
        {
            if (page->CardState() >= minState)
            // VolatileLoad to allow concurrent card clearing.
            // Since we may concurrently make cards dirty due to overflow,
            // page must be checked first, then group, then cards.
            // Dirtying due to overflow will have to do writes in the opposite order.
            int8_t pageState = VolatileLoad(&page->CardState());
            if (pageState >= minState)
            {
                page->SetProcessing();

                page->CardState() = Satori::CARD_PROCESSING;
                size_t groupCount = page->CardGroupCount();
                // TODO: VS when stealing is implemented we should start from a random location
                for (size_t i = 0; i < groupCount; i++)
                {
                    // TODO: VS when stealing is implemented we should start from a random location
                    if (page->CardGroup(i) >= minState)
                    // VolatileLoad, see the comment above regarding page/group/card read order
                    int8_t groupState = VolatileLoad(&page->CardGroup(i));
                    if (groupState >= minState)
                    {
                        int8_t* cards = page->CardsForGroup(i);
                        SatoriRegion* region = page->RegionForCardGroup(i);
                        int8_t resetValue = region->Generation() == 2 ? Satori::CARD_HAS_REFERENCES : Satori::CARD_BLANK;

                        //TODO: VS enable when truly generational.
                        bool considerAllMarked = false; // region->Generation() > m_condemnedGeneration;

                        page->CardGroup(i) = resetValue;
                        for (int j = 0; j < Satori::CARD_BYTES_IN_CARD_GROUP; j++)
                        //ephemeral regions are not interesting here unless they are dirty.
                        if (groupState < Satori::CARD_DIRTY && region->Generation() < 2)
                        {
                            //TODO: VS size_t steps, maybe, at least when skipping?
                            continue;
                        }

                        int8_t resetValue = region->Generation() == 2 ? Satori::CARD_HAS_REFERENCES : Satori::CARD_BLANK;
                        bool considerAllMarked = region->Generation() > m_condemnedGeneration;

                        int8_t* cards = page->CardsForGroup(i);
                        page->CardGroup(i) = resetValue;
                        for (size_t j = 0; j < Satori::CARD_BYTES_IN_CARD_GROUP; j++)
                        {
                            // cards are often sparsely set, if j is aligned, check the entire size_t for 0
                            if (((j & (sizeof(size_t) - 1)) == 0) && *((size_t*)&cards[j]) == 0)
                            {
                                j += sizeof(size_t) - 1;
                                continue;
                            }

                            if (cards[j] < minState)
                            {
                                continue;
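Note: the skip above advances j by sizeof(size_t) - 1 rather than sizeof(size_t) because the loop's own j++ supplies the final step; the alignment check guarantees the 8-byte read does not straddle a word boundary. Standalone restatement (illustrative only):

    // Sketch: when j is word-aligned and all 8 cards in that word are blank, jump ahead.
    if (((j & (sizeof(size_t) - 1)) == 0) && *((size_t*)&cards[j]) == 0)
    {
        j += sizeof(size_t) - 1; // the for-loop's j++ completes the 8-card skip
        continue;
    }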
@@ -548,26 +585,16 @@ bool SatoriRecycler::MarkThroughCards(int8_t minState)
                } while (j < Satori::CARD_BYTES_IN_CARD_GROUP && cards[j] >= minState);

                size_t end = page->LocationForCard(&cards[j]);

                SatoriObject* o = region->FindObject(start);
                do
                {
                    // we trace only through marked objects.
                    // things to consider:
                    // 1) tracing into dead objects is dangerous. marked objects will not have that.
                    // 2) tracing from unmarked retains too much, in particular blocking gen2 must be precise.
                    // 3) overflow will mark before dirtying, so always ok.
                    // 4) for concurrent dirtying - assignment happening before marking will be traced (careful with HW order!!)
                    // 5) gen2 objects are all considered marked in partial GC, but full GC is precise.
                    // 6) gen2 should not have dead objects and must sweep (the other reason is unloadable types)
                    //
                    if (considerAllMarked || o->IsMarked())
                    {
                        o->ForEachObjectRef(
                            [&](SatoriObject** ref)
                            {
                                SatoriObject* child = *ref;
                                if (child && !child->IsMarked())
                                if (child && !child->IsMarkedOrOlderThan(m_condemnedGeneration))
                                {
                                    child->SetMarked();
                                    child->Validate();
@@ -609,7 +636,7 @@ void SatoriRecycler::MarkHandles()

    // concurrent, per thread/heap
    // relies on thread_number to select handle buckets and special-cases #0
    GCScan::GcScanHandles(MarkFn, 2, 2, &sc);
    GCScan::GcScanHandles(MarkFn, m_condemnedGeneration, 2, &sc);

    if (c.m_markChunk != nullptr)
    {

@@ -628,11 +655,11 @@ void SatoriRecycler::WeakPtrScan(bool isShort)
    // null out the targets of short weakrefs that were not promoted.
    if (isShort)
    {
        GCScan::GcShortWeakPtrScan(nullptr, 2, 2, &sc);
        GCScan::GcShortWeakPtrScan(nullptr, m_condemnedGeneration, 2, &sc);
    }
    else
    {
        GCScan::GcWeakPtrScan(nullptr, 2, 2, &sc);
        GCScan::GcWeakPtrScan(nullptr, m_condemnedGeneration, 2, &sc);
    }
}

@@ -640,7 +667,7 @@ void SatoriRecycler::WeakPtrScanBySingleThread()
{
    // scan for deleted entries in the syncblk cache
    // does not use a context, so we pass nullptr
    GCScan::GcWeakPtrScanBySingleThread(2, 2, nullptr);
    GCScan::GcWeakPtrScanBySingleThread(m_condemnedGeneration, 2, nullptr);
}

// can run concurrently, but not with mutator (since it may reregister for finalization)

@@ -662,7 +689,7 @@ void SatoriRecycler::ScanFinalizables()
    // finalizer can be suppressed and re-registered again without creating new trackers.
    // (this is preexisting behavior)

    if (!finalizable->IsMarked())
    if (!finalizable->IsMarkedOrOlderThan(m_condemnedGeneration))
    {
        // eager finalization does not respect suppression (preexisting behavior)
        if (GCToEEInterface::EagerFinalized(finalizable))

@@ -760,7 +787,7 @@ void SatoriRecycler::ScanFinalizables()
    [&](SatoriObject** ppObject)
    {
        SatoriObject* o = *ppObject;
        if (!o->IsMarked())
        if (!o->IsMarkedOrOlderThan(m_condemnedGeneration))
        {
            o->SetMarked();
            c.PushToMarkQueues(*ppObject);

@@ -788,7 +815,7 @@ void SatoriRecycler::DependentHandlesInitialScan()

    // concurrent, per thread/heap
    // relies on thread_number to select handle buckets and special-cases #0
    GCScan::GcDhInitialScan(MarkFn, 2, 2, &sc);
    GCScan::GcDhInitialScan(MarkFn, m_condemnedGeneration, 2, &sc);

    if (c.m_markChunk != nullptr)
    {

@@ -817,3 +844,15 @@ void SatoriRecycler::DependentHandlesRescan()
        m_workList->Push(c.m_markChunk);
    }
}

void SatoriRecycler::PromoteSurvivedHandles()
{
    ScanContext sc;
    sc.promotion = TRUE;
    sc.thread_number = 0;

    // no need for context. we do not create more work here.
    sc._unused1 = nullptr;

    GCScan::GcPromotionsGranted(m_condemnedGeneration, 2, &sc);
}
@@ -78,6 +78,7 @@ private:

    void DependentHandlesInitialScan();
    void DependentHandlesRescan();
    void PromoteSurvivedHandles();
};

#endif
@@ -55,6 +55,8 @@ SatoriRegion* SatoriRegion::InitializeAt(SatoriPage* containingPage, size_t addr
        committed += toCommit;
    }

    _ASSERTE(BITMAP_START * sizeof(size_t) == offsetof(SatoriRegion, m_firstObject) / sizeof(size_t) / 8);

    // clear the header if it was used before
    size_t zeroUpTo = min(used, (size_t)&result->m_syncBlock);
    memset((void*)address, 0, zeroUpTo - address);

@@ -1000,13 +1002,8 @@ bool SatoriRegion::Sweep()
    bool sawMarked = false;
    SatoriObject* cur = FirstObject();

    while(true)
    do
    {
        if (cur->Start() >= limit)
        {
            break;
        }

        if (cur->IsMarked())
        {
            sawMarked = true;

@@ -1022,6 +1019,7 @@ bool SatoriRegion::Sweep()
            SatoriObject::FormatAsFree(lastMarkedEnd, skipped);
        }
    }
    while (cur->Start() < limit);

    // clean index
    memset(&m_index, 0, sizeof(m_index));

@@ -92,7 +92,7 @@ private:
    static const int BITMAP_LENGTH = Satori::REGION_SIZE_GRANULARITY / sizeof(size_t) / sizeof(size_t) / 8;

    // The first actually useful index is offsetof(m_firstObject) / sizeof(size_t) / 8,
    static const int BITMAP_START = (BITMAP_LENGTH + Satori::INDEX_LENGTH) / sizeof(size_t) / 8;
    static const int BITMAP_START = (BITMAP_LENGTH + Satori::INDEX_LENGTH + 2) / sizeof(size_t) / 8;
    union
    {
        // object metadata - one bit per size_t
@@ -512,11 +512,24 @@ LEAF_ENTRY JIT_WriteBarrier, _TEXT
        sub     r8, rax                         ; offset in page
        mov     rdx, r8

        shr     r8, 21                          ; group offset
        shr     rdx, 9                          ; card offset
        shr     r8, 9                           ; card offset
        cmp     byte ptr [rax + r8], 0
        je      SetCard
        REPRET
    SetCard:
        mov     byte ptr [rax + r8], 1

        mov     byte ptr [rdx + rax], 1         ; set card
        mov     byte ptr [r8 + rax + 80h], 1    ; set group
        shr     rdx, 21                         ; group offset
        cmp     byte ptr [rax + rdx + 80h], 0
        je      SetGroup
        REPRET
    SetGroup:
        mov     byte ptr [rax + rdx + 80h], 1

        cmp     byte ptr [rax], 0
        je      SetPage
        REPRET
    SetPage:
        mov     byte ptr [rax], 1               ; set page

    Exit:
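Note: in C-like terms, the new barrier tail above does a check-before-write at each level so that already-set bytes are only read, never rewritten. The layout is read off the asm operands (page state byte at offset 0, card bytes indexed from the page base, group bytes at base + 80h); the helper below is a sketch, not part of the commit:

    // Sketch of the asm: rax = page base, r8/rdx = offset of the updated location within the page.
    void WriteBarrierCardsSketch(uint8_t* page, size_t offsetInPage)
    {
        size_t card  = offsetInPage >> 9;    // one card byte per 512 bytes
        size_t group = offsetInPage >> 21;   // one group byte per 2MB granule

        if (page[card] != 0) return;         // card already set: done (REPRET)
        page[card] = 1;                      // set card

        if (page[0x80 + group] != 0) return; // group already set: done
        page[0x80 + group] = 1;              // set group

        if (page[0] != 0) return;            // page already flagged: done
        page[0] = 1;                         // set page
    }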
@@ -528,29 +541,6 @@ LEAF_ENTRY JIT_PatchedCodeLast, _TEXT
        ret
LEAF_END JIT_PatchedCodeLast, _TEXT

; A helper for a rare path to invoke recursive escape before doing assignment.
; rcx - dest (assumed to be in the heap)
; rdx - src
; r8  - source region
;
LEAF_ENTRY JIT_WriteBarrierHelper_SATORI, _TEXT
        ; save rcx and rdx and have enough stack for the callee
        push    rcx
        push    rdx
        sub     rsp, 20h

        ; void SatoriRegion::EscapeFn(SatoriObject** dst, SatoriObject* src, SatoriRegion* region)
        call    qword ptr [r8 + 8]

        add     rsp, 20h
        pop     rdx
        pop     rcx

        ; the actual assignment. (AV here will be attributed to the caller, unwinder knows this method)
        mov     [rcx], rdx
        ret
LEAF_END_MARKED JIT_WriteBarrierHelper_SATORI, _TEXT

; JIT_ByRefWriteBarrier has weird semantics, see usage in StubLinkerX86.cpp
;
; Entry:
@@ -6601,9 +6601,6 @@ EXTERN_C void JIT_WriteBarrier_End();
EXTERN_C void JIT_CheckedWriteBarrier_End();
EXTERN_C void JIT_ByRefWriteBarrier_End();

EXTERN_C void JIT_WriteBarrierHelper_SATORI(Object** dst, Object* ref, void* region);
EXTERN_C void JIT_WriteBarrierHelper_SATORI_End();

#endif // TARGET_X86

#if defined(TARGET_AMD64) && defined(_DEBUG)

@@ -6656,7 +6653,6 @@ bool IsIPInMarkedJitHelper(UINT_PTR uControlPc)
    CHECK_RANGE(JIT_MemCpy)

    CHECK_RANGE(JIT_WriteBarrier)
    CHECK_RANGE(JIT_WriteBarrierHelper_SATORI)
    CHECK_RANGE(JIT_CheckedWriteBarrier)
    CHECK_RANGE(JIT_ByRefWriteBarrier)
#if !defined(TARGET_ARM64)
@@ -1508,12 +1508,6 @@ SetCardsAfterBulkCopy(Object** dst, Object **src, size_t len)
    if (len >= sizeof(uintptr_t))
    {
#if FEATURE_SATORI_GC
        if ((((size_t)dst ^ (size_t)src) >> 21) == 0)
        {
            // same region
            return;
        }

        SatoriPage* page = PageForAddressCheckedSatori(dst);
        if (!page)
        {
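Note: the early-out above relies on region granularity. The write barrier's shifts (card = offset >> 9, group = offset >> 21) imply 2MB region granules, so two addresses whose bits above bit 20 are identical lie in the same granule and no cross-region card needs to be set for the copy. Illustrative helper (not part of the commit):

    // Sketch: dst and src share a 2MB-aligned region granule iff they differ only in the low 21 bits.
    static bool SameRegionGranuleSketch(void* dst, void* src)
    {
        return (((size_t)dst ^ (size_t)src) >> 21) == 0;
    }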