Mirror of https://github.com/VSadov/Satori.git (synced 2025-06-08 03:27:04 +09:00)
Commit aaec9ea378 (parent 4e162e3e9b): a few fixes
6 changed files with 36 additions and 39 deletions
@@ -1781,6 +1781,9 @@ void Ref_AgeHandles(uint32_t condemned, uint32_t maxgen, ScanContext* sc)
 #ifdef FEATURE_VARIABLE_HANDLES
         HNDTYPE_VARIABLE,
 #endif
+#if FEATURE_SATORI_GC
+        HNDTYPE_DEPENDENT,
+#endif
 #ifdef FEATURE_REFCOUNTED_HANDLES
         HNDTYPE_REFCOUNTED,
 #endif
@@ -1821,6 +1824,10 @@ void Ref_RejuvenateHandles(uint32_t condemned, uint32_t maxgen, ScanContext* sc)
 {
     WRAPPER_NO_CONTRACT;
 
+#if FEATURE_SATORI_GC
+    __UNREACHABLE();
+#endif
+
     LOG((LF_GC, LL_INFO10000, "Rejuvenating handles.\n"));
 
     // these are the handle types that need their ages updated
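Note: taken together, the two handle-table hunks above make Satori age dependent handles (HNDTYPE_DEPENDENT) along with the other tracked handle types, and mark Ref_RejuvenateHandles as unreachable when FEATURE_SATORI_GC is set. Below is a minimal, purely illustrative sketch of the feature-gated handle-type table pattern these hunks edit; the enumerators and names are hypothetical, not the runtime's actual declarations.

// Illustrative only: a feature-gated table of handle kinds and the scan loop
// that walks it, mirroring the shape of the array edited in Ref_AgeHandles.
#include <cstdint>
#include <cstdio>

enum HandleKind : uint32_t { Strong = 0, Variable = 1, Dependent = 2, RefCounted = 3 };

static const HandleKind g_agedKinds[] =
{
    Strong,
#if defined(FEATURE_SATORI_GC)
    Dependent,      // with Satori, dependent handles also take part in age updates
#endif
    RefCounted,
};

int main()
{
    for (HandleKind kind : g_agedKinds)
    {
        std::printf("aging handles of kind %u\n", static_cast<uint32_t>(kind));
    }
    return 0;
}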
@@ -485,15 +485,16 @@ SatoriObject* SatoriAllocator::AllocHuge(SatoriAllocationContext* context, size_
 
 SatoriWorkChunk* SatoriAllocator::TryGetWorkChunk()
 {
-    SatoriWorkChunk* chunk = m_WorkChunks->TryPop();
-
 #if _DEBUG
     static int i = 0;
     // simulate low memory case once in a while
-    if (!chunk && GCToOSInterface::GetCurrentProcessorNumber() == 2)
+    // This is just to force more overflows. Otherwise they are very rare.
+    if (i++ % 2 == 0)
     {
         return nullptr;
     }
 #endif
+    SatoriWorkChunk* chunk = m_WorkChunks->TryPop();
 
     while (!chunk && AddMoreWorkChunks())
     {
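Note: the debug-only branch in TryGetWorkChunk now fails deterministically on every other call instead of keying off the current processor number, so work-chunk overflow handling gets exercised regardless of scheduling. A small stand-alone sketch of this fault-injection pattern, with illustrative names and a pool-pop callback standing in for m_WorkChunks->TryPop():

// Minimal sketch of the debug-only fault injection now living in TryGetWorkChunk:
// in debug builds, fail every other request so the rare "no work chunk available"
// overflow path is exercised often.
#include <cstddef>

struct WorkChunk { /* payload elided */ };

WorkChunk* TryGetWorkChunkSketch(WorkChunk* (*popFromPool)())
{
#if defined(_DEBUG)
    // Not thread-safe, which is acceptable for debug-only fault injection.
    static int callCount = 0;
    if (callCount++ % 2 == 0)
    {
        return nullptr;              // pretend the pool is empty
    }
#endif
    return popFromPool();            // normal path: pop a chunk from the shared pool
}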
@@ -71,16 +71,10 @@ public:
 
     void Enter()
     {
-        int localBackoff = m_backoff;
-        while (VolatileLoadWithoutBarrier(&m_backoff) ||
-            !CompareExchangeNf(&m_backoff, localBackoff / 4 + 1, 0))
+        if (!CompareExchangeAcq(&m_backoff, 1, 0))
         {
-            localBackoff = Backoff(localBackoff);
+            EnterSpin();
         }
-
-#if !defined(TARGET_AMD64)
-        VolatileLoadBarrier();
-#endif
     }
 
     void Leave()
@@ -90,7 +84,18 @@ public:
     }
 
 private:
 
+    NOINLINE
+    void EnterSpin()
+    {
+        int localBackoff = m_backoff;
+        while (VolatileLoadWithoutBarrier(&m_backoff) ||
+            !CompareExchangeAcq(&m_backoff, localBackoff / 4 + 1, 0))
+        {
+            localBackoff = Backoff(localBackoff);
+        }
+    }
+
     int Backoff(int backoff)
     {
         // TUNING: do we care about 1-proc machines?
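Note: the lock's Enter is split into an inlineable fast path (a single acquire CAS) and a NOINLINE EnterSpin slow path that keeps the backoff logic. A minimal sketch of the same shape written with std::atomic follows; it stores 1 rather than a backoff value and omits the real Backoff bookkeeping, so treat it as a shape illustration only.

// Shape-only sketch of the new Enter/EnterSpin split, using std::atomic instead
// of the runtime's volatile/interlocked helpers.
#include <atomic>

#if defined(_MSC_VER)
#define NOINLINE_SKETCH __declspec(noinline)
#else
#define NOINLINE_SKETCH __attribute__((noinline))
#endif

class SpinLockSketch
{
public:
    void Enter()
    {
        int expected = 0;
        // Fast path: one acquire CAS; on contention fall into the out-of-line spin.
        if (!m_state.compare_exchange_strong(expected, 1,
                                             std::memory_order_acquire,
                                             std::memory_order_relaxed))
        {
            EnterSpin();
        }
    }

    void Leave()
    {
        m_state.store(0, std::memory_order_release);
    }

private:
    NOINLINE_SKETCH void EnterSpin()
    {
        int expected;
        do
        {
            // Wait until the lock looks free, then retry the acquire CAS.
            while (m_state.load(std::memory_order_relaxed) != 0)
            {
                // a real implementation backs off here
            }
            expected = 0;
        } while (!m_state.compare_exchange_weak(expected, 1,
                                                std::memory_order_acquire,
                                                std::memory_order_relaxed));
    }

    std::atomic<int> m_state{ 0 };
};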
@@ -108,16 +113,16 @@ private:
         return (backoff * 2 + 1) & 0x3FFF;
     }
 
-    static bool CompareExchangeNf(int volatile* destination, int exchange, int comparand)
+    static bool CompareExchangeAcq(int volatile* destination, int exchange, int comparand)
     {
 #ifdef _MSC_VER
 #if defined(TARGET_AMD64)
         return _InterlockedCompareExchange((long*)destination, exchange, comparand) == comparand;
 #else
-        return _InterlockedCompareExchange_nf((long*)destination, exchange, comparand) == comparand;
+        return _InterlockedCompareExchange_acq((long*)destination, exchange, comparand) == comparand;
 #endif
 #else
-        return __atomic_compare_exchange_n(destination, &comparand, exchange, true, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+        return __atomic_compare_exchange_n(destination, &comparand, exchange, true, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 #endif
     }
 };
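Note: renaming CompareExchangeNf to CompareExchangeAcq and switching to _InterlockedCompareExchange_acq / __ATOMIC_ACQUIRE gives the successful exchange acquire semantics, which is why the explicit VolatileLoadBarrier on non-AMD64 targets could be dropped from Enter; on AMD64 the plain _InterlockedCompareExchange is already a full barrier. A small std::atomic sketch of the property being relied on:

// Sketch of the acquire-CAS property: loads that follow a successful acquire
// exchange cannot be reordered ahead of it, so no separate load barrier is
// needed after taking the lock.
#include <atomic>

std::atomic<int> gate{ 0 };
int protectedData = 0;

bool TryEnterAcq()
{
    int expected = 0;
    // Acquire ordering on success; failure only needs relaxed ordering.
    return gate.compare_exchange_strong(expected, 1,
                                        std::memory_order_acquire,
                                        std::memory_order_relaxed);
}

int ReadUnderLock()
{
    while (!TryEnterAcq())
    {
        // spin
    }
    int value = protectedData;                  // cannot be hoisted above the CAS
    gate.store(0, std::memory_order_release);   // pairs with the next acquirer
    return value;
}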
@@ -302,9 +302,7 @@ void SatoriPage::DirtyCardsForRange(size_t start, size_t end)
         this->m_cardGroups[i * 2] = Satori::CardState::DIRTY;
     }
 
-    VolatileStoreBarrier();
-
-    this->m_cardState = Satori::CardState::DIRTY;
+    VolatileStore(&this->m_cardState, Satori::CardState::DIRTY);
 }
 
 // dirtying in nonblocking phases could be unordered since we do not clean concurrently with mutator
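Note: the explicit VolatileStoreBarrier followed by a plain store is collapsed into a single VolatileStore, i.e. a release store of the page-level card state, so the card-group writes are ordered before the page is published as DIRTY. An illustrative sketch of that publish ordering (layout and values are not Satori's actual ones):

// Illustrative publish ordering: dirty the fine-grained card groups with plain
// stores, then publish the page-level state with a release store, so a scanner
// that reads the page state with acquire also sees the dirtied groups.
#include <atomic>
#include <cstdint>

constexpr int kGroupCount = 64;
uint8_t cardGroups[kGroupCount];                 // written before the publish
std::atomic<uint8_t> pageCardState{ 0 };         // 0 = blank, 2 = dirty (made-up values)

void DirtyRangeSketch(int firstGroup, int lastGroup)
{
    for (int i = firstGroup; i <= lastGroup; i++)
    {
        cardGroups[i] = 2;                       // plain stores to the groups
    }
    // Release store orders the group writes before the page-state publication.
    pageCardState.store(2, std::memory_order_release);
}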
@@ -1036,12 +1036,11 @@ void SatoriRecycler::BlockingMark()
 
 void SatoriRecycler::DrainAndCleanWorker()
 {
-    bool revisitCards;
     do
     {
         DrainMarkQueues();
-        revisitCards = CleanCards();
-    } while (!m_workList->IsEmpty() || revisitCards);
+        CleanCards();
+    } while (!m_workList->IsEmpty() || HasDirtyCards());
 }
 
 void SatoriRecycler::MarkNewReachable()
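Note: DrainAndCleanWorker no longer threads a revisit flag out of CleanCards; it just re-queries HasDirtyCards() in the loop condition, so any page re-dirtied during cleaning triggers another pass. A compact sketch of the loop shape, with the SatoriRecycler methods stubbed as callables:

// Compact sketch of the reworked worker loop: alternate draining and cleaning
// until the work list is empty and no page remains dirty.
#include <functional>

void DrainAndCleanSketch(const std::function<void()>& drainMarkQueues,
                         const std::function<void()>& cleanCards,
                         const std::function<bool()>& workListEmpty,
                         const std::function<bool()>& hasDirtyCards)
{
    do
    {
        drainMarkQueues();   // may dirty cards when work chunks overflow
        cleanCards();        // may push newly discovered work onto the list
    } while (!workListEmpty() || hasDirtyCards());
}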
@@ -1147,16 +1146,7 @@ void SatoriRecycler::PushToMarkQueuesSlow(SatoriWorkChunk*& currentWorkChunk, Sa
         MaybeAskForHelp();
     }
 
-#ifdef _DEBUG
-    // Limit work queue in debug/chk.
-    // This is just to force more overflows. Otherwise they are very rare.
-    currentWorkChunk = nullptr;
-    if (m_workList->Count() < 10)
-#endif
-    {
-        currentWorkChunk = m_heap->Allocator()->TryGetWorkChunk();
-    }
-
+    currentWorkChunk = m_heap->Allocator()->TryGetWorkChunk();
     if (currentWorkChunk)
     {
         currentWorkChunk->Push(o);
@@ -1686,7 +1676,8 @@ void SatoriRecycler::ScheduleMarkAsChildRanges(SatoriObject* o)
         SatoriWorkChunk* chunk = m_heap->Allocator()->TryGetWorkChunk();
         if (chunk == nullptr)
         {
-            o->ContainingRegion()->ContainingPage()->DirtyCardsForRange(start, remains);
+            o->ContainingRegion()->ContainingPage()->DirtyCardsForRange(start, start + remains);
+            remains = 0;
             break;
         }
 
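Note: DirtyCardsForRange takes an end address (size_t start, size_t end), so passing the remaining byte count dirtied the wrong range; the fix passes start + remains and also zeroes remains so the tail is not scheduled again. A stand-in sketch of the contract and the corrected call:

// Stand-in for the DirtyCardsForRange contract: the second argument is the end
// address of a half-open [start, end) range, not a byte count.
#include <cstddef>
#include <cassert>

void DirtyCardsForRangeSketch(size_t start, size_t end)
{
    assert(end > start);
    // ... mark every card covering [start, end) as dirty ...
}

void ScheduleTailSketch(size_t start, size_t& remains)
{
    // Wrong: passes a length where an address is expected.
    // DirtyCardsForRangeSketch(start, remains);

    // Right: convert the remaining length into an end address.
    DirtyCardsForRangeSketch(start, start + remains);
    remains = 0;    // the tail has been handled by dirtying cards
}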
@@ -2261,10 +2252,9 @@ bool SatoriRecycler::HasDirtyCards()
 }
 
 // cleaning is not concurrent, but could be parallel
-bool SatoriRecycler::CleanCards()
+void SatoriRecycler::CleanCards()
 {
     SatoriWorkChunk* dstChunk = nullptr;
-    bool revisit = false;
 
     m_heap->ForEachPage(
         [&](SatoriPage* page)
@@ -2366,11 +2356,9 @@ bool SatoriRecycler::CleanCards()
             }
         }
 
-        // we do not see more cleaning work so clean the page state, unless the page went dirty while we were working on it
-        // in such case record a missed clean to revisit the whole deal.
+        // we do not see more cleaning work so clean the page state, use interlocked in case the page went dirty while we were working on it
         int8_t origState = Interlocked::CompareExchange(&page->CardState(), Satori::CardState::REMEMBERED, Satori::CardState::PROCESSING);
         _ASSERTE(origState != Satori::CardState::BLANK);
-        revisit |= origState == Satori::CardState::DIRTY;
     }
 }
 );
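Note: the per-page revisit accumulation is redundant because the page state already records a concurrent re-dirty: the interlocked exchange from PROCESSING to REMEMBERED fails if a mutator set the page DIRTY meanwhile, leaving it DIRTY for HasDirtyCards() to find on the next DrainAndCleanWorker iteration. An illustrative sketch with made-up state values:

// Illustrative page-state handoff: if the page was re-dirtied while it was being
// cleaned, the CAS from Processing back to Remembered fails and the page simply
// stays Dirty for the next pass.
#include <atomic>
#include <cstdint>

enum CardStateSketch : int8_t { Blank = 0, Remembered = 1, Processing = 2, Dirty = 3 };

std::atomic<int8_t> pageState{ static_cast<int8_t>(Processing) };

void FinishCleaningSketch()
{
    int8_t expected = Processing;
    // Succeeds only if nobody dirtied the page while it was being cleaned.
    if (!pageState.compare_exchange_strong(expected, Remembered))
    {
        // expected now holds Dirty; leave the page as-is so a later
        // HasDirtyCards() check rediscovers it.
    }
}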
@@ -2379,8 +2367,6 @@ bool SatoriRecycler::CleanCards()
     {
         m_workList->Push(dstChunk);
     }
-
-    return revisit;
 }
 
 void SatoriRecycler::UpdatePointersThroughCards()
@@ -228,7 +228,7 @@ private:
 
     bool HasDirtyCards();
     bool ScanDirtyCardsConcurrent(int64_t deadline);
-    bool CleanCards();
+    void CleanCards();
     bool MarkHandles(int64_t deadline = 0);
     void ShortWeakPtrScan();
     void ShortWeakPtrScanWorker();