mirror of https://github.com/VSadov/Satori.git synced 2025-06-08 03:27:04 +09:00
vsadov 2025-03-05 16:34:30 -08:00
parent 69a29160f0
commit 671d080642
2 changed files with 39 additions and 20 deletions

@@ -544,6 +544,9 @@ bool SatoriRecycler::HelpOnceCore(bool minQuantum)
     bool result;
     int concurrentCleaningState;
+    // NB: m_ccHelpersNum is separate from m_activeWorkers
+    //     because app threads may also be helping
+    Interlocked::Increment(&m_ccHelpersNum);
     {
         concurrentCleaningState = m_concurrentCleaningState;
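Aside: a minimal sketch of the counter split described in the new comment, using std::atomic in place of the runtime's Interlocked helpers; the g_* names are hypothetical stand-ins for the member fields, not the actual code.

    #include <atomic>

    // Hypothetical stand-ins for m_ccHelpersNum and m_activeWorkers.
    std::atomic<int> g_ccHelpersNum{0};   // anyone currently helping with concurrent cleaning
    std::atomic<int> g_activeWorkers{0};  // dedicated GC worker threads only

    void HelpOnceSketch()
    {
        // App threads pass through here too, which is why helpers are
        // counted separately from the dedicated workers.
        g_ccHelpersNum.fetch_add(1);
        // ... perform one quantum of concurrent cleaning ...
        g_ccHelpersNum.fetch_sub(1);
    }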
@@ -565,18 +568,7 @@ bool SatoriRecycler::HelpOnceCore(bool minQuantum)
     {
         if (Interlocked::CompareExchange(&m_gcState, GC_STATE_BLOCKING, GC_STATE_CONCURRENT) == GC_STATE_CONCURRENT)
         {
-            m_activeWorkerFn = nullptr;
-            if (IsWorkerThread())
-            {
-                Interlocked::Decrement(&m_activeWorkers);
-            }
             BlockingCollect();
-            if (IsWorkerThread())
-            {
-                Interlocked::Increment(&m_activeWorkers);
-            }
         }
     }
@@ -901,7 +893,6 @@ treatAsNoWork:
     // in such a degenerate case we still may want to wrap it up and block.
     if (Interlocked::CompareExchange(&m_gcState, GC_STATE_BLOCKING, GC_STATE_CONCURRENT) == GC_STATE_CONCURRENT)
     {
-        m_activeWorkerFn = nullptr;
         BlockingCollect();
     }
 }
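Both removal sites above now reduce to the same pattern: a single compare-exchange elects the one thread that transitions the GC from concurrent to blocking, while the worker bookkeeping moves into BlockingCollectImpl (next hunk). A hedged sketch of that election pattern, with std::atomic standing in for Interlocked::CompareExchange and the free-standing form being ours:

    #include <atomic>

    enum GcState : int { GC_STATE_NONE, GC_STATE_CONCURRENT, GC_STATE_BLOCKING, GC_STATE_BLOCKED };

    std::atomic<int> g_gcState{GC_STATE_CONCURRENT};  // hypothetical stand-in for m_gcState

    // Exactly one thread wins the CONCURRENT -> BLOCKING transition and goes on
    // to run the blocking collection; everyone else sees the CAS fail.
    bool TryEnterBlockingStage()
    {
        int expected = GC_STATE_CONCURRENT;
        return g_gcState.compare_exchange_strong(expected, GC_STATE_BLOCKING);
    }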
@@ -1223,15 +1214,21 @@ void SatoriRecycler::BlockingCollectImpl()
     size_t time = GCToOSInterface::QueryPerformanceCounter();
 #endif
+    // we should not normally have active workers here;
+    // this is just in case, since we support forcing the blocking stage for Collect or OOM situations
+    m_activeWorkerFn = nullptr;
+    if (IsWorkerThread())
+    {
+        // do not count ourselves as a worker, so that we do not wait forever when we need workers to leave.
+        Interlocked::Decrement(&m_activeWorkers);
+    }
+    else
+    {
+        // make sure everyone sees the new Fn before waiting for workers to drain.
+        MemoryBarrier();
+    }
     while (m_activeWorkers > 0)
     {
-        // since we are waiting for concurrent workers to stop, we could as well try helping
-        if (!HelpOnceCore(/*minQuantum*/ true))
-        {
-            YieldProcessor();
-        }
+        YieldProcessor();
     }
 
     m_gcState = GC_STATE_BLOCKED;
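Condensed, the drain protocol that BlockingCollectImpl now owns looks roughly like the sketch below; std::atomic and std::this_thread::yield() stand in for the Interlocked helpers and YieldProcessor(), and the worker-thread check is passed in as a parameter rather than queried:

    #include <atomic>
    #include <thread>

    std::atomic<void(*)()> g_activeWorkerFn{nullptr};  // stand-in for m_activeWorkerFn
    std::atomic<int>       g_activeWorkers{0};         // stand-in for m_activeWorkers

    void DrainWorkersSketch(bool onWorkerThread)
    {
        g_activeWorkerFn.store(nullptr);   // retire the concurrent work function
        if (onWorkerThread)
        {
            g_activeWorkers.fetch_sub(1);  // do not wait for ourselves
        }
        else
        {
            // mirrors the MemoryBarrier() in the source: publish the
            // cleared fn before observing the worker count
            std::atomic_thread_fence(std::memory_order_seq_cst);
        }

        while (g_activeWorkers.load() > 0)
        {
            std::this_thread::yield();     // YieldProcessor() in the source
        }

        if (onWorkerThread)
        {
            g_activeWorkers.fetch_add(1);  // undo the adjustment once drained
        }
    }

The trailing re-increment corresponds to the "undo the adjustment" hunk that follows.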
@@ -1285,6 +1282,12 @@ void SatoriRecycler::BlockingCollectImpl()
     Relocate();
     Update();
 
     _ASSERTE(m_activeWorkers == 0);
+    // we are done using workers.
+    // undo the adjustment if we had to do one
+    if (IsWorkerThread()) m_activeWorkers++;
+
     m_gcCount[0]++;
     m_gcCount[1]++;
 
     if (m_condemnedGeneration == 2)
@@ -4401,7 +4404,7 @@ void SatoriRecycler::DrainDeferredSweepQueueWorkerFn()
         {
             SweepAndReturnRegion(curRegion);
             Interlocked::Decrement(&m_deferredSweepCount);
-        } while ((curRegion = m_deferredSweepRegions->TryPop()));
+        } while (m_activeWorkerFn && (curRegion = m_deferredSweepRegions->TryPop()));
     }
 }
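The new loop condition makes deferred sweeping cooperative: the worker function is checked before popping another region, so a departing worker never takes a region it will not sweep. A self-contained sketch, with a mutex-guarded deque standing in for the lock-free m_deferredSweepRegions queue and the other names hypothetical:

    #include <atomic>
    #include <deque>
    #include <mutex>

    std::atomic<bool> g_workerFnPublished{true};  // stands in for m_activeWorkerFn != nullptr
    std::deque<int>   g_regions;                  // each int stands in for a region
    std::mutex        g_regionsLock;

    static bool TryPop(int& region)
    {
        std::lock_guard<std::mutex> hold(g_regionsLock);
        if (g_regions.empty())
            return false;
        region = g_regions.front();
        g_regions.pop_front();
        return true;
    }

    void DrainDeferredSweepSketch()
    {
        int region;
        if (!TryPop(region))
            return;
        do
        {
            // ... sweeping and returning 'region' would go here ...
            (void)region;
        }
        // check the published fn *before* popping again, so a worker that
        // must leave never takes a region it will not finish
        while (g_workerFnPublished.load() && TryPop(region));
    }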

@@ -271,6 +271,14 @@ void ThreadStore::SuspendAllThreads(bool waitForGCEvent)
     // set the global trap for pinvoke leave and return
     RhpTrapThreads |= (uint32_t)TrapThreadsFlags::TrapThreads;
 
+    // TODO: VS remove
+    //long orig = _InterlockedExchange((volatile long*)&RhpTrapThreads, (long)TrapThreadsFlags::TrapThreads);
+    //if (orig != 0)
+    //{
+    //    printf("################################## was not NULL? \n");
+    //    abort();
+    //}
+
     // Our lock-free algorithm depends on flushing write buffers of all processors running RH code. The
     // reason for this is that we essentially implement Dekker's algorithm, which requires write ordering.
     PalFlushProcessWriteBuffers();
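For context on the comment above, the write-ordering requirement can be shown with a two-flag Dekker-style fragment. The names are our stand-ins (g_trap for RhpTrapThreads, g_inCoop for a mutator's cooperative-mode flag), not the runtime's:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> g_trap{0};    // stand-in for RhpTrapThreads
    std::atomic<uint32_t> g_inCoop{0};  // stand-in for a thread's coop-mode flag

    bool SuspenderSeesMutator()
    {
        g_trap.store(1);                // my store first...
        return g_inCoop.load() != 0;    // ...then read the other side's flag
    }

    bool MutatorSeesTrap()
    {
        g_inCoop.store(1);
        return g_trap.load() != 0;
    }

    // With sequentially consistent accesses (or an asymmetric barrier such as
    // a process-wide write-buffer flush on the suspender's side), at least one
    // of the two functions must observe the other's store. With plain,
    // reorderable accesses both could return false, which is exactly the
    // store/load reordering Dekker's algorithm forbids.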
@@ -358,6 +366,14 @@ void ThreadStore::ResumeAllThreads(bool waitForGCEvent)
     RhpTrapThreads &= ~(uint32_t)TrapThreadsFlags::TrapThreads;
 
+    // TODO: VS remove
+    //long orig = _InterlockedExchange((volatile long*)&RhpTrapThreads, (long)0);
+    //if (orig == 0)
+    //{
+    //    printf("################################## was already NULL? \n");
+    //    abort();
+    //}
+
     RhpSuspendingThread = NULL;
 
     if (waitForGCEvent)
     {