1
0
Fork 0
mirror of https://github.com/VSadov/Satori.git synced 2025-06-08 03:27:04 +09:00

Reduce pacing frequency. (#57)

* DOTNET_gcPace knob

* less frequent alloc pacing
This commit is contained in:
Vladimir Sadov 2025-06-06 20:50:59 -07:00
parent a6a9a025e5
commit fea89fba5f
4 changed files with 54 additions and 31 deletions

View file

@ -149,6 +149,7 @@ public:
BOOL_CONFIG (Gen1GC, "gcGen1", NULL, true, "Specifies whether Gen1 GC can be performed") \
BOOL_CONFIG (UseTHP, "gcTHP", NULL, true, "Specifies whether Transparent Huge Pages can be used. (Linux only)") \
BOOL_CONFIG (TrimmigGC, "gcTrim", NULL, true, "Specifies whether background trimming is enabled") \
BOOL_CONFIG (PacingGC, "gcPace", NULL, true, "Specifies whether allocation pacing is enabled") \
INT_CONFIG (GCRate, "gcRate", NULL, -1, "Specifies soft min limit for time between GCs in milliseconds. -1 - default") \
INT_CONFIG (GCSpin, "gcSpin", NULL, -1, "Spin") \
INT_CONFIG (Gen2Target, "gcGen2Target", NULL, -1, "Specifies target for Gen2 GC (in terms of % of the last known size)") \

View file

@ -321,12 +321,23 @@ const size_t minSharedAllocDelay = 128;
SatoriObject* SatoriAllocator::AllocRegular(SatoriAllocationContext* context, size_t size, uint32_t flags)
{
// tryAgain:
if (!context->RegularRegion())
// when allocations cross certain thresholds, check if GC should start or help is needed.
size_t curAlloc = context->alloc_bytes + context->alloc_bytes_uoh;
size_t expectedAlloc = max(size, SatoriUtil::MinZeroInitSize());
size_t change = (curAlloc ^ (curAlloc + expectedAlloc));
if (curAlloc == 0 || change >= Satori::REGION_SIZE_GRANULARITY)
{
m_heap->Recycler()->MaybeTriggerGC(gc_reason::reason_alloc_soh);
}
else if (change >= Satori::PACE_BUDGET)
{
m_heap->Recycler()->HelpOnce();
}
// tryAgain:
if (!context->RegularRegion())
{
SatoriObject* freeObj = context->alloc_ptr != 0 ? context->FinishAllocFromShared() : nullptr;
size_t usecNow = m_heap->Recycler()->GetNowUsecs();
@ -348,18 +359,6 @@ SatoriObject* SatoriAllocator::AllocRegular(SatoriAllocationContext* context, si
}
}
}
else
{
size_t expectedAlloc = max(size, SatoriUtil::MinZeroInitSize());
if ((context->alloc_bytes ^ (context->alloc_bytes + expectedAlloc)) >= Satori::REGION_SIZE_GRANULARITY)
{
m_heap->Recycler()->MaybeTriggerGC(gc_reason::reason_alloc_soh);
}
else
{
m_heap->Recycler()->HelpOnce();
}
}
SatoriRegion* region = context->RegularRegion();
_ASSERTE(region == nullptr || region->IsAttachedToAllocatingOwner());
@ -678,29 +677,31 @@ SatoriObject* SatoriAllocator::AllocLarge(SatoriAllocationContext* context, size
return AllocHuge(context, size, flags);
}
// when allocations cross certain thresholds, check if GC should start or help is needed.
// when allocations cross certain thresholds, check if GC should start or help is needed.
size_t curAlloc = context->alloc_bytes + context->alloc_bytes_uoh;
size_t expectedAlloc = size;
size_t change = (curAlloc ^ (curAlloc + expectedAlloc));
if (curAlloc == 0 || change >= Satori::REGION_SIZE_GRANULARITY)
{
m_heap->Recycler()->MaybeTriggerGC(gc_reason::reason_alloc_soh);
}
else if (change >= Satori::PACE_BUDGET)
{
m_heap->Recycler()->HelpOnce();
}
tryAgain:
if (!context->LargeRegion() &&
size < Satori::REGION_SIZE_GRANULARITY / 2)
{
m_heap->Recycler()->MaybeTriggerGC(gc_reason::reason_alloc_loh);
//m_largeAllocLock.Enter();
if (m_largeAllocLock.TryEnter())
{
return AllocLargeShared(context, size, flags);
}
}
else
{
if ((context->alloc_bytes_uoh ^ (context->alloc_bytes_uoh + size)) >= Satori::REGION_SIZE_GRANULARITY)
{
m_heap->Recycler()->MaybeTriggerGC(gc_reason::reason_alloc_soh);
}
else
{
m_heap->Recycler()->HelpOnce();
}
}
SatoriRegion* region = context->LargeRegion();
while (true)
@ -941,14 +942,25 @@ SatoriObject* SatoriAllocator::AllocPinned(SatoriAllocationContext* context, siz
return AllocHuge(context, size, flags);
}
m_heap->Recycler()->MaybeTriggerGC(gc_reason::reason_alloc_soh);
// if can't get a lock, let AllocLarge handle this.
if (!m_pinnedAllocLock.TryEnter())
{
return AllocLarge(context, size, flags);
}
// when allocations cross certain thresholds, check if GC should start or help is needed.
size_t curAlloc = context->alloc_bytes + context->alloc_bytes_uoh;
size_t expectedAlloc = size;
size_t change = (curAlloc ^ (curAlloc + expectedAlloc));
if (curAlloc == 0 || change >= Satori::REGION_SIZE_GRANULARITY)
{
m_heap->Recycler()->MaybeTriggerGC(gc_reason::reason_alloc_soh);
}
else if (change >= Satori::PACE_BUDGET)
{
m_heap->Recycler()->HelpOnce();
}
SatoriRegion* region = m_pinnedRegion;
while (true)
{

View file

@ -708,7 +708,8 @@ void SatoriRecycler::HelpOnce()
if (m_gcState != GC_STATE_NONE)
{
if (m_gcState == GC_STATE_CONCURRENT)
if (m_gcState == GC_STATE_CONCURRENT &&
(SatoriUtil::IsPacingEnabled() || m_activeWorkers == 0))
{
if (m_condemnedGeneration == 0)
{

View file

@ -79,6 +79,9 @@ namespace Satori
// if we have more than this much and work list is empty we can share half
const static int SHARE_WORK_THRESHOLD = 8;
// if we allocated this much, check if help is needed
const static int PACE_BUDGET = REGION_SIZE_GRANULARITY / 16;
// address bits set to track finalizable that needs to be scheduled to F-queue
const static size_t FINALIZATION_PENDING = 1;
@ -234,6 +237,12 @@ public:
return (GCConfig::GetTrimmigGC());
}
// DOTNET_gcPace
// Returns whether allocation pacing is enabled. Reads the PacingGC knob
// (config name "gcPace", default true per its BOOL_CONFIG declaration).
// Used by SatoriRecycler::HelpOnce to decide if an allocating thread
// should help with concurrent GC work even when workers are active.
static bool IsPacingEnabled()
{
return (GCConfig::GetPacingGC());
}
// DOTNET_GCLatencyMode
static bool IsLowLatencyMode()
{