Mirror of https://github.com/VSadov/Satori.git (synced 2025-06-08 03:27:04 +09:00)
Fix scenarios around committing the page map (#49)
* Make page map reservation and commit more precise.
* Fix for arm64.
* Better comment.
* One more comment.
parent 79ae1ddf76
commit cb005b7ced

6 changed files with 49 additions and 21 deletions
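Note on the reservation math below: m_pageMap is declared as a one-element trailing array, so sizeof(SatoriHeap) already includes one map entry plus any tail padding, and the old "mapSize + sizeof(SatoriHeap)" over-reserved. offsetof(SatoriHeap, m_pageMap) measures exactly the header bytes that precede the map. A minimal sketch of the difference, with hypothetical names standing in for the real layout:

    #include <cstddef>
    #include <cstdio>

    // Stand-in for the SatoriHeap layout: a header followed by a page map
    // declared as a one-element array that really extends past the struct.
    struct Header
    {
        size_t committedMapSize;
        void*  pageMap[1];
    };

    int main()
    {
        const size_t mapSize = 256 * sizeof(void*);
        // sizeof counts one pageMap entry (and tail padding); offsetof counts
        // only the bytes in front of the map, which is what reservation needs.
        printf("sizeof-based reserve:   %zu\n", mapSize + sizeof(Header));
        printf("offsetof-based reserve: %zu\n", mapSize + offsetof(Header, pageMap));
    }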
@@ -76,22 +76,28 @@ static void UpdateWriteBarrier(void* pageMap, void* pageByteMap, size_t highest_
 SatoriHeap* SatoriHeap::Create()
 {
     const int mapSize = (1 << pageCountBits) * sizeof(SatoriPage*);
-    size_t rezerveSize = mapSize + sizeof(SatoriHeap);
+    size_t reserveSize = mapSize + offsetof(SatoriHeap, m_pageMap);
+    reserveSize = ALIGN_UP(reserveSize, SatoriUtil::CommitGranularity());
+    void* reserved = GCToOSInterface::VirtualReserve(nullptr, reserveSize, SatoriUtil::UseTHP());
+    if (reserved == nullptr)
+    {
+        return nullptr;
+    }
 
-    void* reserved = GCToOSInterface::VirtualReserve(rezerveSize, 0, VirtualReserveFlags::None);
-    size_t commitSize = min(sizeof(SatoriHeap), rezerveSize);
+    size_t commitSize = offsetof(SatoriHeap, m_pageMap) + sizeof(SatoriPage*);
     commitSize = ALIGN_UP(commitSize, SatoriUtil::CommitGranularity());
+    _ASSERTE(commitSize <= reserveSize);
     if (!GCToOSInterface::VirtualCommit(reserved, commitSize))
     {
         // failure
-        GCToOSInterface::VirtualRelease(reserved, rezerveSize);
         return nullptr;
     }
 
     SatoriHeap* heap = (SatoriHeap*)reserved;
     SatoriHeap::s_pageByteMap = heap->m_pageByteMap;
     heap->m_reservedMapSize = mapSize;
-    heap->m_committedMapSize = commitSize - sizeof(SatoriHeap) + sizeof(SatoriPage*);
+    heap->m_committedMapSize = commitSize - offsetof(SatoriHeap, m_pageMap);
+    heap->m_committedBytes = commitSize;
     InitWriteBarrier(heap->m_pageMap, s_pageByteMap, heap->CommittedMapLength() * Satori::PAGE_SIZE_GRANULARITY - 1);
     heap->m_mapLock.Initialize();
     heap->m_nextPageIndex = 1;
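The initial commit now also drives m_committedMapSize directly: whatever the ALIGN_UP rounding commits beyond the header is usable map space. Illustrative arithmetic only, assuming a 4 KB commit granularity and a made-up header size (neither value is taken from the real layout):

    #include <cstddef>

    constexpr size_t AlignUp(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

    int main()
    {
        const size_t headerSize  = 0x1A8;   // stand-in for offsetof(SatoriHeap, m_pageMap)
        const size_t granularity = 0x1000;  // stand-in for SatoriUtil::CommitGranularity()

        // Commit the header plus at least one map entry, rounded to granularity.
        size_t commitSize = AlignUp(headerSize + sizeof(void*), granularity); // 0x1000
        // Everything past the header in that commit is usable page map.
        size_t committedMapSize = commitSize - headerSize;                    // 0xE58
        return committedMapSize != 0xE58;                                     // sanity check
    }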
@@ -100,27 +106,29 @@ SatoriHeap* SatoriHeap::Create()
     heap->m_allocator.Initialize(heap);
     heap->m_recycler.Initialize(heap);
     heap->m_finalizationQueue.Initialize(heap);
 
-    heap->m_committedBytes = commitSize;
     return heap;
 }
 
 bool SatoriHeap::CommitMoreMap(size_t currentCommittedMapSize)
 {
-    void* commitFrom = (void*)((size_t)&m_pageMap + currentCommittedMapSize);
-    size_t commitSize = SatoriUtil::CommitGranularity();
-
     SatoriLockHolder holder(&m_mapLock);
-    if (currentCommittedMapSize <= m_committedMapSize)
+    if (currentCommittedMapSize == m_committedMapSize)
     {
+        void* commitFrom = (void*)((size_t)&m_pageMap + currentCommittedMapSize);
+        size_t commitSize = SatoriUtil::CommitGranularity();
+        _ASSERTE(m_committedMapSize + commitSize <= m_reservedMapSize);
         if (GCToOSInterface::VirtualCommit(commitFrom, commitSize))
         {
             // we did the commit
-            m_committedMapSize = min(currentCommittedMapSize + commitSize, m_reservedMapSize);
+            m_committedMapSize += commitSize;
             IncBytesCommitted(commitSize);
             UpdateWriteBarrier(m_pageMap, s_pageByteMap, CommittedMapLength() * Satori::PAGE_SIZE_GRANULARITY - 1);
         }
     }
+    else
+    {
+        _ASSERTE(m_committedMapSize > currentCommittedMapSize);
+    }
 
     // either we did commit or someone else did, otherwise this is a failure.
     return m_committedMapSize > currentCommittedMapSize;
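CommitMoreMap now commits only when the caller's snapshot matches the current committed size, so exactly one thread performs each growth step while callers with stale snapshots fall through to the final success check. A minimal sketch of the pattern, with std::mutex standing in for SatoriLock and a counter standing in for VirtualCommit:

    #include <mutex>

    struct MapGrower
    {
        std::mutex lock;
        size_t committed = 0;
        static const size_t reserved = 1 << 20;
        static const size_t step = 4096;

        bool CommitMore(size_t seenCommitted)
        {
            std::lock_guard<std::mutex> holder(lock);
            if (seenCommitted == committed)
            {
                // Our snapshot is current: this thread performs the growth.
                if (committed + step <= reserved)
                    committed += step;      // stand-in for VirtualCommit
            }
            // Otherwise another thread already grew past our snapshot and
            // there is nothing left to do. Success either way means the map
            // is now larger than the caller saw.
            return committed > seenCommitted;
        }
    };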
@@ -133,7 +141,8 @@ bool SatoriHeap::TryAddRegularPage(SatoriPage*& newPage)
     for (size_t i = nextPageIndex; i < maxIndex; i++)
     {
         size_t currentCommittedMapSize = m_committedMapSize;
-        if (i >= currentCommittedMapSize && !CommitMoreMap(currentCommittedMapSize))
+        size_t requiredMapSize = (i + 1) * sizeof(SatoriPage*);
+        if (requiredMapSize > currentCommittedMapSize && !CommitMoreMap(currentCommittedMapSize))
         {
             break;
         }
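The commit check in TryAddRegularPage now compares like units: m_committedMapSize is a byte count, while i is a page-map index, so entry i is usable only once (i + 1) pointer-sized slots are committed. A tiny illustration of the byte-precise form (8-byte pointers assumed):

    #include <cstddef>

    // Entry i of the page map is addressable once the map commit covers
    // (i + 1) pointer-sized slots; e.g. i = 511 needs 512 * 8 = 4096 bytes.
    bool MapEntryCommitted(size_t i, size_t committedMapSize)
    {
        size_t requiredMapSize = (i + 1) * sizeof(void*);
        return requiredMapSize <= committedMapSize;
    }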
@@ -194,7 +203,7 @@ SatoriPage* SatoriHeap::AddLargePage(size_t minSize)
     for (size_t i = m_nextPageIndex; i < maxIndex; i++)
     {
         size_t currentCommittedMapSize;
-        size_t requiredMapSize = i + mapMarkCount * sizeof(SatoriPage*);
+        size_t requiredMapSize = (i + mapMarkCount) * sizeof(SatoriPage*);
         while (requiredMapSize > ((currentCommittedMapSize = m_committedMapSize)))
         {
             if (!CommitMoreMap(currentCommittedMapSize))
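The AddLargePage change is the same units fix plus an operator-precedence bug: without parentheses only mapMarkCount was scaled to bytes, and the index i was added in raw entry units. Illustrated with assumed values (i = 10, mapMarkCount = 4, 8-byte pointers):

    #include <cstddef>

    size_t Wrong(size_t i, size_t mapMarkCount)
    {
        return i + mapMarkCount * sizeof(void*);    // 10 + 32 = 42: mixes entries and bytes
    }

    size_t Right(size_t i, size_t mapMarkCount)
    {
        return (i + mapMarkCount) * sizeof(void*);  // 14 * 8 = 112 bytes of map needed
    }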
@@ -136,8 +136,14 @@ public:
     }
 
 private:
-    // we need to cover the whole possible address space (48bit, 52 may be supported as needed).
-    static const int availableAddressSpaceBits = 47;
+    // We need to cover the whole addressable space, but no need to handle inaccessible VA.
+    // Accessing that should not happen for any valid reason, and will fail anyway.
+    // - the canonical user VA on x64 uses lower 47 bits (128 TB)
+    // - arm64 allows 48 lower bits (256 TB), linux uses that, but other OS use the same range as on x64.
+    // - we can eventually support 53 bit (4 PB) and 57 bit (??) extensions, it is too early to worry about that.
+    //
+    // For consistency and uniform testing, we will default to 48 bit VA.
+    static const int availableAddressSpaceBits = 48;
     static const int pageCountBits = availableAddressSpaceBits - Satori::PAGE_BITS;
 
     int8_t m_pageByteMap[1 << pageCountBits]{};
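With the default raised to 48 bits, the byte map and page map sizes follow directly from pageCountBits. A back-of-envelope sketch, assuming PAGE_BITS corresponds to 1 GB pages (an illustrative assumption, not Satori's actual constant):

    #include <cstdio>

    int main()
    {
        const int availableAddressSpaceBits = 48;
        const int pageBits = 30;   // assumed 1 GB page granularity, for illustration
        const int pageCountBits = availableAddressSpaceBits - pageBits;

        const long long entries = 1LL << pageCountBits;               // 262144
        printf("page byte map: %lld KB\n", entries / 1024);           // 256 KB
        printf("page map (reserved): %lld MB\n",
               entries * 8 / (1024 * 1024));                          // 2 MB
    }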
@@ -154,6 +160,12 @@ private:
     size_t m_usedMapLength;
     size_t m_nextPageIndex;
     SatoriLock m_mapLock;
 
+#if _DEBUG
+    // make the Heap obj a bit larger to force some page map commits earlier.
+    int8_t dummy[0x7A70]{};
+#endif
+
     SatoriPage* m_pageMap[1];
 
     bool CommitMoreMap(size_t currentlyCommitted);
@@ -58,7 +58,6 @@ SatoriPage* SatoriPage::InitializeAt(size_t address, size_t pageSize, SatoriHeap
 
     if (!GCToOSInterface::VirtualCommit((void*)address, commitSize))
     {
-        GCToOSInterface::VirtualRelease((void*)address, pageSize);
         return nullptr;
     }
 
@@ -1239,11 +1239,14 @@ void SatoriRecycler::BlockingCollectImpl()
     Relocate();
     Update();
 
-    _ASSERTE(m_activeWorkers == 0);
-
     // we are done using workers.
     // undo the adjustment if we had to do one
-    if (IsWorkerThread()) m_activeWorkers++;
+    if (IsWorkerThread())
+    {
+        // interlocked because even though we saw no workers after m_activeWorkerFn was reset,
+        // some delayed workers may later still come and look for work.
+        Interlocked::Increment(&m_activeWorkers);
+    }
 
     m_gcCount[0]++;
     m_gcCount[1]++;
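Replacing the plain m_activeWorkers++ with Interlocked::Increment matters because, as the new comment says, delayed workers may still touch the counter after it was observed at zero. A minimal sketch of the race, with std::atomic standing in for the Interlocked API:

    #include <atomic>

    std::atomic<int> activeWorkers{0};

    // A delayed worker that wakes up after the work callback was reset:
    // it still increments and decrements the shared counter.
    void LateWorker()
    {
        activeWorkers.fetch_add(1);
        // ... finds no work to do ...
        activeWorkers.fetch_sub(1);
    }

    // The GC thread undoing its own adjustment: a plain ++ could lose an
    // update against LateWorker's concurrent modifications; fetch_add
    // (like Interlocked::Increment) makes the read-modify-write atomic.
    void UndoAdjustment(bool isWorkerThread)
    {
        if (isWorkerThread)
        {
            activeWorkers.fetch_add(1);
        }
    }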
@@ -637,7 +637,7 @@ void* GCToOSInterface::VirtualReserve(void* location, size_t size, bool useTHP)
         return NULL;
     }
 
-    if (pRetVal != location)
+    if (location != nullptr && pRetVal != location)
     {
         munmap(pRetVal, size);
         return NULL;
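The VirtualReserve fix follows from POSIX mmap semantics: a nullptr hint means "any address is fine", so a mismatch is only an error when the caller actually requested a specific location. A minimal sketch of the corrected check (hypothetical wrapper, not the runtime's actual code):

    #include <sys/mman.h>
    #include <cstddef>

    void* Reserve(void* location, size_t size)
    {
        void* p = mmap(location, size, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return nullptr;

        // The old "p != location" check rejected every nullptr-hint call,
        // since the kernel essentially never returns address 0.
        if (location != nullptr && p != location)
        {
            munmap(p, size);
            return nullptr;
        }
        return p;
    }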
@@ -880,6 +880,11 @@ int WriteBarrierManager::UpdateWriteWatchAndCardTableLocations(bool isRuntimeSus
     // If we are told that we require an upper bounds check (GC did some heap reshuffling),
     // we need to switch to the WriteBarrier_PostGrow function for good.
 
+#ifdef FEATURE_SATORI_GC
+    // as of now satori does not patch barriers on x64, no need to go further.
+    return SWB_PASS;
+#endif
+
     WriteBarrierType newType;
     if (NeedDifferentWriteBarrier(bReqUpperBoundsCheck, g_region_use_bitwise_write_barrier, &newType))
     {