diff --git a/src/coreclr/gc/satori/SatoriHeap.cpp b/src/coreclr/gc/satori/SatoriHeap.cpp index 360c3515868..5545d8e91fd 100644 --- a/src/coreclr/gc/satori/SatoriHeap.cpp +++ b/src/coreclr/gc/satori/SatoriHeap.cpp @@ -76,22 +76,28 @@ static void UpdateWriteBarrier(void* pageMap, void* pageByteMap, size_t highest_ SatoriHeap* SatoriHeap::Create() { const int mapSize = (1 << pageCountBits) * sizeof(SatoriPage*); - size_t rezerveSize = mapSize + sizeof(SatoriHeap); + size_t reserveSize = mapSize + offsetof(SatoriHeap, m_pageMap); + reserveSize = ALIGN_UP(reserveSize, SatoriUtil::CommitGranularity()); + void* reserved = GCToOSInterface::VirtualReserve(nullptr, reserveSize, SatoriUtil::UseTHP()); + if (reserved == nullptr) + { + return nullptr; + } - void* reserved = GCToOSInterface::VirtualReserve(rezerveSize, 0, VirtualReserveFlags::None); - size_t commitSize = min(sizeof(SatoriHeap), rezerveSize); + size_t commitSize = offsetof(SatoriHeap, m_pageMap) + sizeof(SatoriPage*); commitSize = ALIGN_UP(commitSize, SatoriUtil::CommitGranularity()); + _ASSERTE(commitSize <= reserveSize); if (!GCToOSInterface::VirtualCommit(reserved, commitSize)) { // failure - GCToOSInterface::VirtualRelease(reserved, rezerveSize); return nullptr; } SatoriHeap* heap = (SatoriHeap*)reserved; SatoriHeap::s_pageByteMap = heap->m_pageByteMap; heap->m_reservedMapSize = mapSize; - heap->m_committedMapSize = commitSize - sizeof(SatoriHeap) + sizeof(SatoriPage*); + heap->m_committedMapSize = commitSize - offsetof(SatoriHeap, m_pageMap); + heap->m_committedBytes = commitSize; InitWriteBarrier(heap->m_pageMap, s_pageByteMap, heap->CommittedMapLength() * Satori::PAGE_SIZE_GRANULARITY - 1); heap->m_mapLock.Initialize(); heap->m_nextPageIndex = 1; @@ -100,27 +106,29 @@ SatoriHeap* SatoriHeap::Create() heap->m_allocator.Initialize(heap); heap->m_recycler.Initialize(heap); heap->m_finalizationQueue.Initialize(heap); - - heap->m_committedBytes = commitSize; return heap; } bool SatoriHeap::CommitMoreMap(size_t 
currentCommittedMapSize) { - void* commitFrom = (void*)((size_t)&m_pageMap + currentCommittedMapSize); - size_t commitSize = SatoriUtil::CommitGranularity(); - SatoriLockHolder holder(&m_mapLock); - if (currentCommittedMapSize <= m_committedMapSize) + if (currentCommittedMapSize == m_committedMapSize) { + void* commitFrom = (void*)((size_t)&m_pageMap + currentCommittedMapSize); + size_t commitSize = SatoriUtil::CommitGranularity(); + _ASSERTE(m_committedMapSize + commitSize <= m_reservedMapSize); if (GCToOSInterface::VirtualCommit(commitFrom, commitSize)) { // we did the commit - m_committedMapSize = min(currentCommittedMapSize + commitSize, m_reservedMapSize); + m_committedMapSize += commitSize; IncBytesCommitted(commitSize); UpdateWriteBarrier(m_pageMap, s_pageByteMap, CommittedMapLength() * Satori::PAGE_SIZE_GRANULARITY - 1); } } + else + { + _ASSERTE(m_committedMapSize > currentCommittedMapSize); + } // either we did commit or someone else did, otherwise this is a failure. return m_committedMapSize > currentCommittedMapSize; @@ -133,7 +141,8 @@ bool SatoriHeap::TryAddRegularPage(SatoriPage*& newPage) for (size_t i = nextPageIndex; i < maxIndex; i++) { size_t currentCommittedMapSize = m_committedMapSize; - if (i >= currentCommittedMapSize && !CommitMoreMap(currentCommittedMapSize)) + size_t requiredMapSize = (i + 1) * sizeof(SatoriPage*); + if (requiredMapSize > currentCommittedMapSize && !CommitMoreMap(currentCommittedMapSize)) { break; } @@ -194,7 +203,7 @@ SatoriPage* SatoriHeap::AddLargePage(size_t minSize) for (size_t i = m_nextPageIndex; i < maxIndex; i++) { size_t currentCommittedMapSize; - size_t requiredMapSize = i + mapMarkCount * sizeof(SatoriPage*); + size_t requiredMapSize = (i + mapMarkCount) * sizeof(SatoriPage*); while (requiredMapSize > ((currentCommittedMapSize = m_committedMapSize))) { if (!CommitMoreMap(currentCommittedMapSize)) diff --git a/src/coreclr/gc/satori/SatoriHeap.h b/src/coreclr/gc/satori/SatoriHeap.h index 5d78539a861..6669dd21f5b 
100644 --- a/src/coreclr/gc/satori/SatoriHeap.h +++ b/src/coreclr/gc/satori/SatoriHeap.h @@ -136,8 +136,14 @@ public: } private: - // we need to cover the whole possible address space (48bit, 52 may be supported as needed). - static const int availableAddressSpaceBits = 47; + // We need to cover the whole addressable space, but no need to handle inaccessible VA. + // Accessing that should not happen for any valid reason, and will fail anyway. + // - the canonical user VA on x64 uses lower 47 bits (128 TB) + // - arm64 allows 48 lower bits (256 TB), Linux uses that, but other OSes use the same range as on x64. + // - we can eventually support 52 bit (4 PB) and 57 bit (128 PB) extensions, it is too early to worry about that. + // + // For consistency and uniform testing, we will default to 48 bit VA. + static const int availableAddressSpaceBits = 48; static const int pageCountBits = availableAddressSpaceBits - Satori::PAGE_BITS; int8_t m_pageByteMap[1 << pageCountBits]{}; @@ -154,6 +160,12 @@ private: size_t m_usedMapLength; size_t m_nextPageIndex; SatoriLock m_mapLock; + +#if _DEBUG + // make the Heap obj a bit larger to force some page map commits earlier. 
+ int8_t dummy[0x7A70]{}; +#endif + SatoriPage* m_pageMap[1]; bool CommitMoreMap(size_t currentlyCommitted); diff --git a/src/coreclr/gc/satori/SatoriPage.cpp b/src/coreclr/gc/satori/SatoriPage.cpp index f7b189fabc3..437059f7fda 100644 --- a/src/coreclr/gc/satori/SatoriPage.cpp +++ b/src/coreclr/gc/satori/SatoriPage.cpp @@ -58,7 +58,6 @@ SatoriPage* SatoriPage::InitializeAt(size_t address, size_t pageSize, SatoriHeap if (!GCToOSInterface::VirtualCommit((void*)address, commitSize)) { - GCToOSInterface::VirtualRelease((void*)address, pageSize); return nullptr; } diff --git a/src/coreclr/gc/satori/SatoriRecycler.cpp b/src/coreclr/gc/satori/SatoriRecycler.cpp index d082947554e..3654c453dff 100644 --- a/src/coreclr/gc/satori/SatoriRecycler.cpp +++ b/src/coreclr/gc/satori/SatoriRecycler.cpp @@ -1239,11 +1239,14 @@ void SatoriRecycler::BlockingCollectImpl() Relocate(); Update(); - _ASSERTE(m_activeWorkers == 0); - // we are done using workers. // undo the adjustment if we had to do one - if (IsWorkerThread()) m_activeWorkers++; + if (IsWorkerThread()) + { + // interlocked because even though we saw no workers after m_activeWorkerFn was reset, + // some delayed workers may later still come and look for work. 
+ Interlocked::Increment(&m_activeWorkers); + } m_gcCount[0]++; m_gcCount[1]++; diff --git a/src/coreclr/gc/unix/gcenv.unix.cpp b/src/coreclr/gc/unix/gcenv.unix.cpp index ad83affd9b7..6cc355b6bf8 100644 --- a/src/coreclr/gc/unix/gcenv.unix.cpp +++ b/src/coreclr/gc/unix/gcenv.unix.cpp @@ -637,7 +637,7 @@ void* GCToOSInterface::VirtualReserve(void* location, size_t size, bool useTHP) return NULL; } - if (pRetVal != location) + if (location != nullptr && pRetVal != location) { munmap(pRetVal, size); return NULL; diff --git a/src/coreclr/vm/amd64/jitinterfaceamd64.cpp b/src/coreclr/vm/amd64/jitinterfaceamd64.cpp index 406e56f1401..1d7fe9a8381 100644 --- a/src/coreclr/vm/amd64/jitinterfaceamd64.cpp +++ b/src/coreclr/vm/amd64/jitinterfaceamd64.cpp @@ -880,6 +880,11 @@ int WriteBarrierManager::UpdateWriteWatchAndCardTableLocations(bool isRuntimeSus // If we are told that we require an upper bounds check (GC did some heap reshuffling), // we need to switch to the WriteBarrier_PostGrow function for good. +#ifdef FEATURE_SATORI_GC + // as of now satori does not patch barriers on x64, no need to go further. + return SWB_PASS; +#endif + WriteBarrierType newType; if (NeedDifferentWriteBarrier(bReqUpperBoundsCheck, g_region_use_bitwise_write_barrier, &newType)) {