Mirror of https://github.com/VSadov/Satori.git (synced 2025-06-08 03:27:04 +09:00)
more fixups and cleanups
commit 7a16f8c156
parent e8cb5b9b1c
10 changed files with 32 additions and 77 deletions
@@ -787,6 +787,10 @@ void BlockResetAgeMapForBlocksWorker(uint32_t *pdwGen, uint32_t dwClumpMask, Sca
     STATIC_CONTRACT_GC_NOTRIGGER;
     STATIC_CONTRACT_MODE_COOPERATIVE;

+#if FEATURE_SATORI_GC
+    __UNREACHABLE();
+#endif
+
     // fetch the table segment we are working in
     TableSegment *pSegment = pInfo->pCurrentSegment;

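The `__UNREACHABLE()` guard added above turns this handle-table age-map path into a compile-time dead end under Satori, which tracks generations its own way. As a hedged sketch (the real macro lives in the CoreCLR PAL headers), `__UNREACHABLE` typically maps onto a compiler intrinsic roughly like this:

```cpp
// Sketch of a typical __UNREACHABLE() definition; assumption: the PAL maps it
// onto a compiler hint that this point is never executed.
#if defined(_MSC_VER)
#define __UNREACHABLE() __assume(0)                // MSVC optimizer hint
#else
#define __UNREACHABLE() __builtin_unreachable()    // GCC/Clang equivalent
#endif
```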
@@ -245,7 +245,6 @@ The .NET Foundation licenses this file to you under the MIT license.
     <LinkerArg Include="-L/usr/local/lib -linotify" Condition="'$(_targetOS)' == 'freebsd'" />
     <LinkerArg Include="@(ExtraLinkerArg->'-Wl,%(Identity)')" />
     <LinkerArg Include="@(NativeFramework->'-framework %(Identity)')" Condition="'$(_IsApplePlatform)' == 'true'" />
-    <LinkerArg Include="-ld_classic" Condition="'$(_IsApplePlatform)' == 'true'" />
     <LinkerArg Include="-Wl,--eh-frame-hdr" Condition="'$(_IsApplePlatform)' != 'true'" />

     <!-- Google requires all the native libraries to be aligned to 16 bytes (for 16k memory page size)
@@ -32,6 +32,8 @@ namespace Internal.Runtime

         private object? TryAllocateObject(MethodTable* type, nuint objectSize)
         {
+            // TODO: VS Satori can allocate immortal objects naturally. Use that instead. (see: AllocateImmortalObject)
+
             HalfBakedObject* obj = null;

             using (m_Crst.EnterScope())
@@ -38,6 +38,7 @@ Object* FrozenObjectHeapManager::TryAllocateObject(PTR_MethodTable type, size_t
     Object* obj = nullptr;

 #if FEATURE_SATORI_GC
+    // TODO: VS Satori does not have any size limitations here.
     if (objectSize > FOH_COMMIT_SIZE)
     {
         // The current design doesn't allow objects larger than FOH_COMMIT_SIZE and
@@ -144,35 +145,6 @@ Object* FrozenObjectHeapManager::TryAllocateObject(PTR_MethodTable type, size_t
 #endif // !FEATURE_BASICFREEZE
 }

-static void* ReserveMemory(size_t size)
-{
-#if defined(TARGET_X86) || defined(TARGET_AMD64)
-    // We have plenty of space in-range on X86/AMD64 so we can afford keeping
-    // FOH segments there so e.g. JIT can use relocs for frozen objects.
-    return ExecutableAllocator::Instance()->Reserve(size);
-#else
-    return ClrVirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_READWRITE);
-#endif
-}
-
-static void* CommitMemory(void* ptr, size_t size)
-{
-#if defined(TARGET_X86) || defined(TARGET_AMD64)
-    return ExecutableAllocator::Instance()->Commit(ptr, size, /*isExecutable*/ false);
-#else
-    return ClrVirtualAlloc(ptr, size, MEM_COMMIT, PAGE_READWRITE);
-#endif
-}
-
-static void ReleaseMemory(void* ptr)
-{
-#if defined(TARGET_X86) || defined(TARGET_AMD64)
-    ExecutableAllocator::Instance()->Release(ptr);
-#else
-    ClrVirtualFree(ptr, 0, MEM_RELEASE);
-#endif
-}
-
 // Reserve sizeHint bytes of memory for the given frozen segment.
 // The requested size can be be ignored in case of memory pressure and FOH_SEGMENT_DEFAULT_SIZE is used instead.
 FrozenObjectSegment::FrozenObjectSegment(size_t sizeHint) :
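The deleted helpers wrapped a classic two-step virtual-memory pattern: reserve a large address range up front, then commit it chunk by chunk as the frozen segment fills. A minimal, self-contained sketch of that pattern, assuming POSIX mmap/mprotect rather than the CLR's ClrVirtualAlloc wrappers:

```cpp
#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

// Reserve address space without backing memory (analogous to MEM_RESERVE).
static void* ReserveRange(size_t size)
{
    void* p = mmap(nullptr, size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return (p == MAP_FAILED) ? nullptr : p;
}

// Make a prefix of the reservation usable (analogous to MEM_COMMIT).
static bool CommitChunk(void* ptr, size_t size)
{
    return mprotect(ptr, size, PROT_READ | PROT_WRITE) == 0;
}

int main()
{
    const size_t segment = 4 * 1024 * 1024;   // reserve 4 MiB of address space
    const size_t chunk   = 64 * 1024;         // commit in 64 KiB chunks
    char* base = static_cast<char*>(ReserveRange(segment));
    if (base && CommitChunk(base, chunk))
        printf("committed first %zu bytes at %p\n", chunk, (void*)base);
    if (base)
        munmap(base, segment);                // release the whole reservation
    return 0;
}
```

The commit that follows simply inlines the non-executable branch of those helpers, since frozen-object segments no longer need to live in the executable allocator's range.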
@@ -186,7 +158,7 @@ FrozenObjectSegment::FrozenObjectSegment(size_t sizeHint) :
     _ASSERT(m_Size > FOH_COMMIT_SIZE);
     _ASSERT(m_Size % FOH_COMMIT_SIZE == 0);

-    void* alloc = ReserveMemory(m_Size);
+    void* alloc = ClrVirtualAlloc(nullptr, m_Size, MEM_RESERVE, PAGE_READWRITE);
     if (alloc == nullptr)
     {
         // Try again with the default FOH size
@@ -195,7 +167,7 @@ FrozenObjectSegment::FrozenObjectSegment(size_t sizeHint) :
         m_Size = FOH_SEGMENT_DEFAULT_SIZE;
         _ASSERT(m_Size > FOH_COMMIT_SIZE);
         _ASSERT(m_Size % FOH_COMMIT_SIZE == 0);
-        alloc = ReserveMemory(m_Size);
+        alloc = ClrVirtualAlloc(nullptr, m_Size, MEM_RESERVE, PAGE_READWRITE);
     }

     if (alloc == nullptr)
@@ -205,10 +177,10 @@ FrozenObjectSegment::FrozenObjectSegment(size_t sizeHint) :
     }

     // Commit a chunk in advance
-    void* committedAlloc = CommitMemory(alloc, FOH_COMMIT_SIZE);
+    void* committedAlloc = ClrVirtualAlloc(alloc, FOH_COMMIT_SIZE, MEM_COMMIT, PAGE_READWRITE);
     if (committedAlloc == nullptr)
     {
-        ReleaseMemory(alloc);
+        ClrVirtualFree(alloc, 0, MEM_RELEASE);
         ThrowOutOfMemory();
     }

@@ -290,7 +262,7 @@ Object* FrozenObjectSegment::TryAllocateObject(PTR_MethodTable type, size_t obje
     // Make sure we don't go out of bounds during this commit
     _ASSERT(m_SizeCommitted + FOH_COMMIT_SIZE <= m_Size);

-    if (CommitMemory(m_pStart + m_SizeCommitted, FOH_COMMIT_SIZE) == nullptr)
+    if (ClrVirtualAlloc(m_pStart + m_SizeCommitted, FOH_COMMIT_SIZE, MEM_COMMIT, PAGE_READWRITE) == nullptr)
     {
         ThrowOutOfMemory();
     }
@@ -305,6 +305,7 @@ void GCToEEInterface::GcScanCurrentStackRoots(promote_func* fn, ScanContext* sc)
 #ifdef FEATURE_EVENT_TRACE
     sc->dwEtwRootKind = kEtwGCRootKindOther;
 #endif // FEATURE_EVENT_TRACE
+
     STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p ID = 0x%x }\n", pThread, pThread->GetThreadId());
 }

@@ -127,7 +127,11 @@ public:

     static bool UseThreadAllocationContexts()
     {
+#ifdef FEATURE_SATORI_GC
+        return true;
+#else
         return s_useThreadAllocationContexts;
+#endif
     }

 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
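Per-thread allocation contexts let each thread bump-allocate from its own window of the heap without taking a lock; Satori always uses them, so the runtime-wide flag is short-circuited to true. A minimal sketch of the idea, with hypothetical names modeled loosely on the GC interface's gc_alloc_context:

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical per-thread allocation context: a [alloc_ptr, alloc_limit)
// window that only the owning thread touches, so no synchronization is needed.
struct AllocContext
{
    uint8_t* alloc_ptr;    // next free byte in this thread's window
    uint8_t* alloc_limit;  // end of the window; beyond it we must refill

    void* Allocate(size_t size)
    {
        if (alloc_ptr + size > alloc_limit)
            return nullptr;        // window exhausted: caller asks the GC for a new one
        void* result = alloc_ptr;
        alloc_ptr += size;         // lock-free bump allocation
        return result;
    }
};
```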
@@ -1536,14 +1536,13 @@ void ErectWriteBarrierForMT(MethodTable **dst, MethodTable *ref)
     STATIC_CONTRACT_NOTHROW;
     STATIC_CONTRACT_GC_NOTRIGGER;

+#if FEATURE_SATORI_GC
+    // this whole thing is unnecessary in Satori
+    __UNREACHABLE();
+#else

     *dst = ref;

-#if FEATURE_SATORI_GC
-    // Satori large objects are allocated in either gen1 or gen2.
-    // PublishObject will sort this out and mark cards as needed.
-
-#else
 #ifdef WRITE_BARRIER_CHECK
     updateGCShadow((Object **)dst, (Object *)ref);     // support debugging write barrier, updateGCShadow only cares that these are pointers
 #endif
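ErectWriteBarrierForMT exists to dirty the card table after storing a MethodTable pointer into the heap; Satori does its card marking in PublishObject instead, so the whole helper becomes unreachable. For orientation, a generic card-marking write barrier looks roughly like this (a sketch with assumed names and parameters, not the CLR's actual barrier):

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical card table state; a real GC sets these up at heap init.
static uint8_t* g_card_table;      // one byte per 2^kCardShift bytes of heap
static uint8_t* g_heap_low;        // lowest address covered by the card table
constexpr int   kCardShift = 11;   // 2 KiB cards, a common choice

// After storing a pointer into the heap, dirty the card covering the
// destination so the next GC rescans that region for cross-generation refs.
inline void WriteBarrier(void** dst, void* ref)
{
    *dst = ref;
    size_t card = ((uint8_t*)dst - g_heap_low) >> kCardShift;
    g_card_table[card] = 0xFF;     // 0xFF == dirty
}
```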
@@ -1384,9 +1384,12 @@ void* __cdecl GCSafeMemCpy(void * dest, const void * src, size_t len)
     STATIC_CONTRACT_GC_NOTRIGGER;
     STATIC_CONTRACT_FORBID_FAULT;

+#ifdef FEATURE_SATORI_GC
     if (IsInHeapSatori((Object**)dest))
-    //if (!(((*(BYTE**)&dest) < g_lowest_address ) ||
-    //      ((*(BYTE**)&dest) >= g_highest_address)))
+#else
+    if (!(((*(BYTE**)&dest) < g_lowest_address ) ||
+          ((*(BYTE**)&dest) >= g_highest_address)))
+#endif
     {
         Thread* pThread = GetThreadNULLOk();

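Both branches answer the same question: does dest point into the GC heap? The non-Satori path tests a global [g_lowest_address, g_highest_address) range; a self-contained sketch of that test, with assumed globals standing in for the CLR's:

```cpp
#include <cstdint>

// Hypothetical heap bounds; the CLR keeps these in g_lowest_address /
// g_highest_address and updates them as heap segments come and go.
static uint8_t* g_low;
static uint8_t* g_high;

// Fast, conservative range test: an address inside the range may still fall
// in an uncommitted gap, which is why precise checks happen elsewhere.
inline bool IsInGCHeapRange(const void* p)
{
    const uint8_t* b = static_cast<const uint8_t*>(p);
    return b >= g_low && b < g_high;
}
```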
@@ -157,8 +157,13 @@ class Object
     VOID SetMethodTableForUOHObject(MethodTable *pMT)
     {
         WRAPPER_NO_CONTRACT;
+#if FEATURE_SATORI_GC
+        // nothing extra needs to happen in Satori.
+        m_pMethTab = pMT;
+#else
         // This function must be used if the allocation occurs on a UOH heap, and the method table might be a collectible type
         ErectWriteBarrierForMT(&m_pMethTab, pMT);
+#endif
     }
 #endif //!DACCESS_COMPILE

@@ -3252,40 +3252,6 @@ COR_PRF_SUSPEND_REASON GCSuspendReasonToProfSuspendReason(ThreadSuspend::SUSPEND
 }
 #endif // PROFILING_SUPPORTED

-// exponential spinwait with an approximate time limit for waiting in microsecond range.
-void SpinWait(int usecLimit)
-{
-    LARGE_INTEGER li;
-    QueryPerformanceCounter(&li);
-    int64_t startTicks = li.QuadPart;
-
-    QueryPerformanceFrequency(&li);
-    int64_t ticksPerSecond = li.QuadPart;
-    int64_t endTicks = startTicks + (usecLimit * ticksPerSecond) / 1000000;
-
-#ifdef TARGET_UNIX
-    if (usecLimit > 10)
-    {
-        PAL_nanosleep(usecLimit * 1000);
-    }
-#endif // TARGET_UNIX
-
-    for (int i = 0; i < 30; i++)
-    {
-        QueryPerformanceCounter(&li);
-        int64_t currentTicks = li.QuadPart;
-        if (currentTicks > endTicks)
-        {
-            break;
-        }
-
-        for (int j = 0; j < (1 << i); j++)
-        {
-            System_YieldProcessor();
-        }
-    }
-}
-
 //************************************************************************************
 //
 // SuspendRuntime is responsible for ensuring that all managed threads reach a
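The removed SpinWait combined an optional initial sleep with exponentially growing bursts of pause instructions, capped by a wall-clock deadline. A portable sketch of the same backoff shape using the C++ standard library (an illustration, not the deleted QueryPerformanceCounter-based code):

```cpp
#include <chrono>
#include <thread>
#if defined(__x86_64__) || defined(_M_X64)
#include <immintrin.h>   // _mm_pause
#endif

// Exponential spin-wait with an approximate microsecond time limit,
// mirroring the shape of the deleted SpinWait helper.
void SpinWaitSketch(int usecLimit)
{
    using clock = std::chrono::steady_clock;
    auto deadline = clock::now() + std::chrono::microseconds(usecLimit);

    for (int i = 0; i < 30; i++)
    {
        if (clock::now() > deadline)
            break;
        // Burst of 2^i pause hints: cheap at first, increasingly long later,
        // so short waits stay responsive and long waits burn less CPU.
        for (long j = 0; j < (1L << i); j++)
        {
#if defined(__x86_64__) || defined(_M_X64)
            _mm_pause();
#else
            std::this_thread::yield();
#endif
        }
    }
}
```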