mirror of https://github.com/VSadov/Satori.git synced 2025-06-10 18:11:04 +09:00

Feature to be able to selectively deoptimize methods (#88797)

Add ICorDebugFunction5::DisableOptimizations with support for inliners in R2R and JIT methods.

Co-authored-by: Mikelle <mirogers@microsoft.com>
Co-authored-by: Juan Sebastian Hoyos Ayala <juan.hoyos@microsoft.com>
David Mason 2023-07-13 19:42:52 -07:00 committed by GitHub
parent 34d95e1929
commit b4118a6a81
Signed by: github
GPG key ID: 4AEE18F83AFDEB23
30 changed files with 9305 additions and 7599 deletions
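
Before the per-file diffs, a brief illustration of how the new API is meant to be consumed: the following is a hypothetical client-side sketch, not code from this commit. It assumes pFunction is an ICorDebugFunction* already obtained through the ICorDebug API and that the debuggee is stopped when the call is made (the implementation requires this).

    // Hypothetical usage sketch (not part of this commit).
    // pFunction is an existing ICorDebugFunction*; the debuggee is stopped.
    ICorDebugFunction5 *pFunction5 = NULL;
    HRESULT hr = pFunction->QueryInterface(IID_ICorDebugFunction5,
                                           reinterpret_cast<void **>(&pFunction5));
    if (SUCCEEDED(hr))
    {
        // Triggers a fresh, unoptimized JIT the next time the method is called,
        // even if the method currently runs ReadyToRun (R2R) code.
        hr = pFunction5->DisableOptimizations();
        pFunction5->Release();
    }

The call only marks the method; unoptimized code is produced when the method is next invoked, and the runtime also deoptimizes R2R and JIT methods that have inlined it.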

View file

@ -5225,7 +5225,7 @@ EnumMethodInstances::Next(ClrDataAccess* dac,
}
}
if (!m_methodIter.Current()->HasNativeCode())
if (!m_methodIter.Current()->HasNativeCodeReJITAware())
{
goto NextMethod;
}
@ -5243,7 +5243,7 @@ EnumMethodInstances::CdStart(MethodDesc* methodDesc,
CLRDATA_ENUM* handle)
{
if (!methodDesc->HasClassOrMethodInstantiation() &&
!methodDesc->HasNativeCode())
!methodDesc->HasNativeCodeReJITAware())
{
*handle = 0;
return S_FALSE;

View file

@ -136,6 +136,10 @@ HRESULT CordbFunction::QueryInterface(REFIID id, void **pInterface)
{
*pInterface = static_cast<ICorDebugFunction4*>(this);
}
else if (id == IID_ICorDebugFunction5)
{
*pInterface = static_cast<ICorDebugFunction5*>(this);
}
else if (id == IID_IUnknown)
{
*pInterface = static_cast<IUnknown*>(static_cast<ICorDebugFunction*>(this));
@ -606,6 +610,92 @@ HRESULT CordbFunction::CreateNativeBreakpoint(ICorDebugFunctionBreakpoint **ppBr
return hr;
}
//-----------------------------------------------------------------------------
// CordbFunction::DisableOptimizations
// Public method for ICorDebugFunction5::DisableOptimizations.
// Triggers a new JIT so the next time the function is called, it will be unoptimized.
//
// Parameters
//
//
// Returns:
// S_OK on success.
//-----------------------------------------------------------------------------
HRESULT CordbFunction::DisableOptimizations()
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
HRESULT hr = S_OK;
CordbProcess * pProcess = GetProcess();
RSLockHolder lockHolder(pProcess->GetProcessLock());
DebuggerIPCEvent event;
CordbAppDomain * pAppDomain = GetAppDomain();
_ASSERTE (pAppDomain != NULL);
pProcess->InitIPCEvent(&event, DB_IPCE_DISABLE_OPTS, true, pAppDomain->GetADToken());
event.DisableOptData.funcMetadataToken = m_MDToken;
event.DisableOptData.pModule = m_pModule->GetRuntimeModule();
lockHolder.Release();
hr = pProcess->m_cordb->SendIPCEvent(pProcess, &event, sizeof(DebuggerIPCEvent));
lockHolder.Acquire();
_ASSERTE(event.type == DB_IPCE_DISABLE_OPTS_RESULT);
return event.hr;
}
//-----------------------------------------------------------------------------
// CordbFunction::AreOptimizationsDisabled
// Public method for ICorDebugFunction5::AreOptimizationsDisabled.
// Indicates whether this method had optimizations disabled already.
//
// Parameters:
// BOOL *pOptimizationsDisabled
//
//
// Returns:
// S_OK on success.
//-----------------------------------------------------------------------------
HRESULT CordbFunction::AreOptimizationsDisabled(BOOL *pOptimizationsDisabled)
{
PUBLIC_API_ENTRY(this);
FAIL_IF_NEUTERED(this);
ATT_REQUIRE_STOPPED_MAY_FAIL(GetProcess());
HRESULT hr = S_OK;
if (pOptimizationsDisabled == NULL)
{
return E_INVALIDARG;
}
CordbProcess * pProcess = GetProcess();
RSLockHolder lockHolder(pProcess->GetProcessLock());
DebuggerIPCEvent event;
CordbAppDomain * pAppDomain = GetAppDomain();
_ASSERTE (pAppDomain != NULL);
pProcess->InitIPCEvent(&event, DB_IPCE_IS_OPTS_DISABLED, true, pAppDomain->GetADToken());
event.DisableOptData.funcMetadataToken = m_MDToken;
event.DisableOptData.pModule = m_pModule->GetRuntimeModule();
lockHolder.Release();
hr = pProcess->m_cordb->SendIPCEvent(pProcess, &event, sizeof(DebuggerIPCEvent));
lockHolder.Acquire();
_ASSERTE(event.type == DB_IPCE_IS_OPTS_DISABLED_RESULT);
*pOptimizationsDisabled = event.IsOptsDisabledData.value;
return event.hr;
}
// determine whether we have a native-only implementation
// Arguments:
// Input: none (we use information in various data members of this instance of CordbFunction: m_isNativeImpl,

View file

@ -5349,7 +5349,8 @@ class CordbFunction : public CordbBase,
public ICorDebugFunction,
public ICorDebugFunction2,
public ICorDebugFunction3,
public ICorDebugFunction4
public ICorDebugFunction4,
public ICorDebugFunction5
{
public:
//-----------------------------------------------------------
@ -5412,6 +5413,12 @@ public:
//-----------------------------------------------------------
COM_METHOD CreateNativeBreakpoint(ICorDebugFunctionBreakpoint **ppBreakpoint);
//-----------------------------------------------------------
// ICorDebugFunction5
//-----------------------------------------------------------
COM_METHOD AreOptimizationsDisabled(BOOL *pOptimizationsDisabled);
COM_METHOD DisableOptimizations();
//-----------------------------------------------------------
// Internal members
//-----------------------------------------------------------

View file

@ -10442,6 +10442,60 @@ bool Debugger::HandleIPCEvent(DebuggerIPCEvent * pEvent)
break;
}
case DB_IPCE_DISABLE_OPTS:
{
Module *pModule = pEvent->DisableOptData.pModule.GetRawPtr();
mdToken methodDef = pEvent->DisableOptData.funcMetadataToken;
_ASSERTE(TypeFromToken(methodDef) == mdtMethodDef);
HRESULT hr = E_INVALIDARG;
EX_TRY
{
hr = DeoptimizeMethod(pModule, methodDef);
}
EX_CATCH_HRESULT(hr);
DebuggerIPCEvent * pIPCResult = m_pRCThread->GetIPCEventReceiveBuffer();
InitIPCEvent(pIPCResult,
DB_IPCE_DISABLE_OPTS_RESULT,
g_pEEInterface->GetThread(),
pEvent->vmAppDomain);
pIPCResult->hr = hr;
m_pRCThread->SendIPCReply();
}
break;
case DB_IPCE_IS_OPTS_DISABLED:
{
Module *pModule = pEvent->DisableOptData.pModule.GetRawPtr();
mdToken methodDef = pEvent->DisableOptData.funcMetadataToken;
_ASSERTE(TypeFromToken(methodDef) == mdtMethodDef);
HRESULT hr = E_INVALIDARG;
BOOL deoptimized = FALSE;
EX_TRY
{
hr = IsMethodDeoptimized(pModule, methodDef, &deoptimized);
}
EX_CATCH_HRESULT(hr);
DebuggerIPCEvent * pIPCResult = m_pRCThread->GetIPCEventReceiveBuffer();
InitIPCEvent(pIPCResult,
DB_IPCE_IS_OPTS_DISABLED_RESULT,
g_pEEInterface->GetThread(),
pEvent->vmAppDomain);
pIPCResult->IsOptsDisabledData.value = deoptimized;
pIPCResult->hr = hr;
m_pRCThread->SendIPCReply();
}
break;
case DB_IPCE_BREAKPOINT_ADD:
{
@ -12211,6 +12265,151 @@ HRESULT Debugger::ReleaseRemoteBuffer(void *pBuffer, bool removeFromBlobList)
return S_OK;
}
#ifndef DACCESS_COMPILE
HRESULT Debugger::DeoptimizeMethodHelper(Module* pModule, mdMethodDef methodDef)
{
CONTRACTL
{
THROWS;
CAN_TAKE_LOCK;
GC_NOTRIGGER;
}
CONTRACTL_END;
_ASSERTE(!CodeVersionManager::IsLockOwnedByCurrentThread());
HRESULT hr = S_OK;
ILCodeVersion ilCodeVersion;
CodeVersionManager *pCodeVersionManager = pModule->GetCodeVersionManager();
{
CodeVersionManager::LockHolder codeVersioningLockHolder;
if (FAILED(hr = pCodeVersionManager->AddILCodeVersion(pModule, methodDef, &ilCodeVersion, TRUE)))
{
LOG((LF_TIEREDCOMPILATION, LL_INFO100, "TieredCompilationManager::DeOptimizeMethodHelper Module=0x%x Method=0x%x, AddILCodeVersion returned hr 0x%x\n",
pModule, methodDef,
hr));
return hr;
}
// We are using the profiler ReJIT infrastructure to trigger a new jit. We don't want to modify the IL or
// call back in to anything so set it all here to match the original IL and debug codegen flags
ilCodeVersion.SetIL(ILCodeVersion(pModule, methodDef).GetIL());
ilCodeVersion.SetJitFlags(COR_PRF_CODEGEN_DISABLE_ALL_OPTIMIZATIONS | COR_PRF_CODEGEN_DEBUG_INFO);
ilCodeVersion.SetRejitState(ILCodeVersion::kStateActive);
ilCodeVersion.SetEnableReJITCallback(false);
}
_ASSERTE(!ilCodeVersion.IsNull());
{
if (FAILED(hr = pCodeVersionManager->SetActiveILCodeVersions(&ilCodeVersion, 1, NULL)))
{
LOG((LF_TIEREDCOMPILATION, LL_INFO100, "TieredCompilationManager::DeOptimizeMethodHelper Module=0x%x Method=0x%x, SetActiveILCodeVersions returned hr 0x%x\n",
pModule, methodDef,
hr));
return hr;
}
}
return hr;
}
HRESULT Debugger::DeoptimizeMethod(Module* pModule, mdMethodDef methodDef)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
}
CONTRACTL_END;
// First deoptimize the method itself
HRESULT hr = DeoptimizeMethodHelper(pModule, methodDef);
if (FAILED(hr))
{
LOG((LF_TIEREDCOMPILATION, LL_INFO100, "TieredCompilationManager::DeOptimizeMethod Module=0x%x Method=0x%x, initial ReJIT returned hr 0x%x, aborting\n",
pModule, methodDef, hr));
return hr;
}
// Now deoptimize anything that has inlined it in a R2R method
AppDomain::AssemblyIterator domainAssemblyIterator = SystemDomain::System()->DefaultDomain()->IterateAssembliesEx((AssemblyIterationFlags) (kIncludeLoaded | kIncludeExecution));
CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
NativeImageInliningIterator inlinerIter;
while (domainAssemblyIterator.Next(pDomainAssembly.This()))
{
Module *pCandidateModule = pDomainAssembly->GetModule();
if (pCandidateModule->HasReadyToRunInlineTrackingMap())
{
inlinerIter.Reset(pCandidateModule, MethodInModule(pModule, methodDef));
while (inlinerIter.Next())
{
MethodInModule inliner = inlinerIter.GetMethod();
_ASSERTE(TypeFromToken(inliner.m_methodDef) == mdtMethodDef);
DeoptimizeMethodHelper(inliner.m_module, inliner.m_methodDef);
}
}
}
// Next any JIT methods
MethodDesc *pMethodDesc = pModule->LookupMethodDef(methodDef);
if (pMethodDesc != NULL && pModule->HasJitInlineTrackingMap())
{
InlineSArray<MethodDesc *, 10> inliners;
auto lambda = [&inliners](MethodDesc *inliner, MethodDesc *inlinee)
{
_ASSERTE(!inliner->IsNoMetadata());
if (inliner->IsIL())
{
inliners.Append(inliner);
}
// Keep going
return true;
};
JITInlineTrackingMap *pMap = pModule->GetJitInlineTrackingMap();
pMap->VisitInliners(pMethodDesc, lambda);
for (auto it = inliners.Begin(); it != inliners.End(); ++it)
{
Module *inlinerModule = (*it)->GetModule();
mdMethodDef inlinerMethodDef = (*it)->GetMemberDef();
_ASSERTE(TypeFromToken(inlinerMethodDef) == mdtMethodDef);
DeoptimizeMethodHelper(inlinerModule, inlinerMethodDef);
}
}
return hr;
}
#endif //DACCESS_COMPILE
HRESULT Debugger::IsMethodDeoptimized(Module *pModule, mdMethodDef methodDef, BOOL *pResult)
{
CONTRACTL
{
NOTHROW;
CAN_TAKE_LOCK;
GC_NOTRIGGER;
}
CONTRACTL_END;
if (pModule == NULL || pResult == NULL || TypeFromToken(methodDef) != mdtMethodDef)
{
return E_INVALIDARG;
}
{
CodeVersionManager::LockHolder codeVersioningLockHolder;
CodeVersionManager *pCodeVersionManager = pModule->GetCodeVersionManager();
ILCodeVersion activeILVersion = pCodeVersionManager->GetActiveILCodeVersion(pModule, methodDef);
*pResult = activeILVersion.IsDeoptimized();
}
return S_OK;
}
//
// UnrecoverableError causes the Left Side to enter a state where no more
// debugging can occur and we leave around enough information for the

View file

@ -2212,6 +2212,15 @@ public:
return m_trappingRuntimeThreads;
}
#ifndef DACCESS_COMPILE
private:
HRESULT DeoptimizeMethodHelper(Module* pModule, mdMethodDef methodDef);
public:
HRESULT DeoptimizeMethod(Module* pModule, mdMethodDef methodDef);
#endif //DACCESS_COMPILE
HRESULT IsMethodDeoptimized(Module *pModule, mdMethodDef methodDef, BOOL *pResult);
//
// The debugger mutex is used to protect any "global" Left Side
// data structures. The RCThread takes it when handling a Right

View file

@ -1580,7 +1580,11 @@ DebuggerJitInfo *DebuggerMethodInfo::FindOrCreateInitAndAddJitInfo(MethodDesc* f
startAddr = g_pEEInterface->GetFunctionAddress(fd);
if (startAddr == NULL)
{
return NULL;
startAddr = fd->GetNativeCodeReJITAware();
if (startAddr == NULL)
{
return NULL;
}
}
}
else

View file

@ -2013,6 +2013,17 @@ struct MSLAYOUT DebuggerIPCEvent
LSPTR_METHODDESC nativeCodeMethodDescToken; // points to the MethodDesc if !isIL
} BreakpointData;
struct MSLAYOUT
{
mdMethodDef funcMetadataToken;
VMPTR_Module pModule;
} DisableOptData;
struct MSLAYOUT
{
BOOL value;
} IsOptsDisabledData;
struct MSLAYOUT
{
LSPTR_BREAKPOINT breakpointToken;

View file

@ -92,7 +92,9 @@ IPC_EVENT_TYPE1(DB_IPCE_RESOLVE_UPDATE_METADATA_2_RESULT,0x015F)
IPC_EVENT_TYPE1(DB_IPCE_DATA_BREAKPOINT ,0x0160)
IPC_EVENT_TYPE1(DB_IPCE_BEFORE_GARBAGE_COLLECTION ,0x0161)
IPC_EVENT_TYPE1(DB_IPCE_AFTER_GARBAGE_COLLECTION ,0x0162)
IPC_EVENT_TYPE0(DB_IPCE_RUNTIME_LAST ,0x0163) // The last event from runtime
IPC_EVENT_TYPE1(DB_IPCE_DISABLE_OPTS_RESULT ,0x0163)
IPC_EVENT_TYPE1(DB_IPCE_IS_OPTS_DISABLED_RESULT ,0x0164)
IPC_EVENT_TYPE0(DB_IPCE_RUNTIME_LAST ,0x0165) // The last event from runtime
@ -141,5 +143,7 @@ IPC_EVENT_TYPE2(DB_IPCE_DEBUGGER_INVALID ,0x0249) // An invalid ev
IPC_EVENT_TYPE2(DB_IPCE_GET_GCHANDLE_INFO ,0x0251)
IPC_EVENT_TYPE2(DB_IPCE_RESOLVE_UPDATE_METADATA_1 ,0x0256)
IPC_EVENT_TYPE2(DB_IPCE_RESOLVE_UPDATE_METADATA_2 ,0x0257)
IPC_EVENT_TYPE0(DB_IPCE_DEBUGGER_LAST ,0x0258) // The last event from the debugger
IPC_EVENT_TYPE2(DB_IPCE_DISABLE_OPTS ,0x0258)
IPC_EVENT_TYPE2(DB_IPCE_IS_OPTS_DISABLED ,0x0259)
IPC_EVENT_TYPE0(DB_IPCE_DEBUGGER_LAST ,0x025A) // The last event from the debugger

View file

@ -2202,8 +2202,10 @@ DWORD DbgTransportSession::GetEventSize(DebuggerIPCEvent *pEvent)
case DB_IPCE_CONTROL_C_EVENT_RESULT:
case DB_IPCE_BEFORE_GARBAGE_COLLECTION:
case DB_IPCE_AFTER_GARBAGE_COLLECTION:
case DB_IPCE_DISABLE_OPTS_RESULT:
cbAdditionalSize = 0;
break;
case DB_IPCE_DATA_BREAKPOINT:
cbAdditionalSize = sizeof(pEvent->DataBreakpointData);
break;
@ -2496,6 +2498,15 @@ DWORD DbgTransportSession::GetEventSize(DebuggerIPCEvent *pEvent)
cbAdditionalSize = sizeof(pEvent->CustomNotification);
break;
case DB_IPCE_DISABLE_OPTS:
case DB_IPCE_IS_OPTS_DISABLED:
cbAdditionalSize = sizeof(pEvent->DisableOptData);
break;
case DB_IPCE_IS_OPTS_DISABLED_RESULT:
cbAdditionalSize = sizeof(pEvent->IsOptsDisabledData);
break;
default:
printf("Unknown debugger event type: 0x%x\n", (pEvent->type & DB_IPCE_TYPE_MASK));
_ASSERTE(!"Unknown debugger event type");

View file

@ -159,6 +159,7 @@ End
Crst DebuggerMutex
AcquiredBefore AvailableParamTypes
DynamicIL LoaderHeap ModuleLookupTable
MethodDescBackpatchInfoTracker JitInlineTrackingMap CodeVersioning
// Disabled per bug 581892
// AcquiredBefore DebuggerController
@ -489,6 +490,7 @@ Crst ThreadStore
DebuggerHeapLock DebuggerJitInfo DynamicIL ExecuteManRangeLock HandleTable IbcProfile
JitGenericHandleCache JumpStubCache LoaderHeap ModuleLookupTable ProfilerGCRefDataFreeList
SingleUseLock SyncBlockCache SystemDomainDelayedUnloadList ThreadIdDispenser DebuggerMutex
JitInlineTrackingMap
End
Crst TypeIDMap
@ -540,7 +542,7 @@ Crst InlineTrackingMap
End
Crst JitInlineTrackingMap
AcquiredBefore CodeVersioning ThreadStore MethodDescBackpatchInfoTracker
AcquiredBefore CodeVersioning MethodDescBackpatchInfoTracker
End
Crst EventPipe

View file

@ -286,7 +286,8 @@ interface ICorDebugDataTarget : IUnknown
CORDB_PLATFORM_POSIX_X86, // Posix supporting OS on Intel x86
CORDB_PLATFORM_POSIX_ARM, // Posix supporting OS on ARM32
CORDB_PLATFORM_POSIX_ARM64, // Posix supporting OS on ARM64
CORDB_PLATFORM_POSIX_LOONGARCH64 // Posix supporting OS on LoongArch64
CORDB_PLATFORM_POSIX_LOONGARCH64, // Posix supporting OS on LoongArch64
CORDB_PLATFORM_POSIX_RISCV64 // Posix supporting OS on RISCV64
} CorDebugPlatform;
HRESULT GetPlatform([out] CorDebugPlatform * pTargetPlatform);
@ -3985,6 +3986,72 @@ interface ICorDebugRegisterSet : IUnknown
REGISTER_LOONGARCH64_F30,
REGISTER_LOONGARCH64_F31,
REGISTER_RISCV64_PC = 0,
REGISTER_RISCV64_RA,
REGISTER_RISCV64_SP,
REGISTER_RISCV64_GP,
REGISTER_RISCV64_TP,
REGISTER_RISCV64_T0,
REGISTER_RISCV64_T1,
REGISTER_RISCV64_T2,
REGISTER_RISCV64_FP,
REGISTER_RISCV64_S1,
REGISTER_RISCV64_A0,
REGISTER_RISCV64_A1,
REGISTER_RISCV64_A2,
REGISTER_RISCV64_A3,
REGISTER_RISCV64_A4,
REGISTER_RISCV64_A5,
REGISTER_RISCV64_A6,
REGISTER_RISCV64_A7,
REGISTER_RISCV64_S2,
REGISTER_RISCV64_S3,
REGISTER_RISCV64_S4,
REGISTER_RISCV64_S5,
REGISTER_RISCV64_S6,
REGISTER_RISCV64_S7,
REGISTER_RISCV64_S8,
REGISTER_RISCV64_S9,
REGISTER_RISCV64_S10,
REGISTER_RISCV64_S11,
REGISTER_RISCV64_T3,
REGISTER_RISCV64_T4,
REGISTER_RISCV64_T5,
REGISTER_RISCV64_T6,
REGISTER_RISCV64_F0,
REGISTER_RISCV64_F1,
REGISTER_RISCV64_F2,
REGISTER_RISCV64_F3,
REGISTER_RISCV64_F4,
REGISTER_RISCV64_F5,
REGISTER_RISCV64_F6,
REGISTER_RISCV64_F7,
REGISTER_RISCV64_F8,
REGISTER_RISCV64_F9,
REGISTER_RISCV64_F10,
REGISTER_RISCV64_F11,
REGISTER_RISCV64_F12,
REGISTER_RISCV64_F13,
REGISTER_RISCV64_F14,
REGISTER_RISCV64_F15,
REGISTER_RISCV64_F16,
REGISTER_RISCV64_F17,
REGISTER_RISCV64_F18,
REGISTER_RISCV64_F19,
REGISTER_RISCV64_F20,
REGISTER_RISCV64_F21,
REGISTER_RISCV64_F22,
REGISTER_RISCV64_F23,
REGISTER_RISCV64_F24,
REGISTER_RISCV64_F25,
REGISTER_RISCV64_F26,
REGISTER_RISCV64_F27,
REGISTER_RISCV64_F28,
REGISTER_RISCV64_F29,
REGISTER_RISCV64_F30,
REGISTER_RISCV64_F31,
REGISTER_RISCV64_X0, // TODO-RISCV64-CQ: Add X0 for use in debugging. Need to check.
// other architectures here
} CorDebugRegister;
@ -5706,6 +5773,29 @@ interface ICorDebugFunction4 : IUnknown
HRESULT CreateNativeBreakpoint(ICorDebugFunctionBreakpoint **ppBreakpoint);
};
/*
ICorDebugFunction5 is a logical extension to ICorDebugFunction.
*/
[
object,
local,
uuid(9D4DAB7B-3401-4F37-BD08-CA09F3FDF10F),
pointer_default(unique)
]
interface ICorDebugFunction5 : IUnknown
{
/*
* Triggers a new JIT so the next time the function is called, it will be unoptimized. Will
* trigger a JIT even for R2R code.
*/
HRESULT DisableOptimizations();
/*
* Indicates whether this method had optimizations disabled already.
*/
HRESULT AreOptimizationsDisabled(BOOL *pOptimizationsDisabled);
};
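
As an illustrative sketch of how these two methods are intended to be used together (assuming pFunction5 is an ICorDebugFunction5* already obtained via QueryInterface, and the debuggee is stopped), a client might check the current state before requesting deoptimization:

    // Illustrative only; pFunction5 is assumed to come from
    // ICorDebugFunction::QueryInterface(IID_ICorDebugFunction5, ...).
    BOOL alreadyDisabled = FALSE;
    HRESULT hr = pFunction5->AreOptimizationsDisabled(&alreadyDisabled);
    if (SUCCEEDED(hr) && !alreadyDisabled)
    {
        // Marks the method (and, on the runtime side, its R2R and JIT inliners)
        // so the next invocation runs unoptimized, debuggable code.
        hr = pFunction5->DisableOptimizations();
    }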
/*
ICorDebugCode represents an IL or native code blob.

View file

@ -2648,6 +2648,7 @@ typedef enum
{
COR_PRF_CODEGEN_DISABLE_INLINING = 0x0001,
COR_PRF_CODEGEN_DISABLE_ALL_OPTIMIZATIONS = 0x0002,
COR_PRF_CODEGEN_DEBUG_INFO = 0x0003,
} COR_PRF_CODEGEN_FLAGS;

View file

@ -149,12 +149,12 @@ int g_rgCrstLevelMap[] =
10, // CrstAppDomainCache
3, // CrstArgBasedStubCache
3, // CrstAssemblyList
12, // CrstAssemblyLoader
14, // CrstAssemblyLoader
4, // CrstAvailableClass
5, // CrstAvailableParamTypes
7, // CrstBaseDomain
-1, // CrstCCompRC
13, // CrstClassFactInfoHash
15, // CrstClassFactInfoHash
11, // CrstClassInit
-1, // CrstClrNotification
6, // CrstCodeFragmentHeap
@ -170,13 +170,13 @@ int g_rgCrstLevelMap[] =
0, // CrstDebuggerHeapExecMemLock
0, // CrstDebuggerHeapLock
4, // CrstDebuggerJitInfo
10, // CrstDebuggerMutex
13, // CrstDebuggerMutex
0, // CrstDelegateToFPtrHash
16, // CrstDomainLocalBlock
18, // CrstDomainLocalBlock
0, // CrstDynamicIL
3, // CrstDynamicMT
0, // CrstEtwTypeLogHash
18, // CrstEventPipe
20, // CrstEventPipe
0, // CrstEventStore
0, // CrstException
0, // CrstExecutableAllocatorLock
@ -187,55 +187,55 @@ int g_rgCrstLevelMap[] =
7, // CrstFuncPtrStubs
10, // CrstFusionAppCtx
10, // CrstGCCover
15, // CrstGlobalStrLiteralMap
17, // CrstGlobalStrLiteralMap
1, // CrstHandleTable
0, // CrstIbcProfile
8, // CrstIJWFixupData
0, // CrstIJWHash
7, // CrstILStubGen
3, // CrstInlineTrackingMap
17, // CrstInstMethodHashTable
20, // CrstInterop
19, // CrstInstMethodHashTable
22, // CrstInterop
10, // CrstInteropData
0, // CrstIsJMCMethod
7, // CrstISymUnmanagedReader
11, // CrstJit
0, // CrstJitGenericHandleCache
13, // CrstJitInlineTrackingMap
12, // CrstJitInlineTrackingMap
4, // CrstJitPatchpoint
-1, // CrstJitPerf
6, // CrstJumpStubCache
0, // CrstLeafLock
-1, // CrstListLock
15, // CrstLoaderAllocator
16, // CrstLoaderAllocatorReferences
17, // CrstLoaderAllocator
18, // CrstLoaderAllocatorReferences
3, // CrstLoaderHeap
3, // CrstManagedObjectWrapperMap
10, // CrstMethodDescBackpatchInfoTracker
-1, // CrstMethodTableExposedObject
5, // CrstModule
16, // CrstModuleFixup
18, // CrstModuleFixup
4, // CrstModuleLookupTable
0, // CrstMulticoreJitHash
13, // CrstMulticoreJitManager
15, // CrstMulticoreJitManager
3, // CrstNativeImageEagerFixups
0, // CrstNativeImageLoad
0, // CrstNls
0, // CrstNotifyGdb
2, // CrstObjectList
5, // CrstPEImage
19, // CrstPendingTypeLoadEntry
21, // CrstPendingTypeLoadEntry
0, // CrstPerfMap
4, // CrstPgoData
0, // CrstPinnedByrefValidation
14, // CrstPinnedHeapHandleTable
16, // CrstPinnedHeapHandleTable
0, // CrstProfilerGCRefDataFreeList
13, // CrstProfilingAPIStatus
15, // CrstProfilingAPIStatus
4, // CrstRCWCache
0, // CrstRCWCleanupList
10, // CrstReadyToRunEntryPointToMethodDescMap
8, // CrstReflection
14, // CrstReJITGlobalRequest
16, // CrstReJITGlobalRequest
4, // CrstRetThunkCache
3, // CrstSavedExceptionInfo
0, // CrstSaveModuleProfileData
@ -244,7 +244,7 @@ int g_rgCrstLevelMap[] =
5, // CrstSingleUseLock
0, // CrstSpecialStatics
0, // CrstStackSampler
13, // CrstStaticBoxInit
15, // CrstStaticBoxInit
-1, // CrstStressLog
5, // CrstStubCache
0, // CrstStubDispatchCache
@ -252,10 +252,10 @@ int g_rgCrstLevelMap[] =
3, // CrstSyncBlockCache
0, // CrstSyncHashLock
5, // CrstSystemBaseDomain
13, // CrstSystemDomain
15, // CrstSystemDomain
0, // CrstSystemDomainDelayedUnloadList
0, // CrstThreadIdDispenser
12, // CrstThreadStore
14, // CrstThreadStore
8, // CrstTieredCompilation
4, // CrstTypeEquivalenceMap
10, // CrstTypeIDMap

View file

@ -315,6 +315,8 @@ MIDL_DEFINE_GUID(IID, IID_ICorDebugFunction3,0x09B70F28,0xE465,0x482D,0x99,0xE0,
MIDL_DEFINE_GUID(IID, IID_ICorDebugFunction4,0x72965963,0x34fd,0x46e9,0x94,0x34,0xb8,0x17,0xfe,0x6e,0x7f,0x43);
MIDL_DEFINE_GUID(IID, IID_ICorDebugFunction5,0x9D4DAB7B,0x3401,0x4F37,0xBD,0x08,0xCA,0x09,0xF3,0xFD,0xF1,0x0F);
MIDL_DEFINE_GUID(IID, IID_ICorDebugCode,0xCC7BCAF4,0x8A68,0x11d2,0x98,0x3C,0x00,0x00,0xF8,0x08,0x34,0x2D);

File diff suppressed because it is too large.

View file

@ -10218,7 +10218,8 @@ typedef /* [public] */
enum __MIDL___MIDL_itf_corprof_0000_0011_0001
{
COR_PRF_CODEGEN_DISABLE_INLINING = 0x1,
COR_PRF_CODEGEN_DISABLE_ALL_OPTIMIZATIONS = 0x2
COR_PRF_CODEGEN_DISABLE_ALL_OPTIMIZATIONS = 0x2,
COR_PRF_CODEGEN_DEBUG_INFO = 0x3
} COR_PRF_CODEGEN_FLAGS;

View file

@ -46,6 +46,10 @@ void NativeCodeVersion::SetGCCoverageInfo(PTR_GCCoverageInfo gcCover)
#else // FEATURE_CODE_VERSIONING
// This is just used as a unique id. Overflow is OK. If we happen to have more than 4+Billion rejits
// and somehow manage to not run out of memory, we'll just have to redefine ReJITID as size_t.
/* static */
static ReJITID s_GlobalReJitId = 1;
#ifndef DACCESS_COMPILE
NativeCodeVersionNode::NativeCodeVersionNode(
@ -553,20 +557,22 @@ ILCodeVersionNode::ILCodeVersionNode() :
m_pNextILVersionNode(dac_cast<PTR_ILCodeVersionNode>(nullptr)),
m_rejitState(ILCodeVersion::kStateRequested),
m_pIL(),
m_jitFlags(0)
m_jitFlags(0),
m_deoptimized(FALSE)
{
m_pIL.Store(dac_cast<PTR_COR_ILMETHOD>(nullptr));
}
#ifndef DACCESS_COMPILE
ILCodeVersionNode::ILCodeVersionNode(Module* pModule, mdMethodDef methodDef, ReJITID id) :
ILCodeVersionNode::ILCodeVersionNode(Module* pModule, mdMethodDef methodDef, ReJITID id, BOOL isDeoptimized) :
m_pModule(pModule),
m_methodDef(methodDef),
m_rejitId(id),
m_pNextILVersionNode(dac_cast<PTR_ILCodeVersionNode>(nullptr)),
m_rejitState(ILCodeVersion::kStateRequested),
m_pIL(nullptr),
m_jitFlags(0)
m_jitFlags(0),
m_deoptimized(isDeoptimized)
{}
#endif
@ -627,6 +633,12 @@ PTR_ILCodeVersionNode ILCodeVersionNode::GetNextILVersionNode() const
return m_pNextILVersionNode;
}
BOOL ILCodeVersionNode::IsDeoptimized() const
{
LIMITED_METHOD_DAC_CONTRACT;
return m_deoptimized;
}
#ifndef DACCESS_COMPILE
void ILCodeVersionNode::SetRejitState(ILCodeVersion::RejitFlags newState)
{
@ -941,6 +953,19 @@ const InstrumentedILOffsetMapping* ILCodeVersion::GetInstrumentedILMap() const
}
}
BOOL ILCodeVersion::IsDeoptimized() const
{
LIMITED_METHOD_DAC_CONTRACT;
if (m_storageKind == StorageKind::Explicit)
{
return AsNode()->IsDeoptimized();
}
else
{
return FALSE;
}
}
#ifndef DACCESS_COMPILE
void ILCodeVersion::SetRejitState(RejitFlags newState)
{
@ -1448,7 +1473,7 @@ NativeCodeVersion CodeVersionManager::GetNativeCodeVersion(PTR_MethodDesc pMetho
}
#ifndef DACCESS_COMPILE
HRESULT CodeVersionManager::AddILCodeVersion(Module* pModule, mdMethodDef methodDef, ReJITID rejitId, ILCodeVersion* pILCodeVersion)
HRESULT CodeVersionManager::AddILCodeVersion(Module* pModule, mdMethodDef methodDef, ILCodeVersion* pILCodeVersion, BOOL isDeoptimized)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(IsLockOwnedByCurrentThread());
@ -1461,7 +1486,7 @@ HRESULT CodeVersionManager::AddILCodeVersion(Module* pModule, mdMethodDef method
return hr;
}
ILCodeVersionNode* pILCodeVersionNode = new (nothrow) ILCodeVersionNode(pModule, methodDef, rejitId);
ILCodeVersionNode* pILCodeVersionNode = new (nothrow) ILCodeVersionNode(pModule, methodDef, InterlockedIncrement(reinterpret_cast<LONG*>(&s_GlobalReJitId)), isDeoptimized);
if (pILCodeVersionNode == NULL)
{
return E_OUTOFMEMORY;
@ -1495,7 +1520,7 @@ HRESULT CodeVersionManager::SetActiveILCodeVersions(ILCodeVersion* pActiveVersio
CONTRACTL_END;
_ASSERTE(!IsLockOwnedByCurrentThread());
HRESULT hr = S_OK;
#if DEBUG
for (DWORD i = 0; i < cActiveVersions; i++)
{

View file

@ -213,6 +213,7 @@ public:
RejitFlags GetRejitState() const;
BOOL GetEnableReJITCallback() const;
BOOL IsDeoptimized() const;
#ifndef DACCESS_COMPILE
void SetRejitState(RejitFlags newState);
void SetEnableReJITCallback(BOOL state);
@ -365,7 +366,7 @@ class ILCodeVersionNode
public:
ILCodeVersionNode();
#ifndef DACCESS_COMPILE
ILCodeVersionNode(Module* pModule, mdMethodDef methodDef, ReJITID id);
ILCodeVersionNode(Module* pModule, mdMethodDef methodDef, ReJITID id, BOOL isDeoptimized);
#endif
PTR_Module GetModule() const;
mdMethodDef GetMethodDef() const;
@ -376,6 +377,7 @@ public:
ILCodeVersion::RejitFlags GetRejitState() const;
BOOL GetEnableReJITCallback() const;
PTR_ILCodeVersionNode GetNextILVersionNode() const;
BOOL IsDeoptimized() const;
#ifndef DACCESS_COMPILE
void SetIL(COR_ILMETHOD* pIL);
void SetJitFlags(DWORD flags);
@ -394,6 +396,7 @@ private:
VolatilePtr<COR_ILMETHOD, PTR_COR_ILMETHOD> m_pIL;
Volatile<DWORD> m_jitFlags;
InstrumentedILOffsetMapping m_instrumentedILMap;
BOOL m_deoptimized;
};
class ILCodeVersionCollection
@ -591,7 +594,7 @@ public:
HRESULT hrStatus;
};
HRESULT AddILCodeVersion(Module* pModule, mdMethodDef methodDef, ReJITID rejitId, ILCodeVersion* pILCodeVersion);
HRESULT AddILCodeVersion(Module* pModule, mdMethodDef methodDef, ILCodeVersion* pILCodeVersion, BOOL isDeoptimized);
HRESULT AddNativeCodeVersion(ILCodeVersion ilCodeVersion, MethodDesc* pClosedMethodDesc, NativeCodeVersion::OptimizationTier optimizationTier, NativeCodeVersion* pNativeCodeVersion,
PatchpointInfo* patchpointInfo = NULL, unsigned ilOffset = 0);
PCODE PublishVersionableCodeIfNecessary(

View file

@ -413,6 +413,11 @@ public:
virtual void ResumeForGarbageCollectionStarted() = 0;
#endif
virtual BOOL IsSynchronizing() = 0;
#ifndef DACCESS_COMPILE
virtual HRESULT DeoptimizeMethod(Module* pModule, mdMethodDef methodDef) = 0;
virtual HRESULT IsMethodDeoptimized(Module *pModule, mdMethodDef methodDef, BOOL *pResult) = 0;
#endif //DACCESS_COMPILE
};
#ifndef DACCESS_COMPILE

View file

@ -1572,5 +1572,4 @@ BOOL EEDbgInterfaceImpl::AdjustContextForJITHelpersForDebugger(CONTEXT* context)
return AdjustContextForJITHelpers(nullptr, context);
}
#endif
#endif // DEBUGGING_SUPPORTED

View file

@ -189,6 +189,82 @@ void InlineTrackingMap::AddInlining(MethodDesc *inliner, MethodDesc *inlinee)
}
}
NativeImageInliningIterator::NativeImageInliningIterator() :
m_pModule(NULL),
m_dynamicBuffer(NULL),
m_dynamicBufferSize(0),
m_dynamicAvailable(0),
m_currentPos(-1)
{
}
HRESULT NativeImageInliningIterator::Reset(Module *pInlinerModule, MethodInModule inlinee)
{
_ASSERTE(pInlinerModule != NULL);
_ASSERTE(inlinee.m_module != NULL);
m_pModule = pInlinerModule;
m_inlinee = inlinee;
HRESULT hr = S_OK;
EX_TRY
{
// Trying to use the existing buffer
BOOL incompleteData;
Module *inlineeModule = m_inlinee.m_module;
mdMethodDef mdInlinee = m_inlinee.m_methodDef;
COUNT_T methodsAvailable = m_pModule->GetReadyToRunInliners(inlineeModule, mdInlinee, m_dynamicBufferSize, m_dynamicBuffer, &incompleteData);
// If the existing buffer is not large enough, reallocate.
if (methodsAvailable > m_dynamicBufferSize)
{
COUNT_T newSize = max(methodsAvailable, s_bufferSize);
m_dynamicBuffer = new MethodInModule[newSize];
m_dynamicBufferSize = newSize;
methodsAvailable = m_pModule->GetReadyToRunInliners(inlineeModule, mdInlinee, m_dynamicBufferSize, m_dynamicBuffer, &incompleteData);
_ASSERTE(methodsAvailable <= m_dynamicBufferSize);
}
m_dynamicAvailable = methodsAvailable;
}
EX_CATCH_HRESULT(hr);
if (FAILED(hr))
{
m_currentPos = s_failurePos;
}
else
{
m_currentPos = -1;
}
return hr;
}
BOOL NativeImageInliningIterator::Next()
{
if (m_currentPos == s_failurePos)
{
return FALSE;
}
m_currentPos++;
return m_currentPos < m_dynamicAvailable;
}
MethodInModule NativeImageInliningIterator::GetMethod()
{
// this evaluates true when m_currentPos == s_failurePos or m_currentPos == (COUNT_T)-1
// m_currentPos is an unsigned type
if (m_currentPos >= m_dynamicAvailable)
{
return MethodInModule();
}
return m_dynamicBuffer[m_currentPos];
}
#endif //!DACCESS_COMPILE
#ifdef FEATURE_READYTORUN
@ -503,7 +579,7 @@ COUNT_T CrossModulePersistentInlineTrackingMapR2R::GetInliners(PTR_Module inline
CONTRACTL
{
THROWS;
GC_TRIGGERS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;

View file

@ -149,7 +149,29 @@ public:
typedef DPTR(InlineTrackingMap) PTR_InlineTrackingMap;
#ifndef DACCESS_COMPILE
// Used to walk the NGEN/R2R inlining data
class NativeImageInliningIterator
{
public:
NativeImageInliningIterator();
HRESULT Reset(Module* pInlinerModule, MethodInModule inlinee);
BOOL Next();
MethodInModule GetMethod();
private:
Module *m_pModule;
MethodInModule m_inlinee;
NewArrayHolder<MethodInModule> m_dynamicBuffer;
COUNT_T m_dynamicBufferSize;
COUNT_T m_dynamicAvailable;
COUNT_T m_currentPos;
const COUNT_T s_bufferSize = 10;
const COUNT_T s_failurePos = -2;
};
#endif // DACCESS_COMPILE
// ------------------------------------ Persistance support ----------------------------------------------------------
@ -392,7 +414,7 @@ public:
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
GC_NOTRIGGER;
CAN_TAKE_LOCK;
MODE_ANY;
}
@ -413,7 +435,7 @@ public:
static void StaticInitialize()
{
WRAPPER_NO_CONTRACT;
s_mapCrst.Init(CrstJitInlineTrackingMap);
s_mapCrst.Init(CrstJitInlineTrackingMap, CrstFlags(CRST_DEBUGGER_THREAD));
}
static CrstBase *GetMapCrst() { return &s_mapCrst; }

View file

@ -935,6 +935,34 @@ PCODE MethodDesc::GetNativeCode()
return GetStableEntryPoint();
}
PCODE MethodDesc::GetNativeCodeReJITAware()
{
WRAPPER_NO_CONTRACT;
SUPPORTS_DAC;
PCODE pDefaultCode = GetNativeCode();
if (pDefaultCode != NULL)
{
return pDefaultCode;
}
{
CodeVersionManager *pCodeVersionManager = GetCodeVersionManager();
CodeVersionManager::LockHolder codeVersioningLockHolder;
ILCodeVersion ilVersion = pCodeVersionManager->GetActiveILCodeVersion(PTR_MethodDesc(this));
if (!ilVersion.IsDefaultVersion())
{
NativeCodeVersion activeNativeCodeVersion = ilVersion.GetActiveNativeCodeVersion(PTR_MethodDesc(this));
if (!activeNativeCodeVersion.IsNull())
{
return activeNativeCodeVersion.GetNativeCode();
}
}
return NULL;
}
}
//*******************************************************************************
PTR_PCODE MethodDesc::GetAddrOfNativeCodeSlot()
{

View file

@ -1387,6 +1387,14 @@ public:
return GetNativeCode() != NULL;
}
// Perf warning: takes the CodeVersionManagerLock on every call
BOOL HasNativeCodeReJITAware()
{
LIMITED_METHOD_DAC_CONTRACT;
return GetNativeCodeReJITAware() != NULL;
}
BOOL SetNativeCodeInterlocked(PCODE addr, PCODE pExpected = NULL);
PTR_PCODE GetAddrOfNativeCodeSlot();
@ -1443,6 +1451,11 @@ public:
// Returns the address of the native code.
PCODE GetNativeCode();
// Returns GetNativeCode() if it exists, but also checks to see if there
// is a non-default IL code version and returns that.
// Perf warning: takes the CodeVersionManagerLock on every call
PCODE GetNativeCodeReJITAware();
#if defined(FEATURE_JIT_PITCHING)
bool IsPitchable();
void PitchNativeCode();

View file

@ -80,7 +80,7 @@ public:
static void StaticInitialize()
{
WRAPPER_NO_CONTRACT;
s_lock.Init(CrstMethodDescBackpatchInfoTracker);
s_lock.Init(CrstMethodDescBackpatchInfoTracker, CrstFlags(CRST_DEBUGGER_THREAD));
}
#endif

View file

@ -372,6 +372,15 @@ PCODE MethodDesc::PrepareILBasedCode(PrepareCodeConfig* pConfig)
shouldTier = false;
}
#endif // FEATURE_TIERED_COMPILATION
NativeCodeVersion nativeCodeVersion = pConfig->GetCodeVersion();
if (shouldTier && !nativeCodeVersion.IsDefaultVersion())
{
CodeVersionManager::LockHolder codeVersioningLockHolder;
if (pConfig->GetCodeVersion().GetILCodeVersion().IsDeoptimized())
{
shouldTier = false;
}
}
if (pConfig->MayUsePrecompiledCode())
{

View file

@ -147,11 +147,6 @@
#include "../debug/ee/controller.h"
#include "codeversion.h"
// This is just used as a unique id. Overflow is OK. If we happen to have more than 4+Billion rejits
// and somehow manage to not run out of memory, we'll just have to redefine ReJITID as size_t.
/* static */
static ReJITID s_GlobalReJitId = 1;
/* static */
CrstStatic ReJitManager::s_csGlobalRequest;
@ -169,6 +164,10 @@ CORJIT_FLAGS ReJitManager::JitFlagsFromProfCodegenFlags(DWORD dwCodegenFlags)
{
jitFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE);
}
if ((dwCodegenFlags & COR_PRF_CODEGEN_DEBUG_INFO) != 0)
{
jitFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_INFO);
}
if ((dwCodegenFlags & COR_PRF_CODEGEN_DISABLE_INLINING) != 0)
{
jitFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_NO_INLINING);
@ -418,82 +417,6 @@ COR_IL_MAP* ProfilerFunctionControl::GetInstrumentedMapEntries()
}
#ifndef DACCESS_COMPILE
NativeImageInliningIterator::NativeImageInliningIterator() :
m_pModule(NULL),
m_dynamicBuffer(NULL),
m_dynamicBufferSize(0),
m_dynamicAvailable(0),
m_currentPos(-1)
{
}
HRESULT NativeImageInliningIterator::Reset(Module *pInlinerModule, MethodInModule inlinee)
{
_ASSERTE(pInlinerModule != NULL);
_ASSERTE(inlinee.m_module != NULL);
m_pModule = pInlinerModule;
m_inlinee = inlinee;
HRESULT hr = S_OK;
EX_TRY
{
// Trying to use the existing buffer
BOOL incompleteData;
Module *inlineeModule = m_inlinee.m_module;
mdMethodDef mdInlinee = m_inlinee.m_methodDef;
COUNT_T methodsAvailable = m_pModule->GetReadyToRunInliners(inlineeModule, mdInlinee, m_dynamicBufferSize, m_dynamicBuffer, &incompleteData);
// If the existing buffer is not large enough, reallocate.
if (methodsAvailable > m_dynamicBufferSize)
{
COUNT_T newSize = max(methodsAvailable, s_bufferSize);
m_dynamicBuffer = new MethodInModule[newSize];
m_dynamicBufferSize = newSize;
methodsAvailable = m_pModule->GetReadyToRunInliners(inlineeModule, mdInlinee, m_dynamicBufferSize, m_dynamicBuffer, &incompleteData);
_ASSERTE(methodsAvailable <= m_dynamicBufferSize);
}
m_dynamicAvailable = methodsAvailable;
}
EX_CATCH_HRESULT(hr);
if (FAILED(hr))
{
m_currentPos = s_failurePos;
}
else
{
m_currentPos = -1;
}
return hr;
}
BOOL NativeImageInliningIterator::Next()
{
if (m_currentPos == s_failurePos)
{
return FALSE;
}
m_currentPos++;
return m_currentPos < m_dynamicAvailable;
}
MethodInModule NativeImageInliningIterator::GetMethod()
{
// this evaluates true when m_currentPos == s_failurePos or m_currentPos == (COUNT_T)-1
// m_currentPos is an unsigned type
if (m_currentPos >= m_dynamicAvailable)
{
return MethodInModule();
}
return m_dynamicBuffer[m_currentPos];
}
//---------------------------------------------------------------------------------------
// ReJitManager implementation
@ -802,7 +725,6 @@ HRESULT ReJitManager::UpdateNativeInlinerActiveILVersions(
// Iterate through all modules, for any that are NGEN or R2R need to check if there are inliners there and call
// RequestReJIT on them
// TODO: is the default domain enough for coreclr?
AppDomain::AssemblyIterator domainAssemblyIterator = SystemDomain::System()->DefaultDomain()->IterateAssembliesEx((AssemblyIterationFlags) (kIncludeLoaded | kIncludeExecution));
CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
NativeImageInliningIterator inlinerIter;
@ -975,7 +897,7 @@ HRESULT ReJitManager::BindILVersion(
// Either there was no ILCodeVersion yet for this MethodDesc OR whatever we've found
// couldn't be reused (and needed to be reverted). Create a new ILCodeVersion to return
// to the caller.
HRESULT hr = pCodeVersionManager->AddILCodeVersion(pModule, methodDef, InterlockedIncrement(reinterpret_cast<LONG*>(&s_GlobalReJitId)), pILCodeVersion);
HRESULT hr = pCodeVersionManager->AddILCodeVersion(pModule, methodDef, pILCodeVersion, FALSE);
pILCodeVersion->SetEnableReJITCallback(fDoCallback);
return hr;
}

View file

@ -68,30 +68,6 @@ protected:
#endif // FEATURE_REJIT
#ifndef DACCESS_COMPILE
// Used to walk the NGEN/R2R inlining data
class NativeImageInliningIterator
{
public:
NativeImageInliningIterator();
HRESULT Reset(Module* pInlinerModule, MethodInModule inlinee);
BOOL Next();
MethodInModule GetMethod();
private:
Module *m_pModule;
MethodInModule m_inlinee;
NewArrayHolder<MethodInModule> m_dynamicBuffer;
COUNT_T m_dynamicBufferSize;
COUNT_T m_dynamicAvailable;
COUNT_T m_currentPos;
const COUNT_T s_bufferSize = 10;
const COUNT_T s_failurePos = -2;
};
#endif // DACCESS_COMPILE
//---------------------------------------------------------------------------------------
// The big honcho. One of these per AppDomain, plus one for the
// SharedDomain. Contains the hash table of ReJitInfo structures to manage

View file

@ -268,6 +268,7 @@ void TieredCompilationManager::AsyncPromoteToTier1(
_ASSERTE(!currentNativeCodeVersion.IsNull());
_ASSERTE(!currentNativeCodeVersion.IsFinalTier());
_ASSERTE(createTieringBackgroundWorkerRef != nullptr);
_ASSERTE(!currentNativeCodeVersion.GetILCodeVersion().IsDeoptimized());
NativeCodeVersion t1NativeCodeVersion;
HRESULT hr;
@ -1003,7 +1004,7 @@ void TieredCompilationManager::ActivateCodeVersion(NativeCodeVersion nativeCodeV
bool mayHaveEntryPointSlotsToBackpatch = pMethod->MayHaveEntryPointSlotsToBackpatch();
MethodDescBackpatchInfoTracker::ConditionalLockHolder slotBackpatchLockHolder(mayHaveEntryPointSlotsToBackpatch);
CodeVersionManager::LockHolder codeVersioningLockHolder;
// As long as we are exclusively using any non-JumpStamp publishing for tiered compilation
// methods this first attempt should succeed
ilParent = nativeCodeVersion.GetILCodeVersion();

View file

@ -75,9 +75,14 @@ private:
private:
void OptimizeMethod(NativeCodeVersion nativeCodeVersion);
HRESULT DeoptimizeMethodHelper(Module* pModule, mdMethodDef methodDef);
NativeCodeVersion GetNextMethodToOptimize();
BOOL CompileCodeVersion(NativeCodeVersion nativeCodeVersion);
void ActivateCodeVersion(NativeCodeVersion nativeCodeVersion);
public:
HRESULT DeoptimizeMethod(Module* pModule, mdMethodDef methodDef);
HRESULT IsMethodDeoptimized(Module *pModule, mdMethodDef methodDef, BOOL *pResult);
#ifndef DACCESS_COMPILE
public: