From ae1be1247896d36fa5dde683b936a28fcfe16614 Mon Sep 17 00:00:00 2001
From: Andy Ayers
Date: Sat, 6 May 2023 07:53:10 -0700
Subject: [PATCH] JIT: produce consistent flow graph when producing or consuming profile data (#85860)

Always try to merge "branch to next" blocks when building the initial
flow graph if BBINSTR or BBOPT is set.

Fixes #85856.
---
 src/coreclr/jit/compiler.h        | 9 +++++++--
 src/coreclr/jit/fgbasic.cpp       | 7 ++++---
 src/coreclr/jit/fgprofile.cpp     | 4 ++--
 src/coreclr/jit/importer.cpp      | 2 +-
 src/coreclr/jit/importercalls.cpp | 4 ++--
 5 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index d2c8ebeb72b..fe49473e7c7 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -9471,9 +9471,14 @@ public:
         return jitFlags->IsSet(JitFlags::JIT_FLAG_BBINSTR);
     }
 
-    bool IsInstrumentedOptimized() const
+    bool IsInstrumentedAndOptimized() const
     {
-        return IsInstrumented() && jitFlags->IsSet(JitFlags::JIT_FLAG_TIER1);
+        return IsInstrumented() && jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT);
+    }
+
+    bool IsInstrumentedOrOptimized() const
+    {
+        return IsInstrumented() || jitFlags->IsSet(JitFlags::JIT_FLAG_BBOPT);
     }
 
     // true if we should use the PINVOKE_{BEGIN,END} helpers instead of generating
diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp
index c5d780f7a36..cb627fd3b43 100644
--- a/src/coreclr/jit/fgbasic.cpp
+++ b/src/coreclr/jit/fgbasic.cpp
@@ -1824,8 +1824,9 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed
                 // Compute jump target address
                 signed jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
 
-                if (compIsForInlining() && jmpDist == 0 &&
-                    (opcode == CEE_LEAVE || opcode == CEE_LEAVE_S || opcode == CEE_BR || opcode == CEE_BR_S))
+                if ((jmpDist == 0) &&
+                    (opcode == CEE_LEAVE || opcode == CEE_LEAVE_S || opcode == CEE_BR || opcode == CEE_BR_S) &&
+                    opts.IsInstrumentedOrOptimized())
                 {
                     break; /* NOP */
                 }
@@ -2974,7 +2975,7 @@ unsigned Compiler::fgMakeBasicBlocks(const BYTE* codeAddr, IL_OFFSET codeSize, F
 
                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
 
-                if (compIsForInlining() && jmpDist == 0 && (opcode == CEE_BR || opcode == CEE_BR_S))
+                if ((jmpDist == 0) && (opcode == CEE_BR || opcode == CEE_BR_S) && opts.IsInstrumentedOrOptimized())
                 {
                     continue; /* NOP */
                 }
diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp
index b391ae99dd4..ef614baca48 100644
--- a/src/coreclr/jit/fgprofile.cpp
+++ b/src/coreclr/jit/fgprofile.cpp
@@ -436,7 +436,7 @@ void BlockCountInstrumentor::RelocateProbes()
 {
     // We only see such blocks when optimizing. They are flagged by the importer.
     //
-    if (!m_comp->opts.IsInstrumentedOptimized() || ((m_comp->optMethodFlags & OMF_HAS_TAILCALL_SUCCESSOR) == 0))
+    if (!m_comp->opts.IsInstrumentedAndOptimized() || ((m_comp->optMethodFlags & OMF_HAS_TAILCALL_SUCCESSOR) == 0))
     {
         // No problematic blocks to worry about.
         //
@@ -1616,7 +1616,7 @@ void EfficientEdgeCountInstrumentor::RelocateProbes()
 {
     // We only see such blocks when optimizing. They are flagged by the importer.
     //
-    if (!m_comp->opts.IsInstrumentedOptimized() || ((m_comp->optMethodFlags & OMF_HAS_TAILCALL_SUCCESSOR) == 0))
+    if (!m_comp->opts.IsInstrumentedAndOptimized() || ((m_comp->optMethodFlags & OMF_HAS_TAILCALL_SUCCESSOR) == 0))
     {
         // No problematic blocks to worry about.
         //
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index e212041d1bb..e4238fcf719 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -7476,7 +7476,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
             case CEE_BR_S:
                 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
 
-                if (compIsForInlining() && jmpDist == 0)
+                if ((jmpDist == 0) && opts.IsInstrumentedOrOptimized())
                 {
                     break; /* NOP */
                 }
diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp
index 993b2843684..ce7d2f286db 100644
--- a/src/coreclr/jit/importercalls.cpp
+++ b/src/coreclr/jit/importercalls.cpp
@@ -1282,7 +1282,7 @@ DONE:
     //   have to check for anything that might introduce a recursive tail call.
     // * We only instrument root method blocks in OSR methods,
     //
-    if ((opts.IsInstrumentedOptimized() || opts.IsOSR()) && !compIsForInlining())
+    if ((opts.IsInstrumentedAndOptimized() || opts.IsOSR()) && !compIsForInlining())
    {
        // If a root method tail call candidate block is not a BBJ_RETURN, it should have a unique
        // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile
@@ -7201,7 +7201,7 @@ bool Compiler::impConsiderCallProbe(GenTreeCall* call, IL_OFFSET ilOffset)
         return false;
     }
 
-    assert(opts.OptimizationDisabled() || opts.IsInstrumentedOptimized());
+    assert(opts.OptimizationDisabled() || opts.IsInstrumentedAndOptimized());
     assert(!compIsForInlining());
 
     // During importation, optionally flag this block as one that
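
To make the predicate split concrete, the following is a minimal stand-alone
C++ sketch (an illustration under stated assumptions, not code from these
sources: JitFlagsSketch and TreatBranchToNextAsNop are hypothetical stand-ins
for the JIT's JitFlags/Compiler state). Probe relocation still requires both
BBINSTR and BBOPT (IsInstrumentedAndOptimized), while folding a zero-distance
"branch to next" into a NOP now fires when either flag is set
(IsInstrumentedOrOptimized), so the instrumented and optimized compilations of
a method build the same flow graph and therefore agree on the profile schema.

#include <cstdio>

// Hypothetical stand-in for the JIT's flag state; bbInstr/bbOpt model
// JIT_FLAG_BBINSTR (producing profile data) and JIT_FLAG_BBOPT (consuming it).
struct JitFlagsSketch
{
    bool bbInstr;
    bool bbOpt;

    bool IsInstrumented() const
    {
        return bbInstr;
    }
    bool IsInstrumentedAndOptimized() const
    {
        return bbInstr && bbOpt;
    }
    bool IsInstrumentedOrOptimized() const
    {
        return bbInstr || bbOpt;
    }
};

// Hypothetical helper mirroring the updated fgFindJumpTargets /
// fgMakeBasicBlocks condition: a zero-distance unconditional branch
// (CEE_BR/CEE_BR_S/CEE_LEAVE/CEE_LEAVE_S) is treated as a NOP whenever the
// method is being instrumented or optimized, so both tiers agree on block
// layout.
bool TreatBranchToNextAsNop(const JitFlagsSketch& opts, int jmpDist, bool isBranchOrLeave)
{
    return (jmpDist == 0) && isBranchOrLeave && opts.IsInstrumentedOrOptimized();
}

int main()
{
    JitFlagsSketch instrumented{true, false}; // BBINSTR only (e.g. instrumented Tier0)
    JitFlagsSketch optimized{false, true};    // BBOPT only (e.g. Tier1 consuming counts)

    // Both configurations fold the branch, so counts and edge probes land on
    // the same blocks in both compilations.
    printf("instrumented folds br-to-next: %d\n", TreatBranchToNextAsNop(instrumented, 0, true));
    printf("optimized    folds br-to-next: %d\n", TreatBranchToNextAsNop(optimized, 0, true));
    return 0;
}

Under these assumptions, an OR predicate for the folding is the conservative
choice: it keeps the block merge behavior identical whether the flow graph is
being built to emit probes or to consume their counts.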