diff --git a/src/coreclr/vm/callcounting.cpp b/src/coreclr/vm/callcounting.cpp index 2fe7f4aa3d2731..dea3750dea0aef 100644 --- a/src/coreclr/vm/callcounting.cpp +++ b/src/coreclr/vm/callcounting.cpp @@ -25,6 +25,30 @@ const PCODE CallCountingStub::TargetForThresholdReached = (PCODE)GetEEFuncEntryP #ifndef DACCESS_COMPILE +CallCountingManager::CallCountingInfo::CallCountingInfo(NativeCodeVersion codeVersion) + : m_codeVersion(codeVersion), + m_callCountingStub(nullptr), + m_remainingCallCount(0), + m_stage(Stage::Disabled) +{ + WRAPPER_NO_CONTRACT; + _ASSERTE(!codeVersion.IsNull()); +} + +CallCountingManager::CallCountingInfo * +CallCountingManager::CallCountingInfo::CreateWithCallCountingDisabled(NativeCodeVersion codeVersion) +{ + CONTRACTL + { + THROWS; + GC_NOTRIGGER; + MODE_ANY; + } + CONTRACTL_END; + + return new CallCountingInfo(codeVersion); +} + CallCountingManager::CallCountingInfo::CallCountingInfo(NativeCodeVersion codeVersion, CallCount callCountThreshold) : m_codeVersion(codeVersion), m_callCountingStub(nullptr), @@ -64,6 +88,7 @@ NativeCodeVersion CallCountingManager::CallCountingInfo::GetCodeVersion() const const CallCountingStub *CallCountingManager::CallCountingInfo::GetCallCountingStub() const { WRAPPER_NO_CONTRACT; + _ASSERTE(m_stage != Stage::Disabled); return m_callCountingStub; } @@ -93,6 +118,7 @@ void CallCountingManager::CallCountingInfo::ClearCallCountingStub() PTR_CallCount CallCountingManager::CallCountingInfo::GetRemainingCallCountCell() { WRAPPER_NO_CONTRACT; + _ASSERTE(m_stage != Stage::Disabled); //_ASSERTE(m_callCountingStub != nullptr); return &m_remainingCallCount; @@ -110,6 +136,7 @@ CallCountingManager::CallCountingInfo::Stage CallCountingManager::CallCountingIn FORCEINLINE void CallCountingManager::CallCountingInfo::SetStage(Stage stage) { WRAPPER_NO_CONTRACT; + _ASSERTE(m_stage != Stage::Disabled); _ASSERTE(stage <= Stage::Complete); switch (stage) @@ -478,8 +505,59 @@ void CallCountingManager::StaticInitialize() } #endif 
+bool CallCountingManager::IsCallCountingEnabled(NativeCodeVersion codeVersion) +{ + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + MODE_ANY; + } + CONTRACTL_END; + + _ASSERTE(!codeVersion.IsNull()); + _ASSERTE(codeVersion.IsDefaultVersion()); + _ASSERTE(codeVersion.GetMethodDesc()->IsEligibleForTieredCompilation()); + + CodeVersionManager::LockHolder codeVersioningLockHolder; + + PTR_CallCountingInfo callCountingInfo = m_callCountingInfoByCodeVersionHash.Lookup(codeVersion); + return callCountingInfo == NULL || callCountingInfo->GetStage() != CallCountingInfo::Stage::Disabled; +} + #ifndef DACCESS_COMPILE +void CallCountingManager::DisableCallCounting(NativeCodeVersion codeVersion) +{ + CONTRACTL + { + THROWS; + GC_NOTRIGGER; + MODE_ANY; + } + CONTRACTL_END; + + _ASSERTE(!codeVersion.IsNull()); + _ASSERTE(codeVersion.IsDefaultVersion()); + _ASSERTE(codeVersion.GetMethodDesc()->IsEligibleForTieredCompilation()); + + CodeVersionManager::LockHolder codeVersioningLockHolder; + + CallCountingInfo *callCountingInfo = m_callCountingInfoByCodeVersionHash.Lookup(codeVersion); + if (callCountingInfo != nullptr) + { + // Call counting may already have been disabled due to the possibility of concurrent or reentering JIT of the same + // native code version of a method. The call counting info is created with call counting enabled or disabled and it + // cannot be changed thereafter for consistency in dependents of the info. + _ASSERTE(callCountingInfo->GetStage() == CallCountingInfo::Stage::Disabled); + return; + } + + NewHolder<CallCountingInfo> callCountingInfoHolder = CallCountingInfo::CreateWithCallCountingDisabled(codeVersion); + m_callCountingInfoByCodeVersionHash.Add(callCountingInfoHolder); + callCountingInfoHolder.SuppressRelease(); +} + // Returns true if the code entry point was updated to reflect the active code version, false otherwise.
In normal paths, the // code entry point is not updated only when the use of call counting stubs is disabled, as in that case returning to the // prestub is necessary for further call counting. On exception, the code entry point may or may not have been updated and it's @@ -511,7 +589,12 @@ bool CallCountingManager::SetCodeEntryPoint( _ASSERTE(createTieringBackgroundWorkerRef == nullptr || !*createTieringBackgroundWorkerRef); if (!methodDesc->IsEligibleForTieredCompilation() || - activeCodeVersion.IsFinalTier() || + ( + // For a default code version that is not tier 0, call counting will have been disabled by this time (checked + // below). Avoid the redundant and not-insignificant expense of GetOptimizationTier() on a default code version. + !activeCodeVersion.IsDefaultVersion() && + activeCodeVersion.IsFinalTier() + ) || !g_pConfig->TieredCompilation_CallCounting()) { methodDesc->SetCodeEntryPoint(codeEntryPoint); @@ -1070,6 +1153,11 @@ void CallCountingManager::DeleteAllCallCountingStubs() { CallCountingInfo *callCountingInfo = *it; CallCountingInfo::Stage callCountingStage = callCountingInfo->GetStage(); + if (callCountingStage == CallCountingInfo::Stage::Disabled) + { + continue; + } + if (callCountingInfo->GetCallCountingStub() != nullptr) { callCountingInfo->ClearCallCountingStub(); diff --git a/src/coreclr/vm/callcounting.h b/src/coreclr/vm/callcounting.h index 254f22d5eb2dce..59071aa51f140b 100644 --- a/src/coreclr/vm/callcounting.h +++ b/src/coreclr/vm/callcounting.h @@ -191,6 +191,9 @@ class CallCountingManager // Stub is not active and will not become active, call counting complete, promoted, stub may be deleted Complete, + + // Call counting is disabled, only used for the default code version to indicate that it is to be optimized + Disabled }; private: @@ -200,7 +203,10 @@ class CallCountingManager Stage m_stage; #ifndef DACCESS_COMPILE + private: + CallCountingInfo(NativeCodeVersion codeVersion); public: + static CallCountingInfo 
*CreateWithCallCountingDisabled(NativeCodeVersion codeVersion); CallCountingInfo(NativeCodeVersion codeVersion, CallCount callCountThreshold); ~CallCountingInfo(); #endif @@ -347,7 +353,13 @@ class CallCountingManager static void StaticInitialize(); #endif // !DACCESS_COMPILE +public: + bool IsCallCountingEnabled(NativeCodeVersion codeVersion); + #ifndef DACCESS_COMPILE +public: + void DisableCallCounting(NativeCodeVersion codeVersion); + public: static bool SetCodeEntryPoint( NativeCodeVersion activeCodeVersion, diff --git a/src/coreclr/vm/codeversion.cpp b/src/coreclr/vm/codeversion.cpp index 50bae39dc7046d..0333d2d7941422 100644 --- a/src/coreclr/vm/codeversion.cpp +++ b/src/coreclr/vm/codeversion.cpp @@ -346,28 +346,22 @@ NativeCodeVersion::OptimizationTier NativeCodeVersion::GetOptimizationTier() con } else { - PTR_MethodDesc pMethodDesc = GetMethodDesc(); - OptimizationTier tier = pMethodDesc->GetMethodDescOptimizationTier(); - if (tier == OptimizationTier::OptimizationTierUnknown) - { - tier = TieredCompilationManager::GetInitialOptimizationTier(pMethodDesc); - } - return tier; + return TieredCompilationManager::GetInitialOptimizationTier(GetMethodDesc()); } } #ifndef DACCESS_COMPILE void NativeCodeVersion::SetOptimizationTier(OptimizationTier tier) { - STANDARD_VM_CONTRACT; - + WRAPPER_NO_CONTRACT; if (m_storageKind == StorageKind::Explicit) { AsNode()->SetOptimizationTier(tier); } else { - GetMethodDesc()->SetMethodDescOptimizationTier(tier); + // State changes should have been made previously such that the initial tier is the new tier + _ASSERTE(TieredCompilationManager::GetInitialOptimizationTier(GetMethodDesc()) == tier); } } #endif diff --git a/src/coreclr/vm/codeversion.h b/src/coreclr/vm/codeversion.h index 990085695f5345..2849f77285c627 100644 --- a/src/coreclr/vm/codeversion.h +++ b/src/coreclr/vm/codeversion.h @@ -82,7 +82,6 @@ class NativeCodeVersion OptimizationTierOptimized, // may do less optimizations than tier 1 
OptimizationTier0Instrumented, OptimizationTier1Instrumented, - OptimizationTierUnknown = 0xFFFFFFFF }; #ifdef FEATURE_TIERED_COMPILATION OptimizationTier GetOptimizationTier() const; diff --git a/src/coreclr/vm/eeconfig.cpp b/src/coreclr/vm/eeconfig.cpp index 776febb4045381..a6b9d47b3e36d6 100644 --- a/src/coreclr/vm/eeconfig.cpp +++ b/src/coreclr/vm/eeconfig.cpp @@ -789,35 +789,11 @@ HRESULT EEConfig::sync() } #endif -#ifdef FEATURE_PGO - if (fTieredPGO) - { - // Initial tier for R2R is always just OptimizationTier0 - // For ILOnly it depends on TieredPGO_InstrumentOnlyHotCode: - // OptimizationTier0 as we don't want to instrument the initial version (will only instrument hot Tier0) - // OptimizationTier0Instrumented - instrument all ILOnly code - if (g_pConfig->TieredPGO_InstrumentOnlyHotCode()) - { - tieredCompilation_DefaultTier = (DWORD)NativeCodeVersion::OptimizationTier0; - } - else - { - tieredCompilation_DefaultTier = (DWORD)NativeCodeVersion::OptimizationTier0Instrumented; - } - } - else -#endif - { - tieredCompilation_DefaultTier = (DWORD)NativeCodeVersion::OptimizationTier0; - } - if (ETW::CompilationLog::TieredCompilation::Runtime::IsEnabled()) { ETW::CompilationLog::TieredCompilation::Runtime::SendSettings(); } } -#else // !FEATURE_TIERED_COMPILATION - tieredCompilation_DefaultTier = (DWORD)NativeCodeVersion::OptimizationTierOptimized; #endif #if defined(FEATURE_ON_STACK_REPLACEMENT) diff --git a/src/coreclr/vm/eeconfig.h b/src/coreclr/vm/eeconfig.h index fecb76eb69fb41..23c4c3d99ed3d9 100644 --- a/src/coreclr/vm/eeconfig.h +++ b/src/coreclr/vm/eeconfig.h @@ -96,11 +96,6 @@ class EEConfig bool TieredCompilation_UseCallCountingStubs() const { LIMITED_METHOD_CONTRACT; return fTieredCompilation_UseCallCountingStubs; } DWORD TieredCompilation_DeleteCallCountingStubsAfter() const { LIMITED_METHOD_CONTRACT; return tieredCompilation_DeleteCallCountingStubsAfter; } #endif // FEATURE_TIERED_COMPILATION - DWORD TieredCompilation_DefaultTier() const - { - 
LIMITED_METHOD_CONTRACT; - return tieredCompilation_DefaultTier; - } #if defined(FEATURE_PGO) bool TieredPGO(void) const { LIMITED_METHOD_CONTRACT; return fTieredPGO; } @@ -612,7 +607,6 @@ class EEConfig DWORD tieredCompilation_CallCountingDelayMs; DWORD tieredCompilation_DeleteCallCountingStubsAfter; #endif - DWORD tieredCompilation_DefaultTier; #if defined(FEATURE_PGO) bool fTieredPGO; diff --git a/src/coreclr/vm/method.cpp b/src/coreclr/vm/method.cpp index 32d9a717280647..6fe925b719bb5f 100644 --- a/src/coreclr/vm/method.cpp +++ b/src/coreclr/vm/method.cpp @@ -239,10 +239,6 @@ HRESULT MethodDesc::EnsureCodeDataExists(AllocMemTracker *pamTracker) if (alloc == NULL) return E_OUTOFMEMORY; -#ifdef FEATURE_CODE_VERSIONING - alloc->OptimizationTier = NativeCodeVersion::OptimizationTierUnknown; -#endif - // Try to set the field. Suppress clean-up if we win the race. if (InterlockedCompareExchangeT(&m_codeData, (MethodDescCodeData*)alloc, NULL) == NULL) amTracker.SuppressRelease(); @@ -264,16 +260,6 @@ HRESULT MethodDesc::SetMethodDescVersionState(PTR_MethodDescVersioningState stat return S_OK; } - -void MethodDesc::SetMethodDescOptimizationTier(NativeCodeVersion::OptimizationTier tier) -{ - STANDARD_VM_CONTRACT; - - IfFailThrow(EnsureCodeDataExists(NULL)); - - _ASSERTE(m_codeData != NULL); - VolatileStoreWithoutBarrier(&m_codeData->OptimizationTier, tier); -} #endif // FEATURE_CODE_VERSIONING #ifdef FEATURE_INTERPRETER @@ -311,15 +297,6 @@ PTR_MethodDescVersioningState MethodDesc::GetMethodDescVersionState() return NULL; return VolatileLoadWithoutBarrier(&codeData->VersioningState); } - -NativeCodeVersion::OptimizationTier MethodDesc::GetMethodDescOptimizationTier() -{ - WRAPPER_NO_CONTRACT; - PTR_MethodDescCodeData codeData = VolatileLoadWithoutBarrier(&m_codeData); - if (codeData == NULL) - return NativeCodeVersion::OptimizationTierUnknown; - return VolatileLoadWithoutBarrier(&codeData->OptimizationTier); -} #endif // FEATURE_CODE_VERSIONING 
//******************************************************************************* diff --git a/src/coreclr/vm/method.hpp b/src/coreclr/vm/method.hpp index 689ea59ad5c336..c7b5e9cbd42c28 100644 --- a/src/coreclr/vm/method.hpp +++ b/src/coreclr/vm/method.hpp @@ -254,7 +254,6 @@ struct MethodDescCodeData final { #ifdef FEATURE_CODE_VERSIONING PTR_MethodDescVersioningState VersioningState; - NativeCodeVersion::OptimizationTier OptimizationTier; #endif // FEATURE_CODE_VERSIONING PCODE TemporaryEntryPoint; #ifdef FEATURE_INTERPRETER @@ -1955,10 +1954,8 @@ class MethodDesc #ifdef FEATURE_CODE_VERSIONING #ifndef DACCESS_COMPILE HRESULT SetMethodDescVersionState(PTR_MethodDescVersioningState state); - void SetMethodDescOptimizationTier(NativeCodeVersion::OptimizationTier tier); #endif // !DACCESS_COMPILE PTR_MethodDescVersioningState GetMethodDescVersionState(); - NativeCodeVersion::OptimizationTier GetMethodDescOptimizationTier(); #endif // FEATURE_CODE_VERSIONING public: @@ -2340,6 +2337,20 @@ class PrepareCodeConfig #ifdef FEATURE_TIERED_COMPILATION public: + bool WasTieringDisabledBeforeJitting() const + { + WRAPPER_NO_CONTRACT; + return m_wasTieringDisabledBeforeJitting; + } + + void SetWasTieringDisabledBeforeJitting() + { + WRAPPER_NO_CONTRACT; + _ASSERTE(GetMethodDesc()->IsEligibleForTieredCompilation()); + + m_wasTieringDisabledBeforeJitting = true; + } + bool ShouldCountCalls() const { WRAPPER_NO_CONTRACT; @@ -2448,6 +2459,7 @@ class PrepareCodeConfig #ifdef FEATURE_TIERED_COMPILATION private: + bool m_wasTieringDisabledBeforeJitting; bool m_shouldCountCalls; #endif diff --git a/src/coreclr/vm/prestub.cpp b/src/coreclr/vm/prestub.cpp index fafa016af69e3c..c89c9b5c3daf94 100644 --- a/src/coreclr/vm/prestub.cpp +++ b/src/coreclr/vm/prestub.cpp @@ -353,10 +353,16 @@ PCODE MethodDesc::PrepareILBasedCode(PrepareCodeConfig* pConfig) && HasUnmanagedCallersOnlyAttribute()))) { NativeCodeVersion codeVersion = pConfig->GetCodeVersion(); - if (!codeVersion.IsFinalTier()) + 
if (codeVersion.IsDefaultVersion()) + { + pConfig->GetMethodDesc()->GetLoaderAllocator()->GetCallCountingManager()->DisableCallCounting(codeVersion); + _ASSERTE(codeVersion.IsFinalTier()); + } + else if (!codeVersion.IsFinalTier()) { codeVersion.SetOptimizationTier(NativeCodeVersion::OptimizationTierOptimized); } + pConfig->SetWasTieringDisabledBeforeJitting(); shouldTier = false; } #endif // FEATURE_TIERED_COMPILATION @@ -1086,6 +1092,7 @@ PrepareCodeConfig::PrepareCodeConfig(NativeCodeVersion codeVersion, BOOL needsMu m_generatedOrLoadedNewCode(false), #endif #ifdef FEATURE_TIERED_COMPILATION + m_wasTieringDisabledBeforeJitting(false), m_shouldCountCalls(false), #endif m_jitSwitchedToMinOpt(false), @@ -1258,42 +1265,17 @@ const char *PrepareCodeConfig::GetJitOptimizationTierStr(PrepareCodeConfig *conf // This function should be called before SetNativeCode() for consistency with usage of FinalizeOptimizationTierForTier0Jit bool PrepareCodeConfig::FinalizeOptimizationTierForTier0Load() { - STANDARD_VM_CONTRACT; - _ASSERTE(GetMethodDesc()->IsEligibleForTieredCompilation()); _ASSERTE(!JitSwitchedToOptimized()); - bool shouldTier = true; - - switch (GetCodeVersion().GetOptimizationTier()) - { - case NativeCodeVersion::OptimizationTier0: // This is the default when we may tier up further - break; - - case NativeCodeVersion::OptimizationTierOptimized: // If we've decided for some reason that the R2R code is the final tier - shouldTier = false; - break; - - case NativeCodeVersion::OptimizationTier0Instrumented: - // We should adjust the tier back to regular Tier 0, since the R2R code is not instrumented. 
- GetCodeVersion().SetOptimizationTier(NativeCodeVersion::OptimizationTier0); - break; - - default: - _ASSERTE(!"Unexpected optimization tier for a method loaded via R2R"); - UNREACHABLE(); - } if (!IsForMulticoreJit()) { - return shouldTier; // should count calls if SetNativeCode() succeeds + return true; // should count calls if SetNativeCode() succeeds } // When using multi-core JIT, the loaded code would not be used until the method is called. Record some information that may // be used later when the method is called. - if (shouldTier) - { - ((MulticoreJitPrepareCodeConfig *)this)->SetWasTier0(); - } + ((MulticoreJitPrepareCodeConfig *)this)->SetWasTier0(); return false; // don't count calls } @@ -1302,8 +1284,6 @@ bool PrepareCodeConfig::FinalizeOptimizationTierForTier0Load() // version, and it should have already been finalized. bool PrepareCodeConfig::FinalizeOptimizationTierForTier0LoadOrJit() { - STANDARD_VM_CONTRACT; - _ASSERTE(GetMethodDesc()->IsEligibleForTieredCompilation()); if (IsForMulticoreJit()) @@ -1331,6 +1311,10 @@ bool PrepareCodeConfig::FinalizeOptimizationTierForTier0LoadOrJit() // Update the tier in the code version. The JIT may have decided to switch from tier 0 to optimized, in which case // call counting would have to be disabled for the method. 
NativeCodeVersion codeVersion = GetCodeVersion(); + if (codeVersion.IsDefaultVersion()) + { + GetMethodDesc()->GetLoaderAllocator()->GetCallCountingManager()->DisableCallCounting(codeVersion); + } codeVersion.SetOptimizationTier(NativeCodeVersion::OptimizationTierOptimized); return false; // don't count calls } diff --git a/src/coreclr/vm/tieredcompilation.cpp b/src/coreclr/vm/tieredcompilation.cpp index 71231e36426b27..5a66f26ef328b3 100644 --- a/src/coreclr/vm/tieredcompilation.cpp +++ b/src/coreclr/vm/tieredcompilation.cpp @@ -100,9 +100,34 @@ NativeCodeVersion::OptimizationTier TieredCompilationManager::GetInitialOptimiza } _ASSERT(!pMethodDesc->RequestedAggressiveOptimization()); + + if (!pMethodDesc->GetLoaderAllocator()->GetCallCountingManager()->IsCallCountingEnabled(NativeCodeVersion(pMethodDesc))) + { + // Tier 0 call counting may have been disabled for several reasons, the intention is to start with and stay at an + // optimized tier + return NativeCodeVersion::OptimizationTierOptimized; + } + +#ifdef FEATURE_PGO + if (g_pConfig->TieredPGO()) + { + // Initial tier for R2R is always just OptimizationTier0 + // For ILOnly it depends on TieredPGO_InstrumentOnlyHotCode: + // 1 - OptimizationTier0 as we don't want to instrument the initial version (will only instrument hot Tier0) + // 2 - OptimizationTier0Instrumented - instrument all ILOnly code + if (g_pConfig->TieredPGO_InstrumentOnlyHotCode() || + ExecutionManager::IsReadyToRunCode(pMethodDesc->GetNativeCode())) + { + return NativeCodeVersion::OptimizationTier0; + } + return NativeCodeVersion::OptimizationTier0Instrumented; + } #endif - return (NativeCodeVersion::OptimizationTier)g_pConfig->TieredCompilation_DefaultTier(); + return NativeCodeVersion::OptimizationTier0; +#else + return NativeCodeVersion::OptimizationTierOptimized; +#endif } bool TieredCompilationManager::IsTieringDelayActive() @@ -1027,10 +1052,61 @@ CORJIT_FLAGS TieredCompilationManager::GetJitFlags(PrepareCodeConfig *config) { 
WRAPPER_NO_CONTRACT; _ASSERTE(config != nullptr); + _ASSERTE( + !config->WasTieringDisabledBeforeJitting() || + config->GetCodeVersion().IsFinalTier()); CORJIT_FLAGS flags; + // Determine the optimization tier for the default code version (slightly faster common path during startup compared to + // below), and disable call counting and set the optimization tier if it's not going to be tier 0 (this is used in other + // places for the default code version where necessary to avoid the extra expense of GetOptimizationTier()). NativeCodeVersion nativeCodeVersion = config->GetCodeVersion(); + if (nativeCodeVersion.IsDefaultVersion() && !config->WasTieringDisabledBeforeJitting()) + { + MethodDesc *methodDesc = nativeCodeVersion.GetMethodDesc(); + if (!methodDesc->IsEligibleForTieredCompilation()) + { + _ASSERTE(nativeCodeVersion.GetOptimizationTier() == NativeCodeVersion::OptimizationTierOptimized); + return flags; + } + + _ASSERT(!methodDesc->RequestedAggressiveOptimization()); + + if (g_pConfig->TieredCompilation_QuickJit()) + { + NativeCodeVersion::OptimizationTier currentTier = nativeCodeVersion.GetOptimizationTier(); + if (currentTier == NativeCodeVersion::OptimizationTier::OptimizationTier0Instrumented) + { + flags.Set(CORJIT_FLAGS::CORJIT_FLAG_BBINSTR); + flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER0); + return flags; + } + + if (currentTier == NativeCodeVersion::OptimizationTier::OptimizationTier1Instrumented) + { + flags.Set(CORJIT_FLAGS::CORJIT_FLAG_BBINSTR); + flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER1); + return flags; + } + + _ASSERTE(!nativeCodeVersion.IsFinalTier()); + flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER0); + if (g_pConfig->TieredPGO() && g_pConfig->TieredPGO_InstrumentOnlyHotCode()) + { + // If we plan to only instrument hot code we have to make an exception + // for cold methods with loops so if those self promote to OSR they need + // some profile to optimize, so here we allow JIT to enable instrumentation + // if current method has loops and is 
eligible for OSR. + flags.Set(CORJIT_FLAGS::CORJIT_FLAG_BBINSTR_IF_LOOPS); + } + return flags; + } + + methodDesc->GetLoaderAllocator()->GetCallCountingManager()->DisableCallCounting(nativeCodeVersion); + nativeCodeVersion.SetOptimizationTier(NativeCodeVersion::OptimizationTierOptimized); + return flags; + } switch (nativeCodeVersion.GetOptimizationTier()) {