diff --git a/src/coreclr/inc/daccess.h b/src/coreclr/inc/daccess.h index df31ac579e52c1..3ca0ca40ee42e5 100644 --- a/src/coreclr/inc/daccess.h +++ b/src/coreclr/inc/daccess.h @@ -97,7 +97,7 @@ // pRS=pRS->pright; // else // { -// return pRS->pjit; +// return pRS->_pjit; // } // } // @@ -108,7 +108,7 @@ // In the assignment statement the compiler will automatically use // the implicit conversion from PTR_RangeSection to RangeSection*, // causing a host instance to be created. Finally, if an appropriate -// section is found the use of pRS->pjit will cause an implicit +// section is found the use of pRS->_pjit will cause an implicit // conversion from PTR_IJitManager to IJitManager. The VPTR code // will look at target memory to determine the actual derived class // for the JitManager and instantiate the right class in the host so diff --git a/src/coreclr/inc/dacvars.h b/src/coreclr/inc/dacvars.h index b1d915efb6f7e8..4e8f7e1f7959be 100644 --- a/src/coreclr/inc/dacvars.h +++ b/src/coreclr/inc/dacvars.h @@ -75,7 +75,7 @@ #define UNKNOWN_POINTER_TYPE SIZE_T -DEFINE_DACVAR_VOLATILE(PTR_RangeSection, ExecutionManager__m_CodeRangeList, ExecutionManager::m_CodeRangeList) +DEFINE_DACVAR(PTR_RangeSectionMap, ExecutionManager__g_codeRangeMap, ExecutionManager::g_codeRangeMap) DEFINE_DACVAR(PTR_EECodeManager, ExecutionManager__m_pDefaultCodeMan, ExecutionManager::m_pDefaultCodeMan) DEFINE_DACVAR_VOLATILE(LONG, ExecutionManager__m_dwReaderCount, ExecutionManager::m_dwReaderCount) DEFINE_DACVAR_VOLATILE(LONG, ExecutionManager__m_dwWriterLock, ExecutionManager::m_dwWriterLock) diff --git a/src/coreclr/inc/vptr_list.h b/src/coreclr/inc/vptr_list.h index d8e6cd42bd7c34..4683dd86e65129 100644 --- a/src/coreclr/inc/vptr_list.h +++ b/src/coreclr/inc/vptr_list.h @@ -13,6 +13,7 @@ VPTR_CLASS(EECodeManager) VPTR_CLASS(RangeList) VPTR_CLASS(LockedRangeList) +VPTR_CLASS(CodeRangeMapRangeList) #ifdef EnC_SUPPORTED VPTR_CLASS(EditAndContinueModule) diff --git a/src/coreclr/nativeaot/Runtime/inc/daccess.h b/src/coreclr/nativeaot/Runtime/inc/daccess.h index a7853555aca619..3fa4d8b0008bf2 100644 --- a/src/coreclr/nativeaot/Runtime/inc/daccess.h +++ b/src/coreclr/nativeaot/Runtime/inc/daccess.h @@ -95,7 +95,7 @@ // pRS=pRS->pright; // else // { -// return pRS->pjit; +// return pRS->_pjit; // } // } // @@ -106,7 +106,7 @@ // In the assignment statement the compiler will automatically use // the implicit conversion from PTR_RangeSection to RangeSection*, // causing a host instance to be created. Finally, if an appropriate -// section is found the use of pRS->pjit will cause an implicit +// section is found the use of pRS->_pjit will cause an implicit // conversion from PTR_IJitManager to IJitManager. The VPTR code // will look at target memory to determine the actual derived class // for the JitManager and instantiate the right class in the host so diff --git a/src/coreclr/vm/assembly.cpp b/src/coreclr/vm/assembly.cpp index b8e49820d1c7ca..3c171e67c66def 100644 --- a/src/coreclr/vm/assembly.cpp +++ b/src/coreclr/vm/assembly.cpp @@ -465,7 +465,6 @@ Assembly *Assembly::CreateDynamic(AssemblyBinder* pBinder, NativeAssemblyNamePar if ((access & ASSEMBLY_ACCESS_COLLECT) != 0) { AssemblyLoaderAllocator *pCollectibleLoaderAllocator = new AssemblyLoaderAllocator(); - pCollectibleLoaderAllocator->SetCollectible(); pLoaderAllocator = pCollectibleLoaderAllocator; // Some of the initialization functions are not virtual. 
Call through the derived class diff --git a/src/coreclr/vm/assemblynative.cpp b/src/coreclr/vm/assemblynative.cpp index 5636de48aa23b3..526dbe4f617030 100644 --- a/src/coreclr/vm/assemblynative.cpp +++ b/src/coreclr/vm/assemblynative.cpp @@ -1135,7 +1135,6 @@ extern "C" INT_PTR QCALLTYPE AssemblyNative_InitializeAssemblyLoadContext(INT_PT { // Create a new AssemblyLoaderAllocator for an AssemblyLoadContext loaderAllocator = new AssemblyLoaderAllocator(); - loaderAllocator->SetCollectible(); GCX_COOP(); LOADERALLOCATORREF pManagedLoaderAllocator = NULL; diff --git a/src/coreclr/vm/ceeload.cpp b/src/coreclr/vm/ceeload.cpp index a2dc77ba80dc09..f9b466a19c8625 100644 --- a/src/coreclr/vm/ceeload.cpp +++ b/src/coreclr/vm/ceeload.cpp @@ -4355,7 +4355,7 @@ void Module::RunEagerFixupsUnlocked() ExecutionManager::AddCodeRange( base, base + (TADDR)pNativeImage->GetVirtualSize(), ExecutionManager::GetReadyToRunJitManager(), - RangeSection::RANGE_SECTION_READYTORUN, + RangeSection::RANGE_SECTION_NONE, this /* pHeapListOrZapModule */); } #endif // !DACCESS_COMPILE diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index a2eee4ca59898b..bb53bbc0cb3f50 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -49,14 +49,13 @@ SPTR_IMPL(EEJitManager, ExecutionManager, m_pEEJitManager); SPTR_IMPL(ReadyToRunJitManager, ExecutionManager, m_pReadyToRunJitManager); #endif -VOLATILE_SPTR_IMPL_INIT(RangeSection, ExecutionManager, m_CodeRangeList, NULL); +SVAL_IMPL(RangeSectionMapData, ExecutionManager, g_codeRangeMap); VOLATILE_SVAL_IMPL_INIT(LONG, ExecutionManager, m_dwReaderCount, 0); VOLATILE_SVAL_IMPL_INIT(LONG, ExecutionManager, m_dwWriterLock, 0); #ifndef DACCESS_COMPILE CrstStatic ExecutionManager::m_JumpStubCrst; -CrstStatic ExecutionManager::m_RangeCrst; unsigned ExecutionManager::m_normal_JumpStubLookup; unsigned ExecutionManager::m_normal_JumpStubUnique; @@ -391,7 +390,7 @@ void UnwindInfoTable::AddToUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, PT_R if (pRS != NULL) { for(int i = 0; i < unwindInfoCount; i++) - AddToUnwindInfoTable(&pRS->pUnwindInfoTable, &unwindInfo[i], pRS->LowAddress, pRS->HighAddress); + AddToUnwindInfoTable(&pRS->_pUnwindInfoTable, &unwindInfo[i], pRS->_range.RangeStart(), pRS->_range.RangeEndOpen()); } } @@ -409,14 +408,14 @@ void UnwindInfoTable::AddToUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, PT_R _ASSERTE(pRS != NULL); if (pRS != NULL) { - _ASSERTE(pRS->pjit->GetCodeType() == (miManaged | miIL)); - if (pRS->pjit->GetCodeType() == (miManaged | miIL)) + _ASSERTE(pRS->_pjit->GetCodeType() == (miManaged | miIL)); + if (pRS->_pjit->GetCodeType() == (miManaged | miIL)) { // This cast is justified because only EEJitManager's have the code type above. 
- EEJitManager* pJitMgr = (EEJitManager*)(pRS->pjit); + EEJitManager* pJitMgr = (EEJitManager*)(pRS->_pjit); CodeHeader * pHeader = pJitMgr->GetCodeHeaderFromStartAddress(entryPoint); for(ULONG i = 0; i < pHeader->GetNumberOfUnwindInfos(); i++) - RemoveFromUnwindInfoTable(&pRS->pUnwindInfoTable, pRS->LowAddress, pRS->LowAddress + pHeader->GetUnwindInfo(i)->BeginAddress); + RemoveFromUnwindInfoTable(&pRS->_pUnwindInfoTable, pRS->_range.RangeStart(), pRS->_range.RangeStart() + pHeader->GetUnwindInfo(i)->BeginAddress); } } } @@ -453,15 +452,15 @@ extern CrstStatic g_StubUnwindInfoHeapSegmentsCrst; PCODE methodEntry =(PCODE) heapIterator.GetMethodCode(); RangeSection * pRS = ExecutionManager::FindCodeRange(methodEntry, ExecutionManager::GetScanFlags()); _ASSERTE(pRS != NULL); - _ASSERTE(pRS->pjit->GetCodeType() == (miManaged | miIL)); - if (pRS != NULL && pRS->pjit->GetCodeType() == (miManaged | miIL)) + _ASSERTE(pRS->_pjit->GetCodeType() == (miManaged | miIL)); + if (pRS != NULL && pRS->_pjit->GetCodeType() == (miManaged | miIL)) { // This cast is justified because only EEJitManager's have the code type above. - EEJitManager* pJitMgr = (EEJitManager*)(pRS->pjit); + EEJitManager* pJitMgr = (EEJitManager*)(pRS->_pjit); CodeHeader * pHeader = pJitMgr->GetCodeHeaderFromStartAddress(methodEntry); int unwindInfoCount = pHeader->GetNumberOfUnwindInfos(); for(int i = 0; i < unwindInfoCount; i++) - AddToUnwindInfoTable(&pRS->pUnwindInfoTable, pHeader->GetUnwindInfo(i), pRS->LowAddress, pRS->HighAddress); + AddToUnwindInfoTable(&pRS->_pUnwindInfoTable, pHeader->GetUnwindInfo(i), pRS->_range.RangeStart(), pRS->_range.RangeEndOpen()); } } } @@ -793,14 +792,17 @@ values: m_CodeRangeList and hold it while walking the lists Uses ReaderLockHolder (allows multiple reeaders with no writers) ----------------------------------------- ExecutionManager::FindCodeRange -ExecutionManager::FindZapModule +ExecutionManager::FindReadyToRunModule ExecutionManager::EnumMemoryRegions +AND +ExecutionManager::IsManagedCode +ExecutionManager::IsManagedCodeWithLock +The IsManagedCode checks are notable as they protect not just access to the RangeSection walking, +but the actual RangeSection while determining if a given ip IsManagedCode. 
Uses WriterLockHolder (allows single writer and no readers) ----------------------------------------- -ExecutionManager::AddRangeHelper -ExecutionManager::DeleteRangeHelper - +ExecutionManager::DeleteRange */ //----------------------------------------------------------------------------- @@ -4200,7 +4202,10 @@ BOOL EEJitManager::JitCodeToMethodInfo( _ASSERTE(pRangeSection != NULL); - TADDR start = dac_cast(pRangeSection->pjit)->FindMethodCode(pRangeSection, currentPC); + if (pRangeSection->_flags & RangeSection::RANGE_SECTION_RANGELIST) + return FALSE; + + TADDR start = dac_cast(pRangeSection->_pjit)->FindMethodCode(pRangeSection, currentPC); if (start == NULL) return FALSE; @@ -4240,7 +4245,12 @@ StubCodeBlockKind EEJitManager::GetStubCodeBlockKind(RangeSection * pRangeSectio SUPPORTS_DAC; } CONTRACTL_END; - TADDR start = dac_cast(pRangeSection->pjit)->FindMethodCode(pRangeSection, currentPC); + if (pRangeSection->_flags & RangeSection::RANGE_SECTION_RANGELIST) + { + return pRangeSection->_pRangeList->GetCodeBlockKind(); + } + + TADDR start = dac_cast(pRangeSection->_pjit)->FindMethodCode(pRangeSection, currentPC); if (start == NULL) return STUB_CODE_BLOCK_NOCODE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); @@ -4256,9 +4266,9 @@ TADDR EEJitManager::FindMethodCode(PCODE currentPC) } CONTRACTL_END; RangeSection * pRS = ExecutionManager::FindCodeRange(currentPC, ExecutionManager::GetScanFlags()); - if (pRS == NULL || (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) == 0) + if (pRS == NULL || (pRS->_flags & RangeSection::RANGE_SECTION_CODEHEAP) == 0) return STUB_CODE_BLOCK_NOCODE; - return dac_cast(pRS->pjit)->FindMethodCode(pRS, currentPC); + return dac_cast(pRS->_pjit)->FindMethodCode(pRS, currentPC); } // Finds the header corresponding to the code at offset "delta". 
@@ -4269,8 +4279,9 @@ TADDR EEJitManager::FindMethodCode(RangeSection * pRangeSection, PCODE currentPC LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(pRangeSection != NULL); + _ASSERTE(pRangeSection->_flags & RangeSection::RANGE_SECTION_CODEHEAP); - HeapList *pHp = dac_cast(pRangeSection->pHeapListOrZapModule); + HeapList *pHp = pRangeSection->_pHeapList; if ((currentPC < pHp->startAddress) || (currentPC > pHp->endAddress)) @@ -4681,12 +4692,13 @@ void ExecutionManager::Init() m_JumpStubCrst.Init(CrstJumpStubCache, CrstFlags(CRST_UNSAFE_ANYMODE|CRST_DEBUGGER_THREAD)); - m_RangeCrst.Init(CrstExecuteManRangeLock, CRST_UNSAFE_ANYMODE); + new(&g_codeRangeMap) RangeSectionMap(); m_pDefaultCodeMan = new EECodeManager(); m_pEEJitManager = new EEJitManager(); + #ifdef FEATURE_READYTORUN m_pReadyToRunJitManager = new ReadyToRunJitManager(); #endif @@ -4710,7 +4722,9 @@ ExecutionManager::FindCodeRange(PCODE currentPC, ScanFlag scanFlag) if (scanFlag == ScanReaderLock) return FindCodeRangeWithLock(currentPC); - return GetRangeSection(currentPC); + // Since ScanReaderLock is not set, then we should behave AS IF the ReaderLock is held + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; + return GetRangeSection(currentPC, &lockState); } //************************************************************************** @@ -4724,8 +4738,15 @@ ExecutionManager::FindCodeRangeWithLock(PCODE currentPC) SUPPORTS_DAC; } CONTRACTL_END; - ReaderLockHolder rlh; - return GetRangeSection(currentPC); + RangeSectionLockState lockState = RangeSectionLockState::None; + RangeSection *result = GetRangeSection(currentPC, &lockState); + if (lockState == RangeSectionLockState::NeedsLock) + { + ReaderLockHolder rlh; + lockState = RangeSectionLockState::ReaderLocked; + result = GetRangeSection(currentPC, &lockState); + } + return result; } @@ -4787,7 +4808,9 @@ BOOL ExecutionManager::IsManagedCode(PCODE currentPC) if (GetScanFlags() == ScanReaderLock) return IsManagedCodeWithLock(currentPC); - return IsManagedCodeWorker(currentPC); + // Since ScanReaderLock is not set, then we must assume that the ReaderLock is effectively taken. + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; + return IsManagedCodeWorker(currentPC, &lockState); } //************************************************************************** @@ -4799,8 +4822,17 @@ BOOL ExecutionManager::IsManagedCodeWithLock(PCODE currentPC) GC_NOTRIGGER; } CONTRACTL_END; - ReaderLockHolder rlh; - return IsManagedCodeWorker(currentPC); + RangeSectionLockState lockState = RangeSectionLockState::None; + BOOL result = IsManagedCodeWorker(currentPC, &lockState); + + if (lockState == RangeSectionLockState::NeedsLock) + { + ReaderLockHolder rlh; + lockState = RangeSectionLockState::ReaderLocked; + result = IsManagedCodeWorker(currentPC, &lockState); + } + + return result; } //************************************************************************** @@ -4827,14 +4859,15 @@ BOOL ExecutionManager::IsManagedCode(PCODE currentPC, HostCallPreference hostCal return FALSE; } - return IsManagedCodeWorker(currentPC); + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; + return IsManagedCodeWorker(currentPC, &lockState); #endif } //************************************************************************** // Assumes that the ExecutionManager reader/writer lock is taken or that // it is safe not to take it. 
-BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC) +BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC, RangeSectionLockState *pLockState) { CONTRACTL { NOTHROW; @@ -4845,16 +4878,16 @@ BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC) // taken over the call to JitCodeToMethodInfo too so that nobody pulls out // the range section from underneath us. - RangeSection * pRS = GetRangeSection(currentPC); + RangeSection * pRS = GetRangeSection(currentPC, pLockState); if (pRS == NULL) return FALSE; - if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) + if (pRS->_flags & RangeSection::RANGE_SECTION_CODEHEAP) { // Typically if we find a Jit Manager we are inside a managed method // but on we could also be in a stub, so we check for that // as well and we don't consider stub to be real managed code. - TADDR start = dac_cast(pRS->pjit)->FindMethodCode(pRS, currentPC); + TADDR start = dac_cast(pRS->_pjit)->FindMethodCode(pRS, currentPC); if (start == NULL) return FALSE; CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader)); @@ -4863,9 +4896,9 @@ BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC) } #ifdef FEATURE_READYTORUN else - if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN) + if (pRS->_pR2RModule != NULL) { - if (dac_cast(pRS->pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL)) + if (dac_cast(pRS->_pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL)) return TRUE; } #endif @@ -4882,15 +4915,34 @@ BOOL ExecutionManager::IsReadyToRunCode(PCODE currentPC) GC_NOTRIGGER; } CONTRACTL_END; - // This may get called for arbitrary code addresses. Note that the lock is - // taken over the call to JitCodeToMethodInfo too so that nobody pulls out - // the range section from underneath us. - #ifdef FEATURE_READYTORUN - RangeSection * pRS = GetRangeSection(currentPC); - if (pRS != NULL && (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)) + RangeSectionLockState lockState = RangeSectionLockState::None; + RangeSection * pRS = GetRangeSection(currentPC, &lockState); + + // Since R2R images are not collectible, and we always can find + // non-collectible RangeSections without taking a lock we don't need + // to take the actual ReaderLock here if GetRangeSection returns NULL + +#ifdef _DEBUG + if (pRS == NULL) { - if (dac_cast(pRS->pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL)) + // This logic checks to ensure that the behavior of the fully locked + // lookup matches that of the unlocked lookup. + // Note that if the locked lookup finds something, we need to check + // the unlocked lookup, in case a new module was loaded in the meantime. 
+ ReaderLockHolder rlh; + lockState = RangeSectionLockState::ReaderLocked; + if (GetRangeSection(currentPC, &lockState) != NULL) + { + lockState = RangeSectionLockState::None; + assert(GetRangeSection(currentPC, &lockState) == NULL); + } + } +#endif // _DEBUG + + if (pRS != NULL && (pRS->_pR2RModule != NULL)) + { + if (dac_cast(pRS->_pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL)) return TRUE; } #endif @@ -4920,7 +4972,7 @@ LPCWSTR ExecutionManager::GetJitName() } #endif // !FEATURE_MERGE_JIT_AND_ENGINE -RangeSection* ExecutionManager::GetRangeSection(TADDR addr) +RangeSection* ExecutionManager::GetRangeSection(TADDR addr, RangeSectionLockState *pLockState) { CONTRACTL { NOTHROW; @@ -4929,148 +4981,7 @@ RangeSection* ExecutionManager::GetRangeSection(TADDR addr) SUPPORTS_DAC; } CONTRACTL_END; - RangeSection * pHead = m_CodeRangeList; - - if (pHead == NULL) - { - return NULL; - } - - RangeSection *pCurr = pHead; - RangeSection *pLast = NULL; - -#ifndef DACCESS_COMPILE - RangeSection *pLastUsedRS = (pCurr != NULL) ? pCurr->pLastUsed : NULL; - - if (pLastUsedRS != NULL) - { - // positive case - if ((addr >= pLastUsedRS->LowAddress) && - (addr < pLastUsedRS->HighAddress) ) - { - return pLastUsedRS; - } - - RangeSection * pNextAfterLastUsedRS = pLastUsedRS->pnext; - - // negative case - if ((addr < pLastUsedRS->LowAddress) && - (pNextAfterLastUsedRS == NULL || addr >= pNextAfterLastUsedRS->HighAddress)) - { - return NULL; - } - } -#endif - - while (pCurr != NULL) - { - // See if addr is in [pCurr->LowAddress .. pCurr->HighAddress) - if (pCurr->LowAddress <= addr) - { - // Since we are sorted, once pCurr->HighAddress is less than addr - // then all subsequence ones will also be lower, so we are done. - if (addr >= pCurr->HighAddress) - { - // we'll return NULL and put pLast into pLastUsed - pCurr = NULL; - } - else - { - // addr must be in [pCurr->LowAddress .. pCurr->HighAddress) - _ASSERTE((pCurr->LowAddress <= addr) && (addr < pCurr->HighAddress)); - - // Found the matching RangeSection - // we'll return pCurr and put it into pLastUsed - pLast = pCurr; - } - - break; - } - pLast = pCurr; - pCurr = pCurr->pnext; - } - -#ifndef DACCESS_COMPILE - // Cache pCurr as pLastUsed in the head node - // Unless we are on an MP system with many cpus - // where this sort of caching actually diminishes scaling during server GC - // due to many processors writing to a common location - if (g_SystemInfo.dwNumberOfProcessors < 4 || !GCHeapUtilities::IsServerHeap() || !GCHeapUtilities::IsGCInProgress()) - pHead->pLastUsed = pLast; -#endif - - return pCurr; -} - -RangeSection* ExecutionManager::GetRangeSectionAndPrev(RangeSection *pHead, TADDR addr, RangeSection** ppPrev) -{ - WRAPPER_NO_CONTRACT; - - RangeSection *pCurr; - RangeSection *pPrev; - RangeSection *result = NULL; - - for (pPrev = NULL, pCurr = pHead; - pCurr != NULL; - pPrev = pCurr, pCurr = pCurr->pnext) - { - // See if addr is in [pCurr->LowAddress .. pCurr->HighAddress) - if (pCurr->LowAddress > addr) - continue; - - if (addr >= pCurr->HighAddress) - break; - - // addr must be in [pCurr->LowAddress .. 
pCurr->HighAddress) - _ASSERTE((pCurr->LowAddress <= addr) && (addr < pCurr->HighAddress)); - - // Found the matching RangeSection - result = pCurr; - - // Write back pPrev to ppPrev if it is non-null - if (ppPrev != NULL) - *ppPrev = pPrev; - - break; - } - - // If we failed to find a match write NULL to ppPrev if it is non-null - if ((ppPrev != NULL) && (result == NULL)) - { - *ppPrev = NULL; - } - - return result; -} - -/* static */ -PTR_Module ExecutionManager::FindZapModule(TADDR currentData) -{ - CONTRACTL - { - NOTHROW; - GC_NOTRIGGER; - MODE_ANY; - STATIC_CONTRACT_HOST_CALLS; - SUPPORTS_DAC; - } - CONTRACTL_END; - - ReaderLockHolder rlh; - - RangeSection * pRS = GetRangeSection(currentData); - if (pRS == NULL) - return NULL; - - if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) - return NULL; - -#ifdef FEATURE_READYTORUN - if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN) - return NULL; -#endif - - return dac_cast(pRS->pHeapListOrZapModule); + return GetCodeRangeMap()->LookupRangeSection(addr, pLockState); } /* static */ @@ -5087,19 +4998,34 @@ PTR_Module ExecutionManager::FindReadyToRunModule(TADDR currentData) CONTRACTL_END; #ifdef FEATURE_READYTORUN - ReaderLockHolder rlh; + RangeSectionLockState lockState = RangeSectionLockState::None; + RangeSection * pRS = GetRangeSection(currentData, &lockState); - RangeSection * pRS = GetRangeSection(currentData); + // Since R2R images are not collectible, and we always can find + // non-collectible RangeSections without taking a lock we don't need + // to take the actual ReaderLock here if GetRangeSection returns NULL if (pRS == NULL) - return NULL; + { +#ifdef _DEBUG + { + // This logic checks to ensure that the behavior of the fully locked + // lookup matches that of the unlocked lookup. + // Note that if the locked lookup finds something, we need to check + // the unlocked lookup, in case a new module was loaded in the meantime. 
+ ReaderLockHolder rlh; + lockState = RangeSectionLockState::ReaderLocked; + if (GetRangeSection(currentData, &lockState) != NULL) + { + lockState = RangeSectionLockState::None; + assert(GetRangeSection(currentData, &lockState) == NULL); + } + } +#endif // _DEBUG - if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) return NULL; + } - if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN) - return dac_cast(pRS->pHeapListOrZapModule);; - - return NULL; + return pRS->_pR2RModule; #else return NULL; #endif @@ -5117,118 +5043,91 @@ PTR_Module ExecutionManager::FindModuleForGCRefMap(TADDR currentData) } CONTRACTL_END; +#ifndef FEATURE_READYTORUN + return NULL; +#else RangeSection * pRS = FindCodeRange(currentData, ExecutionManager::GetScanFlags()); if (pRS == NULL) return NULL; - if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) - return NULL; - -#ifdef FEATURE_READYTORUN - // RANGE_SECTION_READYTORUN is intentionally not filtered out here -#endif - - return dac_cast(pRS->pHeapListOrZapModule); + return pRS->_pR2RModule; +#endif // FEATURE_READYTORUN } #ifndef DACCESS_COMPILE -/* NGenMem depends on this entrypoint */ NOINLINE void ExecutionManager::AddCodeRange(TADDR pStartRange, TADDR pEndRange, IJitManager * pJit, RangeSection::RangeSectionFlags flags, - void * pHp) + PTR_Module pModule) { CONTRACTL { THROWS; GC_NOTRIGGER; + HOST_CALLS; + PRECONDITION(pStartRange < pEndRange); PRECONDITION(CheckPointer(pJit)); - PRECONDITION(CheckPointer(pHp)); + PRECONDITION(CheckPointer(pModule)); } CONTRACTL_END; - AddRangeHelper(pStartRange, - pEndRange, - pJit, - flags, - dac_cast(pHp)); + ReaderLockHolder rlh; + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; // + + PTR_RangeSection pRange = GetCodeRangeMap()->AllocateRange(Range(pStartRange, pEndRange), pJit, flags, pModule, &lockState); + if (pRange == NULL) + ThrowOutOfMemory(); } -void ExecutionManager::AddRangeHelper(TADDR pStartRange, - TADDR pEndRange, - IJitManager * pJit, - RangeSection::RangeSectionFlags flags, - TADDR pHeapListOrZapModule) +NOINLINE +void ExecutionManager::AddCodeRange(TADDR pStartRange, + TADDR pEndRange, + IJitManager * pJit, + RangeSection::RangeSectionFlags flags, + PTR_HeapList pHp) { CONTRACTL { THROWS; GC_NOTRIGGER; HOST_CALLS; PRECONDITION(pStartRange < pEndRange); - PRECONDITION(pHeapListOrZapModule != NULL); + PRECONDITION(CheckPointer(pJit)); + PRECONDITION(CheckPointer(pHp)); } CONTRACTL_END; - RangeSection *pnewrange = new RangeSection; + ReaderLockHolder rlh; + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; // - _ASSERTE(pEndRange > pStartRange); + PTR_RangeSection pRange = GetCodeRangeMap()->AllocateRange(Range(pStartRange, pEndRange), pJit, flags, pHp, &lockState); - pnewrange->LowAddress = pStartRange; - pnewrange->HighAddress = pEndRange; - pnewrange->pjit = pJit; - pnewrange->pnext = NULL; - pnewrange->flags = flags; - pnewrange->pLastUsed = NULL; - pnewrange->pHeapListOrZapModule = pHeapListOrZapModule; -#if defined(TARGET_AMD64) - pnewrange->pUnwindInfoTable = NULL; -#endif // defined(TARGET_AMD64) - { - CrstHolder ch(&m_RangeCrst); // Acquire the Crst before linking in a new RangeList + if (pRange == NULL) + ThrowOutOfMemory(); +} - RangeSection * current = m_CodeRangeList; - RangeSection * previous = NULL; +NOINLINE +void ExecutionManager::AddCodeRange(TADDR pStartRange, + TADDR pEndRange, + IJitManager * pJit, + RangeSection::RangeSectionFlags flags, + PTR_CodeRangeMapRangeList pRangeList) +{ + CONTRACTL { + THROWS; + GC_NOTRIGGER; + HOST_CALLS; 
+ PRECONDITION(pStartRange < pEndRange); + PRECONDITION(CheckPointer(pJit)); + PRECONDITION(CheckPointer(pRangeList)); + } CONTRACTL_END; - if (current != NULL) - { - while (true) - { - // Sort addresses top down so that more recently created ranges - // will populate the top of the list - if (pnewrange->LowAddress > current->LowAddress) - { - // Asserts if ranges are overlapping - _ASSERTE(pnewrange->LowAddress >= current->HighAddress); - pnewrange->pnext = current; + ReaderLockHolder rlh; + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; // - if (previous == NULL) // insert new head - { - m_CodeRangeList = pnewrange; - } - else - { // insert in the middle - previous->pnext = pnewrange; - } - break; - } + PTR_RangeSection pRange = GetCodeRangeMap()->AllocateRange(Range(pStartRange, pEndRange), pJit, flags, pRangeList, &lockState); - RangeSection * next = current->pnext; - if (next == NULL) // insert at end of list - { - current->pnext = pnewrange; - break; - } - - // Continue walking the RangeSection list - previous = current; - current = next; - } - } - else - { - m_CodeRangeList = pnewrange; - } - } + if (pRange == NULL) + ThrowOutOfMemory(); } // Deletes a single range starting at pStartRange @@ -5239,114 +5138,64 @@ void ExecutionManager::DeleteRange(TADDR pStartRange) GC_NOTRIGGER; } CONTRACTL_END; - RangeSection *pCurr = NULL; - { - // Acquire the Crst before unlinking a RangeList. - // NOTE: The Crst must be acquired BEFORE we grab the writer lock, as the - // writer lock forces us into a forbid suspend thread region, and it's illegal - // to enter a Crst after the forbid suspend thread region is entered - CrstHolder ch(&m_RangeCrst); + RangeSection *pCurr = FindCodeRangeWithLock(pStartRange); + GetCodeRangeMap()->RemoveRangeSection(pCurr); + + +#if defined(TARGET_AMD64) + PTR_UnwindInfoTable unwindTable = pCurr->_pUnwindInfoTable; +#endif + { // Acquire the WriterLock and prevent any readers from walking the RangeList. // This also forces us to enter a forbid suspend thread region, to prevent // hijacking profilers from grabbing this thread and walking it (the walk may // require the reader lock, which would cause a deadlock). WriterLockHolder wlh; - RangeSection *pPrev = NULL; - - pCurr = GetRangeSectionAndPrev(m_CodeRangeList, pStartRange, &pPrev); - - // pCurr points at the Range that needs to be unlinked from the RangeList - if (pCurr != NULL) - { - - // If pPrev is NULL the head of this list is to be deleted - if (pPrev == NULL) - { - m_CodeRangeList = pCurr->pnext; - } - else - { - _ASSERT(pPrev->pnext == pCurr); - - pPrev->pnext = pCurr->pnext; - } - - // Clear the cache pLastUsed in the head node (if any) - RangeSection * head = m_CodeRangeList; - if (head != NULL) - { - head->pLastUsed = NULL; - } - - // - // Cannot delete pCurr here because we own the WriterLock and if this is - // a hosted scenario then the hosting api callback cannot occur in a forbid - // suspend region, which the writer lock is. - // - } + RangeSectionLockState lockState = RangeSectionLockState::WriteLocked; + + GetCodeRangeMap()->CleanupRangeSections(&lockState); + // Unlike the previous implementation, we no longer attempt to avoid freeing + // the memory behind the RangeSection here, as we do not support the hosting + // api taking over memory allocation. 
} // - // Now delete the node + // Now delete the unwind info table // - if (pCurr != NULL) - { #if defined(TARGET_AMD64) - if (pCurr->pUnwindInfoTable != 0) - delete pCurr->pUnwindInfoTable; + if (unwindTable != 0) + delete unwindTable; #endif // defined(TARGET_AMD64) - delete pCurr; - } } #endif // #ifndef DACCESS_COMPILE #ifdef DACCESS_COMPILE -void ExecutionManager::EnumRangeList(RangeSection* list, - CLRDataEnumMemoryFlags flags) +void RangeSection::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { - while (list != NULL) - { - // If we can't read the target memory, stop immediately so we don't work - // with broken data. - if (!DacEnumMemoryRegion(dac_cast(list), sizeof(*list))) - break; - - if (list->pjit.IsValid()) - { - list->pjit->EnumMemoryRegions(flags); - } - - if (!(list->flags & RangeSection::RANGE_SECTION_CODEHEAP)) - { - PTR_Module pModule = dac_cast(list->pHeapListOrZapModule); + if (!DacEnumMemoryRegion(dac_cast(this), sizeof(*this))) + return; - if (pModule.IsValid()) - { - pModule->EnumMemoryRegions(flags, true); - } - } + if (_pjit.IsValid()) + { + _pjit->EnumMemoryRegions(flags); + } - list = list->pnext; -#if defined (_DEBUG) - // Test hook: when testing on debug builds, we want an easy way to test that the while - // correctly terminates in the face of ridiculous stuff from the target. - if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DumpGeneration_IntentionallyCorruptDataFromTarget) == 1) +#ifdef FEATURE_READYTORUN + if (_pR2RModule != NULL) + { + if (_pR2RModule.IsValid()) { - // Force us to struggle on with something bad. - if (list == NULL) - { - list = (RangeSection *)&flags; - } + _pR2RModule->EnumMemoryRegions(flags, true); } -#endif // (_DEBUG) - } +#endif // FEATURE_READYTORUN } + void ExecutionManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) { STATIC_CONTRACT_HOST_CALLS; @@ -5357,17 +5206,13 @@ void ExecutionManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) // Report the global data portions. // - m_CodeRangeList.EnumMem(); + GetCodeRangeMap().EnumMem(); m_pDefaultCodeMan.EnumMem(); // // Walk structures and report. 
// - - if (m_CodeRangeList.IsValid()) - { - EnumRangeList(m_CodeRangeList, flags); - } + GetCodeRangeMap()->EnumMemoryRegions(flags); } #endif // #ifdef DACCESS_COMPILE @@ -6033,7 +5878,7 @@ ReadyToRunInfo * ReadyToRunJitManager::JitTokenToReadyToRunInfo(const METHODTOKE SUPPORTS_DAC; } CONTRACTL_END; - return dac_cast(MethodToken.m_pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo(); + return MethodToken.m_pRangeSection->_pR2RModule->GetReadyToRunInfo(); } UINT32 ReadyToRunJitManager::JitTokenToGCInfoVersion(const METHODTOKEN& MethodToken) @@ -6170,9 +6015,9 @@ StubCodeBlockKind ReadyToRunJitManager::GetStubCodeBlockKind(RangeSection * pRan } CONTRACTL_END; - DWORD rva = (DWORD)(currentPC - pRangeSection->LowAddress); + DWORD rva = (DWORD)(currentPC - pRangeSection->_range.RangeStart()); - PTR_ReadyToRunInfo pReadyToRunInfo = dac_cast(pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo(); + PTR_ReadyToRunInfo pReadyToRunInfo = pRangeSection->_pR2RModule->GetReadyToRunInfo(); PTR_IMAGE_DATA_DIRECTORY pDelayLoadMethodCallThunksDir = pReadyToRunInfo->GetDelayMethodCallThunksSection(); if (pDelayLoadMethodCallThunksDir != NULL) @@ -6321,11 +6166,11 @@ BOOL ReadyToRunJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection, TADDR currentInstr = PCODEToPINSTR(currentPC); - TADDR ImageBase = pRangeSection->LowAddress; + TADDR ImageBase = pRangeSection->_range.RangeStart(); DWORD RelativePc = (DWORD)(currentInstr - ImageBase); - Module * pModule = dac_cast(pRangeSection->pHeapListOrZapModule); + Module * pModule = pRangeSection->_pR2RModule; ReadyToRunInfo * pInfo = pModule->GetReadyToRunInfo(); COUNT_T nRuntimeFunctions = pInfo->m_nRuntimeFunctions; diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h index 8e432e6e5209ba..5c1ec4230b4d92 100644 --- a/src/coreclr/vm/codeman.h +++ b/src/coreclr/vm/codeman.h @@ -100,6 +100,8 @@ enum StubCodeBlockKind : int STUB_CODE_BLOCK_JUMPSTUB, STUB_CODE_BLOCK_PRECODE, STUB_CODE_BLOCK_DYNAMICHELPER, + STUB_CODE_BLOCK_STUBPRECODE, + STUB_CODE_BLOCK_FIXUPPRECODE, // Last valid value. Note that the definition is duplicated in debug\daccess\fntableaccess.cpp STUB_CODE_BLOCK_LAST = 0xF, // Placeholders returned by code:GetStubCodeBlockKind @@ -608,44 +610,924 @@ class UnwindInfoTable { // address range to track the code heaps. 
typedef DPTR(struct RangeSection) PTR_RangeSection; +typedef VPTR(class CodeRangeMapRangeList) PTR_CodeRangeMapRangeList; -struct RangeSection +class RangeSectionMap; + +class Range { - TADDR LowAddress; - TADDR HighAddress; + // [begin,end) (This is a half-open range: begin is inclusive, end is exclusive) + TADDR begin; + TADDR end; - PTR_IJitManager pjit; // The owner of this address range +public: + Range(TADDR begin, TADDR end) : begin(begin), end(end) + { + assert(end >= begin); + } -#ifndef DACCESS_COMPILE - // Volatile because of the list can be walked lock-free - Volatile pnext; // link rangesections in a sorted list -#else - PTR_RangeSection pnext; -#endif + bool IsInRange(TADDR address) const + { + return address >= begin && address < end; + } - PTR_RangeSection pLastUsed; // for the head node only: a link to rangesections that was used most recently + TADDR RangeSize() const + { + return end - begin; + } + + TADDR RangeStart() const + { + return begin; + } + + TADDR RangeEnd() const + { + assert(RangeSize() > 0); + return end - 1; + } + + TADDR RangeEndOpen() const + { + return end; + } +}; +struct RangeSection +{ + friend class RangeSectionMap; enum RangeSectionFlags { RANGE_SECTION_NONE = 0x0, RANGE_SECTION_COLLECTIBLE = 0x1, RANGE_SECTION_CODEHEAP = 0x2, + RANGE_SECTION_RANGELIST = 0x4, + }; + #ifdef FEATURE_READYTORUN - RANGE_SECTION_READYTORUN = 0x4, + RangeSection(Range range, IJitManager* pJit, RangeSectionFlags flags, PTR_Module pR2RModule) : + _range(range), + _flags(flags), + _pjit(pJit), + _pR2RModule(pR2RModule), + _pHeapList(dac_cast<PTR_HeapList>((TADDR)0)), + _pRangeList(dac_cast<PTR_CodeRangeMapRangeList>((TADDR)0)) +#if defined(TARGET_AMD64) + , _pUnwindInfoTable(dac_cast<PTR_UnwindInfoTable>((TADDR)0)) +#endif + { + assert(!(flags & RANGE_SECTION_COLLECTIBLE)); + assert(pR2RModule != NULL); + } #endif + + RangeSection(Range range, IJitManager* pJit, RangeSectionFlags flags, PTR_HeapList pHeapList) : + _range(range), + _flags(flags), + _pjit(pJit), + _pR2RModule(dac_cast<PTR_Module>((TADDR)0)), + _pHeapList(pHeapList), + _pRangeList(dac_cast<PTR_CodeRangeMapRangeList>((TADDR)0)) +#if defined(TARGET_AMD64) + , _pUnwindInfoTable(dac_cast<PTR_UnwindInfoTable>((TADDR)0)) +#endif + {} + + RangeSection(Range range, IJitManager* pJit, RangeSectionFlags flags, PTR_CodeRangeMapRangeList pRangeList) : + _range(range), + _flags(flags), + _pjit(pJit), + _pR2RModule(dac_cast<PTR_Module>((TADDR)0)), + _pHeapList(dac_cast<PTR_HeapList>((TADDR)0)), + _pRangeList(pRangeList) +#if defined(TARGET_AMD64) + , _pUnwindInfoTable(dac_cast<PTR_UnwindInfoTable>((TADDR)0)) +#endif + {} + +#ifdef DACCESS_COMPILE + void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); +#endif + + const Range _range; + const RangeSectionFlags _flags; + const PTR_IJitManager _pjit; + const PTR_Module _pR2RModule; + const PTR_HeapList _pHeapList; + const PTR_CodeRangeMapRangeList _pRangeList; + +#if defined(TARGET_AMD64) + PTR_UnwindInfoTable _pUnwindInfoTable; // Points to unwind information for this memory range. +#endif // defined(TARGET_AMD64) + + + RangeSection* _pRangeSectionNextForDelete = NULL; // Used for adding to the cleanup list +}; + +enum class RangeSectionLockState +{ + None, + NeedsLock, + ReaderLocked, + WriteLocked, +}; + +// For 64bit, we work with 2KB chunks of memory holding pointers to the next level. This provides 8 bits of address resolution per level. +// For *reasons* the X64 hardware is limited to 57 bits of addressable address space, and to make the math work out nicely, the minimum granularity +// of the tree structure is 128KB (one leaf entry per 2^17 bytes). +// Similarly the Arm64 specification requires addresses to use at most 52 bits.
Thus we use the maximum addressable range of X64 to provide the real max range +// So the first level is bits [56:49] -> L5 +// Then [48:41] -> L4 +// [40:33] -> L3 +// [32:25] -> L2 +// [24:17] -> L1 +// This leaves 17 bits of the address to be handled by the RangeSectionFragment linked list +// +// For 32bit VA processes, use 1KB chunks holding pointers to the next level. This provides 8 bits of address resolution per level. [31:24] and [23:16]. +// For the 32bit processes, only the last 16bits are handled by the RangeSectionFragment linked list. + +// Each level of the tree may be considered collectible or non-collectible (except for the top level, which is ALWAYS non-collectible) +// The locking model of the tree structure is as follows. +// Adding a newly allocated level requires holding the Reader lock. Multiple threads may add a level in parallel. +// Removing a level requires holding the Writer lock. +// No level which refers to a non-collectible fragment may be considered collectible. +// A level may be upgraded from collectible to non-collectible, by changing the pointer which points at it to not have the sentinel bit. +// A level may NOT ever be downgraded from non-collectible to collectible. +// When a level becomes empty, it may be freed, and the pointer which points at it may be nulled out. +// +// Within the linked list of RangeSectionFragments, there are effectively 2 lists. +// - The non-collectible list, which are always found first. +// - The collectible list, which follows the non-collectible list. +// +// Insertion into the map uses atomic updates and fully pre-initialized RangeSection structures, so that insertions can be lock-free with regards to each other. +// However, they are not lock-free with regards to removals, so the insertions use a Reader lock. +// +// Reading from the non-collectible data (the non-collectible portion of tree structure + the non-collectible list) does not require any locking at all. +// Reading from the collectible data will require a ReaderLock. There is a scheme using the RangeSectionLockState where when there is an attempt to read without the +// lock and we find collectible data, which will cause the runtime to upgrade to using the Reader lock in that situation. +// +// Reading this code you will ALSO find that the ReaderLock logic used here is intertwined with the GC mode of the process. In particular, +// in cooperative mode and during GC stackwalking, the ReaderLock is always considered to be held. +class RangeSectionMap +{ + class RangeSectionFragment; + class RangeSectionFragmentPointer; + typedef DPTR(RangeSectionFragment) PTR_RangeSectionFragment; + typedef DPTR(RangeSectionFragmentPointer) PTR_RangeSectionFragmentPointer; + + // Helper structure which forces all access to the various pointers to be handled via volatile/interlocked operations + // The copy/move constructors are all deleted to forbid accidental reads into temporaries, etc. 
+ class RangeSectionFragmentPointer + { + private: + TADDR _ptr; + + static TADDR FragmentToPtr(RangeSectionFragment* fragment) + { + TADDR ptr = dac_cast(fragment); + if (ptr == 0) + return ptr; + + if (fragment->isCollectibleRangeSectionFragment) + { + ptr += 1; + } + + return ptr; + } + + RangeSectionFragmentPointer() { _ptr = 0; } + public: + + RangeSectionFragmentPointer(RangeSectionFragmentPointer &) = delete; + RangeSectionFragmentPointer(RangeSectionFragmentPointer &&) = delete; + RangeSectionFragmentPointer& operator=(const RangeSectionFragmentPointer&) = delete; + + bool PointerIsCollectible() + { + return ((_ptr & 1) == 1); + } + + bool IsNull() + { + return _ptr == 0; + } + + PTR_RangeSectionFragment VolatileLoadWithoutBarrier(RangeSectionLockState *pLockState) + { + TADDR ptr = ::VolatileLoadWithoutBarrier(&_ptr); + if ((ptr & 1) == 1) + { + if ((*pLockState == RangeSectionLockState::None) || (*pLockState == RangeSectionLockState::NeedsLock)) + { + *pLockState = RangeSectionLockState::NeedsLock; + return NULL; + } + return dac_cast(ptr - 1); + } + else + { + return dac_cast(ptr); + } + } + +#ifndef DACCESS_COMPILE + void VolatileStore(RangeSectionFragment* fragment) + { + ::VolatileStore(&_ptr, FragmentToPtr(fragment)); + } + + bool AtomicReplace(RangeSectionFragment* newFragment, RangeSectionFragment* oldFragment) + { + TADDR oldPtr = FragmentToPtr(oldFragment); + TADDR newPtr = FragmentToPtr(newFragment); + + return oldPtr == InterlockedCompareExchangeT(&_ptr, newPtr, oldPtr); + } +#endif // DACCESS_COMPILE }; - DWORD flags; + // Helper structure which forces all access to the various pointers to be handled via volatile/interlocked operations + // The copy/move constructors are all deleted to forbid accidental reads into temporaries, etc. + template + class RangeSectionLevelPointer + { + private: + TADDR _ptr; - // union - // { - // PTR_CodeHeap pCodeHeap; // valid if RANGE_SECTION_HEAP is set - // PTR_Module pZapModule; // valid if RANGE_SECTION_HEAP is not set - // }; - TADDR pHeapListOrZapModule; -#if defined(HOST_64BIT) - PTR_UnwindInfoTable pUnwindInfoTable; // Points to unwind information for this memory range. 
-#endif // defined(HOST_64BIT) + static TADDR LevelToPtr(TPtr level, bool collectible) + { + TADDR ptr = dac_cast(level); + if (ptr == 0) + return ptr; + + if (collectible) + { + ptr += 1; + } + + return ptr; + } + + RangeSectionLevelPointer() { _ptr = 0; } + public: + + RangeSectionLevelPointer(RangeSectionLevelPointer &) = delete; + RangeSectionLevelPointer(RangeSectionLevelPointer &&) = delete; + RangeSectionLevelPointer& operator=(const RangeSectionLevelPointer&) = delete; + + bool PointerIsCollectible() + { + return ((::VolatileLoadWithoutBarrier(&_ptr) & 1) == 1); + } + + bool IsNull() + { + return _ptr == 0; + } + + TPtr VolatileLoadWithoutBarrier(RangeSectionLockState *pLockState) + { + TADDR ptr = ::VolatileLoadWithoutBarrier(&_ptr); + if ((ptr & 1) == 1) + { + if ((*pLockState == RangeSectionLockState::None) || (*pLockState == RangeSectionLockState::NeedsLock)) + { + *pLockState = RangeSectionLockState::NeedsLock; + return NULL; + } + return dac_cast(ptr - 1); + } + else + { + return dac_cast(ptr); + } + } + + TPtr VolatileLoad(RangeSectionLockState *pLockState) + { + TADDR ptr = ::VolatileLoad(&_ptr); + if ((ptr & 1) == 1) + { + if ((*pLockState == RangeSectionLockState::None) || (*pLockState == RangeSectionLockState::NeedsLock)) + { + *pLockState = RangeSectionLockState::NeedsLock; + return NULL; + } + return dac_cast(ptr - 1); + } + else + { + return dac_cast(ptr); + } + } + +#ifndef DACCESS_COMPILE + + void UpgradeToNonCollectible() + { + TADDR ptr = ::VolatileLoadWithoutBarrier(&_ptr); + if ((ptr & 1) == 1) + { + // Upgrade to non-collectible +#ifdef _DEBUG + TADDR initialValue = +#endif + InterlockedCompareExchangeT(&_ptr, ptr - 1, ptr); + assert(initialValue == ptr || initialValue == (ptr - 1)); + } + } + + // Install a newly allocated level pointer. Return true if the new buffer is installed. + // Return false if a buffer is already installed. + bool Install(TPtr level, bool collectible) + { + TADDR initialPointerStoreAttempt = LevelToPtr(level, collectible); + if (0 == InterlockedCompareExchangeT(&_ptr, initialPointerStoreAttempt, (TADDR)0)) + { + return true; + } + else if (!collectible) + { + // In this case we update the already stored level to be pointed at via a non-collectible pointer + // But since we don't actually install the newly passed in pointer, we still return false. + UpgradeToNonCollectible(); + } + + return false; + } + + void Uninstall() + { + ::VolatileStore(&_ptr, (TADDR)0); + } +#endif // DACCESS_COMPILE + }; + + // Unlike a RangeSection, a RangeSectionFragment cannot span multiple elements of the last level of the RangeSectionMap + // Always allocated via memset/free + class RangeSectionFragment + { + public: + RangeSectionFragmentPointer pRangeSectionFragmentNext; + Range _range; + PTR_RangeSection pRangeSection; + bool InRange(TADDR address) { return _range.IsInRange(address) && pRangeSection->_pRangeSectionNextForDelete == NULL; } + bool isPrimaryRangeSectionFragment; // RangeSectionFragment are allocated in arrays, but we only need to free the first allocated one. It will be marked with this flag. 
+ bool isCollectibleRangeSectionFragment; // RangeSectionFragments + }; + +#ifdef TARGET_64BIT + static constexpr uintptr_t entriesPerMapLevel = 256; +#else + static constexpr uintptr_t entriesPerMapLevel = 256; +#endif + + typedef RangeSectionFragmentPointer RangeSectionList; + typedef RangeSectionList RangeSectionL1[entriesPerMapLevel]; + typedef RangeSectionLevelPointer RangeSectionL2[entriesPerMapLevel]; + typedef RangeSectionLevelPointer RangeSectionL3[entriesPerMapLevel]; + typedef RangeSectionLevelPointer RangeSectionL4[entriesPerMapLevel]; + typedef RangeSectionLevelPointer RangeSectionL5[entriesPerMapLevel]; + +#ifdef TARGET_64BIT + typedef RangeSectionL5 RangeSectionTopLevel; + static constexpr uintptr_t mapLevels = 5; + static constexpr uintptr_t maxSetBit = 56; // This is 0 indexed + static constexpr uintptr_t bitsPerLevel = 8; +#else + typedef RangeSectionL2 RangeSectionTopLevel; + static constexpr uintptr_t mapLevels = 2; + static constexpr uintptr_t maxSetBit = 31; // This is 0 indexed + static constexpr uintptr_t bitsPerLevel = 8; +#endif + + BYTE _topLevelData[sizeof(RangeSectionTopLevel)]; + RangeSectionTopLevel &GetTopLevel() + { + return *(RangeSectionTopLevel*)&_topLevelData; + } + + RangeSection* _pCleanupList; + + static constexpr uintptr_t bitsAtLastLevel = maxSetBit - (bitsPerLevel * mapLevels) + 1; + static constexpr uintptr_t bytesAtLastLevel = (((uintptr_t)1) << bitsAtLastLevel); + + RangeSection* EndOfCleanupListMarker() { return (RangeSection*)1; } + + void* AllocateLevel() + { + size_t size = entriesPerMapLevel * sizeof(void*); + void *buf = malloc(size); + if (buf == NULL) + return NULL; + memset(buf, 0, size); + return buf; + } + + uintptr_t EffectiveBitsForLevel(TADDR address, uintptr_t level) + { + TADDR addressAsInt = address; + TADDR addressBitsUsedInMap = addressAsInt >> (maxSetBit + 1 - (mapLevels * bitsPerLevel)); + TADDR addressBitsShifted = addressBitsUsedInMap >> ((level - 1) * bitsPerLevel); + TADDR addressBitsUsedInLevel = (entriesPerMapLevel - 1) & addressBitsShifted; + return addressBitsUsedInLevel; + } + +#ifndef DACCESS_COMPILE + template + auto EnsureLevel(TADDR address, T* outerLevel, uintptr_t level, bool collectible) -> decltype(&((*outerLevel->VolatileLoad(NULL))[0])) + { + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; // This function may only be called while the lock is held at least at ReaderLocked + uintptr_t index = EffectiveBitsForLevel(address, level); + auto levelToGetPointerIn = outerLevel->VolatileLoad(&lockState); + + if (levelToGetPointerIn == NULL) + { + auto levelNew = static_castVolatileLoad(NULL))[0])>(AllocateLevel()); + if (levelNew == NULL) + return NULL; + + if (!outerLevel->Install(levelNew, collectible)) + { + // Handle race where another thread grew the table + levelToGetPointerIn = outerLevel->VolatileLoad(&lockState); + free(levelNew); + } + else + { + levelToGetPointerIn = levelNew; + } + assert(levelToGetPointerIn != NULL); + } + else if (!collectible && outerLevel->PointerIsCollectible()) + { + outerLevel->UpgradeToNonCollectible(); + } + + return &((*levelToGetPointerIn)[index]); + } + // Returns pointer to address in last level map that actually points at RangeSection space. 
+ RangeSectionFragmentPointer* EnsureMapsForAddress(TADDR address, bool collectible) + { + uintptr_t level = mapLevels + 1; + uintptr_t topLevelIndex = EffectiveBitsForLevel(address, --level); + auto nextLevelAddress = &(GetTopLevel()[topLevelIndex]); +#ifdef TARGET_64BIT + auto rangeSectionL4 = nextLevelAddress; + auto rangeSectionL3 = EnsureLevel(address, rangeSectionL4, --level, collectible); + if (rangeSectionL3 == NULL) + return NULL; // Failure case + auto rangeSectionL2 = EnsureLevel(address, rangeSectionL3, --level, collectible); + if (rangeSectionL2 == NULL) + return NULL; // Failure case + auto rangeSectionL1 = EnsureLevel(address, rangeSectionL2, --level, collectible); + if (rangeSectionL1 == NULL) + return NULL; // Failure case +#else + auto rangeSectionL1 = nextLevelAddress; +#endif + auto result = EnsureLevel(address, rangeSectionL1, --level, collectible); + if (result == NULL) + return NULL; // Failure case + + return result; + } +#endif // DACCESS_COMPILE + + void* GetRangeSectionMapLevelForAddress(TADDR address, uintptr_t level, RangeSectionLockState *pLockState) + { + uintptr_t topLevelIndex = EffectiveBitsForLevel(address, mapLevels); + auto nextLevelAddress = &(GetTopLevel()[topLevelIndex]); +#ifdef TARGET_64BIT + if (level == 4) + return nextLevelAddress; + + auto rangeSectionL4 = nextLevelAddress->VolatileLoad(pLockState); + if (rangeSectionL4 == NULL) + return NULL; + auto rangeSectionL3Ptr = &((*rangeSectionL4)[EffectiveBitsForLevel(address, 4)]); + if (level == 3) + return rangeSectionL3Ptr; + + auto rangeSectionL3 = rangeSectionL3Ptr->VolatileLoadWithoutBarrier(pLockState); + if (rangeSectionL3 == NULL) + return NULL; + + auto rangeSectionL2Ptr = &((*rangeSectionL3)[EffectiveBitsForLevel(address, 3)]); + if (level == 2) + return rangeSectionL2Ptr; + + auto rangeSectionL2 = rangeSectionL2Ptr->VolatileLoadWithoutBarrier(pLockState); + if (rangeSectionL2 == NULL) + return NULL; + + auto rangeSectionL1Ptr = &((*rangeSectionL2)[EffectiveBitsForLevel(address, 2)]); + if (level == 1) + return rangeSectionL1Ptr; +#else + if (level == 1) + { + return nextLevelAddress; + } +#endif + assert(!"Unexpected level searched for"); + return NULL; + } + + PTR_RangeSectionFragment GetRangeSectionForAddress(TADDR address, RangeSectionLockState *pLockState) + { + uintptr_t topLevelIndex = EffectiveBitsForLevel(address, mapLevels); + auto nextLevelAddress = &(GetTopLevel()[topLevelIndex]); +#ifdef TARGET_64BIT + auto rangeSectionL4 = nextLevelAddress->VolatileLoad(pLockState); + if (rangeSectionL4 == NULL) + return NULL; + auto rangeSectionL3 = (*rangeSectionL4)[EffectiveBitsForLevel(address, 4)].VolatileLoadWithoutBarrier(pLockState); + if (rangeSectionL3 == NULL) + return NULL; + auto rangeSectionL2 = (*rangeSectionL3)[EffectiveBitsForLevel(address, 3)].VolatileLoadWithoutBarrier(pLockState); + if (rangeSectionL2 == NULL) + return NULL; + auto rangeSectionL1 = (*rangeSectionL2)[EffectiveBitsForLevel(address, 2)].VolatileLoadWithoutBarrier(pLockState); +#else + auto rangeSectionL1 = nextLevelAddress->VolatileLoad(pLockState); +#endif + if (rangeSectionL1 == NULL) + return NULL; + + return ((*rangeSectionL1)[EffectiveBitsForLevel(address, 1)]).VolatileLoadWithoutBarrier(pLockState); + } + + static uintptr_t RangeSectionFragmentCount(PTR_RangeSection pRangeSection) + { + uintptr_t rangeSize = pRangeSection->_range.RangeSize(); + if (rangeSize == 0) + return 0; + + // Account for the range not starting at the beginning of a last level fragment + rangeSize += 
pRangeSection->_range.RangeStart() & (bytesAtLastLevel - 1); + + uintptr_t fragmentCount = ((rangeSize - 1) / bytesAtLastLevel) + 1; + return fragmentCount; + } + + static TADDR IncrementAddressByMaxSizeOfFragment(TADDR input) + { + return input + bytesAtLastLevel; + } + +#ifndef DACCESS_COMPILE + bool AttachRangeSectionToMap(PTR_RangeSection pRangeSection, RangeSectionLockState *pLockState) + { + assert(*pLockState == RangeSectionLockState::ReaderLocked); // Must be locked so that the cannot fail case, can't fail. NOTE: This only needs the reader lock, as the attach process can happen in parallel to reads. + + // Currently all use of the RangeSection should be with aligned addresses, so validate that the start and end are at aligned boundaries + assert((pRangeSection->_range.RangeStart() & 0xF) == 0); + assert((pRangeSection->_range.RangeEnd() & 0xF) == 0xF); + assert((pRangeSection->_range.RangeEndOpen() & 0xF) == 0); + + uintptr_t rangeSectionFragmentCount = RangeSectionFragmentCount(pRangeSection); + size_t fragmentsSize = rangeSectionFragmentCount * sizeof(RangeSectionFragment); + void* fragmentsMemory = (RangeSectionFragment*)malloc(fragmentsSize); + if (fragmentsMemory == NULL) + { + return false; + } + memset(fragmentsMemory, 0, fragmentsSize); + + RangeSectionFragment* fragments = (RangeSectionFragment*)fragmentsMemory; + + + size_t entryUpdateSize = rangeSectionFragmentCount * sizeof(RangeSectionFragmentPointer*); + RangeSectionFragmentPointer** entriesInMapToUpdate = (RangeSectionFragmentPointer**)malloc(entryUpdateSize); + if (entriesInMapToUpdate == NULL) + { + free(fragments); + return false; + } + + memset(entriesInMapToUpdate, 0, entryUpdateSize); + + fragments[0].isPrimaryRangeSectionFragment = true; + + TADDR addressToPrepForUpdate = pRangeSection->_range.RangeStart(); + + // Assert that range is not already mapped in any way + assert(LookupRangeSection(addressToPrepForUpdate, pLockState) == NULL); + assert(LookupRangeSection(pRangeSection->_range.RangeEnd(), pLockState) == NULL); + for (TADDR fragmentAddress = addressToPrepForUpdate; pRangeSection->_range.IsInRange(fragmentAddress); fragmentAddress = IncrementAddressByMaxSizeOfFragment(fragmentAddress)) + { + assert(LookupRangeSection(fragmentAddress, pLockState) == NULL); + } + + bool collectible = !!(pRangeSection->_flags & RangeSection::RANGE_SECTION_COLLECTIBLE); + + for (uintptr_t iFragment = 0; iFragment < rangeSectionFragmentCount; iFragment++) + { + fragments[iFragment].pRangeSection = pRangeSection; + fragments[iFragment]._range = pRangeSection->_range; + fragments[iFragment].isCollectibleRangeSectionFragment = collectible; + RangeSectionFragmentPointer* entryInMapToUpdate = EnsureMapsForAddress(addressToPrepForUpdate, collectible); + if (entryInMapToUpdate == NULL) + { + free(fragments); + free(entriesInMapToUpdate); + return false; + } + + entriesInMapToUpdate[iFragment] = entryInMapToUpdate; + addressToPrepForUpdate = IncrementAddressByMaxSizeOfFragment(addressToPrepForUpdate); + } + + // At this point all the needed memory is allocated, and it is no longer possible to fail. + for (uintptr_t iFragment = 0; iFragment < rangeSectionFragmentCount; iFragment++) + { + RangeSectionFragmentPointer* pFragmentPointerToUpdate = entriesInMapToUpdate[iFragment]; + do + { + RangeSectionFragment* initialFragmentInMap = pFragmentPointerToUpdate->VolatileLoadWithoutBarrier(pLockState); + + // When inserting collectible elements into the range section map, ALWAYS put them after any non-collectible + // fragments. 
This is so that when looking up ReadyToRun data, we never will need to take the ReaderLock for real. + while (initialFragmentInMap != NULL && collectible && !initialFragmentInMap->isCollectibleRangeSectionFragment) + { + pFragmentPointerToUpdate = &initialFragmentInMap->pRangeSectionFragmentNext; + initialFragmentInMap = pFragmentPointerToUpdate->VolatileLoadWithoutBarrier(pLockState); + } + + fragments[iFragment].pRangeSectionFragmentNext.VolatileStore(initialFragmentInMap); + if (pFragmentPointerToUpdate->AtomicReplace(&(fragments[iFragment]), initialFragmentInMap)) + break; + } while (true); + } + + // Assert that range is now found via lookup + assert(LookupRangeSection(pRangeSection->_range.RangeStart(), pLockState) == pRangeSection); + assert(LookupRangeSection(pRangeSection->_range.RangeEnd(), pLockState) == pRangeSection); + for (TADDR fragmentAddress = pRangeSection->_range.RangeStart(); pRangeSection->_range.IsInRange(fragmentAddress); fragmentAddress = IncrementAddressByMaxSizeOfFragment(fragmentAddress)) + { + assert(LookupRangeSection(fragmentAddress, pLockState) == pRangeSection); + } + + // entriesInMapToUpdate was just a temporary allocation + free(entriesInMapToUpdate); + + return true; + } +#endif // DACCESS_COMPILE + + +public: + RangeSectionMap() : _pCleanupList(EndOfCleanupListMarker()) + { + memset(&_topLevelData, 0, sizeof(_topLevelData)); + } + +#ifndef DACCESS_COMPILE + +#ifdef FEATURE_READYTORUN + RangeSection *AllocateRange(Range range, IJitManager* pJit, RangeSection::RangeSectionFlags flags, PTR_Module pR2RModule, RangeSectionLockState* pLockState) + { + PTR_RangeSection pSection(new(nothrow)RangeSection(range, pJit, flags, pR2RModule)); + if (pSection == NULL) + return NULL; + + if (!AttachRangeSectionToMap(pSection, pLockState)) + { + delete pSection; + return NULL; + } + return pSection; + } +#endif + + RangeSection *AllocateRange(Range range, IJitManager* pJit, RangeSection::RangeSectionFlags flags, PTR_HeapList pHeapList, RangeSectionLockState* pLockState) + { + PTR_RangeSection pSection(new(nothrow)RangeSection(range, pJit, flags, pHeapList)); + if (pSection == NULL) + return NULL; + + if (!AttachRangeSectionToMap(pSection, pLockState)) + { + delete pSection; + return NULL; + } + return pSection; + } + + RangeSection *AllocateRange(Range range, IJitManager* pJit, RangeSection::RangeSectionFlags flags, PTR_CodeRangeMapRangeList pRangeList, RangeSectionLockState* pLockState) + { + PTR_RangeSection pSection(new(nothrow)RangeSection(range, pJit, flags, pRangeList)); + if (pSection == NULL) + return NULL; + + if (!AttachRangeSectionToMap(pSection, pLockState)) + { + delete pSection; + return NULL; + } + return pSection; + } +#endif // DACCESS_COMPILE + + PTR_RangeSection LookupRangeSection(TADDR address, RangeSectionLockState *pLockState) + { + PTR_RangeSectionFragment fragment = GetRangeSectionForAddress(address, pLockState); + if (fragment == NULL) + return NULL; + + while ((fragment != NULL) && !fragment->InRange(address)) + { + fragment = fragment->pRangeSectionFragmentNext.VolatileLoadWithoutBarrier(pLockState); + } + + if (fragment != NULL) + { + if (fragment->pRangeSection->_pRangeSectionNextForDelete != NULL) + return NULL; + return fragment->pRangeSection; + } + + return NULL; + } + +#ifndef DACCESS_COMPILE + void RemoveRangeSection(RangeSection* pRangeSection) + { + assert(pRangeSection->_pRangeSectionNextForDelete == NULL); + assert(pRangeSection->_flags & RangeSection::RANGE_SECTION_COLLECTIBLE); +#ifdef FEATURE_READYTORUN + 
assert(pRangeSection->_pR2RModule == NULL); +#endif + + // Removal is implemented by placing onto the cleanup linked list. This is then processed later during cleanup + RangeSection* pLatestRemovedRangeSection; + do + { + pLatestRemovedRangeSection = VolatileLoad(&_pCleanupList); + VolatileStore(&pRangeSection->_pRangeSectionNextForDelete, pLatestRemovedRangeSection); + } while (InterlockedCompareExchangeT(&_pCleanupList, pRangeSection, pLatestRemovedRangeSection) != pLatestRemovedRangeSection); + } + + void CleanupRangeSections(RangeSectionLockState *pLockState) + { + assert(*pLockState == RangeSectionLockState::WriteLocked); + + while (this->_pCleanupList != EndOfCleanupListMarker()) + { + PTR_RangeSection pRangeSectionToCleanup(this->_pCleanupList); + RangeSectionFragment* pRangeSectionFragmentToFree = NULL; + this->_pCleanupList = pRangeSectionToCleanup->_pRangeSectionNextForDelete; + + uintptr_t rangeSectionFragmentCount = RangeSectionFragmentCount(pRangeSectionToCleanup); + + TADDR addressToPrepForCleanup = pRangeSectionToCleanup->_range.RangeStart(); + + assert(LookupRangeSection(addressToPrepForCleanup, pLockState) == NULL); + assert(LookupRangeSection(pRangeSectionToCleanup->_range.RangeEnd(), pLockState) == NULL); + for (TADDR fragmentAddress = addressToPrepForCleanup; pRangeSectionToCleanup->_range.IsInRange(fragmentAddress); fragmentAddress = IncrementAddressByMaxSizeOfFragment(fragmentAddress)) + { + assert(LookupRangeSection(fragmentAddress, pLockState) == NULL); + } + + // Remove fragments from each of the fragment linked lists + for (uintptr_t iFragment = 0; iFragment < rangeSectionFragmentCount; iFragment++) + { + RangeSectionFragmentPointer* entryInMapToUpdate = EnsureMapsForAddress(addressToPrepForCleanup, true /* collectible */); + assert(entryInMapToUpdate != NULL); + +#ifdef _DEBUG + bool seenCollectibleRangeList = false; +#endif + while ((entryInMapToUpdate->VolatileLoadWithoutBarrier(pLockState))->pRangeSection != pRangeSectionToCleanup) + { +#ifdef _DEBUG + if (entryInMapToUpdate->VolatileLoadWithoutBarrier(pLockState)->isCollectibleRangeSectionFragment) + { + seenCollectibleRangeList = true; + } + else + { + // Since the fragment linked lists are sorted such that the collectible ones are always after the non-collectible ones, this should never happen. 
+ assert(!seenCollectibleRangeList); + } +#endif + entryInMapToUpdate = &(entryInMapToUpdate->VolatileLoadWithoutBarrier(pLockState))->pRangeSectionFragmentNext; + } + + RangeSectionFragment* fragment = entryInMapToUpdate->VolatileLoadWithoutBarrier(pLockState); + + // The fragment associated with the start of the range has the address that was allocated earlier + if (iFragment == 0) + { + pRangeSectionFragmentToFree = fragment; + assert(pRangeSectionFragmentToFree->isPrimaryRangeSectionFragment); + } + + auto fragmentThatRemains = fragment->pRangeSectionFragmentNext.VolatileLoadWithoutBarrier(pLockState); + entryInMapToUpdate->VolatileStore(fragmentThatRemains); + + // Now determine if we need to actually free portions of the map structure + if (fragmentThatRemains == NULL) + { + for (uintptr_t level = 1; level < mapLevels; level++) + { + // Note that the type here is actually not necessarily correct, but it's close enough + auto pointerToLevelData = (RangeSectionLevelPointer*)GetRangeSectionMapLevelForAddress(addressToPrepForCleanup, level, pLockState); + if (pointerToLevelData == NULL) + break; + auto &rawData = *pointerToLevelData->VolatileLoad(pLockState); + bool foundMeaningfulValue = false; + + for (uintptr_t i = 0; i < entriesPerMapLevel; i++) + { + if (!rawData[i].IsNull()) + { + foundMeaningfulValue = true; + break; + } + } + + if (foundMeaningfulValue) + break; + + // This level is completely empty. Free it, and then null out the pointer to it. + pointerToLevelData->Uninstall(); + free((void*)rawData); + } + } + + addressToPrepForCleanup = IncrementAddressByMaxSizeOfFragment(addressToPrepForCleanup); + } + + // Free the array of fragments + delete pRangeSectionToCleanup; + free(pRangeSectionFragmentToFree); + } + } +#endif // DACCESS_COMPILE + +#ifdef DACCESS_COMPILE + void EnumMemoryRangeSectionMapLevel(CLRDataEnumMemoryFlags flags, RangeSectionFragmentPointer& fragmentPointer, RangeSectionLockState* pLockState) + { + PTR_RangeSectionFragment fragment = fragmentPointer.VolatileLoadWithoutBarrier(pLockState); + while (fragment != NULL) + { + if (!DacEnumMemoryRegion(dac_cast<TADDR>(fragment), sizeof(RangeSectionFragment))) + return; + + fragment->pRangeSection->EnumMemoryRegions(flags); + + fragment = fragment->pRangeSectionFragmentNext.VolatileLoadWithoutBarrier(pLockState); + } + } + + void EnumMemoryRangeSectionMapLevel(CLRDataEnumMemoryFlags flags, RangeSectionL1& level, RangeSectionLockState* pLockState) + { + if (!DacEnumMemoryRegion(dac_cast<TADDR>(&level), sizeof(level))) + return; + + for (uintptr_t i = 0; i < entriesPerMapLevel; i++) + { + if (!level[i].IsNull()) + { + EnumMemoryRangeSectionMapLevel(flags, level[i], pLockState); + } + } + } + + template <class T> + void EnumMemoryRangeSectionMapLevel(CLRDataEnumMemoryFlags flags, T& level, RangeSectionLockState* pLockState) + { + if (!DacEnumMemoryRegion(dac_cast<TADDR>(&level), sizeof(level))) + return; + + for (uintptr_t i = 0; i < entriesPerMapLevel; i++) + { + if (!level[i].IsNull()) + { + EnumMemoryRangeSectionMapLevel(flags, *level[i].VolatileLoad(pLockState), pLockState); + } + } + } + + void EnumMemoryRegions(CLRDataEnumMemoryFlags flags) + { + if (!DacEnumMemoryRegion(dac_cast<TADDR>(this), sizeof(*this))) + return; + + // Always assume we are locked when enumerating + RangeSectionLockState lockState = RangeSectionLockState::ReaderLocked; + EnumMemoryRangeSectionMapLevel(flags, GetTopLevel(), &lockState); + } +#endif // DACCESS_COMPILE + +}; + +struct RangeSectionMapData +{ + BYTE Data[sizeof(RangeSectionMap)]; };
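The class above replaces the old linked-list scan with a multi-level, pointer-indexed map: each RangeSection is broken into fragments of at most bytesAtLastLevel bytes, every fragment is hung off the leaf slot that covers its addresses, and a lookup walks the levels for an address and then scans a short fragment list. The stand-alone sketch below shows just the fragment-count arithmetic and the leaf-level scan; the constant value, struct layout, and function names are assumptions made for the example, not the runtime's actual definitions.

    // Illustrative sketch only (not part of the patch): how a range is split into
    // fixed-size fragments and how one leaf-level fragment list is scanned on lookup.
    // The constant, the struct layout, and the function names are assumptions made for
    // this example; the real map adds several pointer-indexed levels above the leaves.
    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t bytesAtLastLevel = uintptr_t{1} << 24; // assumed leaf granularity

    struct Fragment
    {
        uintptr_t rangeStart;   // start of the owning section, not just of this fragment
        uintptr_t rangeEndOpen; // one past the end of the owning section
        Fragment* next;         // next fragment registered in the same leaf slot
    };

    // Same arithmetic as RangeSectionFragmentCount above: the starting offset within a
    // leaf bucket is added so a range straddling an extra boundary gets an extra fragment.
    uintptr_t FragmentCount(uintptr_t start, uintptr_t endOpen)
    {
        uintptr_t rangeSize = endOpen - start;
        rangeSize += start & (bytesAtLastLevel - 1);
        return ((rangeSize - 1) / bytesAtLastLevel) + 1;
    }

    // Scan the fragment list hanging off one leaf slot; every fragment carries the full
    // range of its owning section, so a single containment test identifies a hit.
    Fragment* FindFragment(Fragment* leafList, uintptr_t address)
    {
        for (Fragment* f = leafList; f != nullptr; f = f->next)
        {
            if (address >= f->rangeStart && address < f->rangeEndOpen)
                return f;
        }
        return nullptr;
    }

    int main()
    {
        // A 40 MB range starting 1 MB into a 16 MB leaf bucket needs three fragments.
        assert(FragmentCount(0x100000, 0x100000 + (uintptr_t{40} << 20)) == 3);

        Fragment f{ 0x100000, 0x100000 + (uintptr_t{40} << 20), nullptr };
        assert(FindFragment(&f, 0x200000) == &f);
        assert(FindFragment(&f, 0x10) == nullptr);
        return 0;
    }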
/*****************************************************************************/ @@ -1239,7 +2121,7 @@ class ExecutionManager } CONTRACTL_END; RangeSection * pRange = FindCodeRange(currentPC, GetScanFlags()); - return (pRange != NULL) ? pRange->pjit : NULL; + return (pRange != NULL) ? pRange->_pjit : NULL; } static RangeSection * FindCodeRange(PCODE currentPC, ScanFlag scanFlag); @@ -1284,11 +2166,17 @@ class ExecutionManager static void AddCodeRange(TADDR StartRange, TADDR EndRange, IJitManager* pJit, RangeSection::RangeSectionFlags flags, - void * pHp); + PTR_CodeRangeMapRangeList pRangeList); - static void AddNativeImageRange(TADDR StartRange, - SIZE_T Size, - Module * pModule); + static void AddCodeRange(TADDR StartRange, TADDR EndRange, + IJitManager* pJit, + RangeSection::RangeSectionFlags flags, + PTR_HeapList pHp); + + static void AddCodeRange(TADDR StartRange, TADDR EndRange, + IJitManager* pJit, + RangeSection::RangeSectionFlags flags, + PTR_Module pModule); static void DeleteRange(TADDR StartRange); @@ -1300,17 +2188,12 @@ class ExecutionManager return (ICodeManager *)m_pDefaultCodeMan; } - static PTR_Module FindZapModule(TADDR currentData); static PTR_Module FindReadyToRunModule(TADDR currentData); - // FindZapModule flavor to be used during GC to find GCRefMap + // FindReadyToRunModule flavor to be used during GC to find GCRefMap static PTR_Module FindModuleForGCRefMap(TADDR currentData); - static RangeSection* GetRangeSectionAndPrev(RangeSection *pRS, TADDR addr, RangeSection **ppPrev); - #ifdef DACCESS_COMPILE - static void EnumRangeList(RangeSection* list, - CLRDataEnumMemoryFlags flags); static void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); #endif @@ -1327,9 +2210,9 @@ class ExecutionManager static RangeSection * FindCodeRangeWithLock(PCODE currentPC); static BOOL IsManagedCodeWithLock(PCODE currentPC); - static BOOL IsManagedCodeWorker(PCODE currentPC); + static BOOL IsManagedCodeWorker(PCODE currentPC, RangeSectionLockState *pLockState); - static RangeSection* GetRangeSection(TADDR addr); + static RangeSection* GetRangeSection(TADDR addr, RangeSectionLockState *pLockState); SPTR_DECL(EECodeManager, m_pDefaultCodeMan); @@ -1341,9 +2224,18 @@ class ExecutionManager static CrstStatic m_JumpStubCrst; static CrstStatic m_RangeCrst; // Acquire before writing into m_CodeRangeList and m_DataRangeList + // Make the CodeRangeMap a global, initialized as the process starts up. + // The odd formulation of a BYTE array is used to avoid an extra memory indirection + // that would be needed if the memory for the CodeRangeMap was dynamically allocated. 
+ SVAL_DECL(RangeSectionMapData, g_codeRangeMap); + static PTR_RangeSectionMap GetCodeRangeMap() + { + TADDR codeRangeMapAddress = dac_cast<TADDR>(&(g_codeRangeMap)); + return dac_cast<PTR_RangeSectionMap>(codeRangeMapAddress); + } + // infrastructure to manage readers so we can lock them out and delete domain data // make ReaderCount volatile because we have order dependency in READER_INCREMENT - VOLATILE_SPTR_DECL(RangeSection, m_CodeRangeList); VOLATILE_SVAL_DECL(LONG, m_dwReaderCount); VOLATILE_SVAL_DECL(LONG, m_dwWriterLock); @@ -1372,14 +2264,6 @@ class ExecutionManager } #endif // defined(_DEBUG) - static void AddRangeHelper(TADDR StartRange, - TADDR EndRange, - IJitManager* pJit, - RangeSection::RangeSectionFlags flags, - TADDR pHeapListOrZapModule); - static void DeleteRangeHelper(RangeSection** ppRangeList, - TADDR StartRange); - #ifndef DACCESS_COMPILE static PCODE getNextJumpStub(MethodDesc* pMD, PCODE target, diff --git a/src/coreclr/vm/codeman.inl b/src/coreclr/vm/codeman.inl index da36c9fa142639..8af0fc0e48bfbd 100644 --- a/src/coreclr/vm/codeman.inl +++ b/src/coreclr/vm/codeman.inl @@ -6,10 +6,10 @@ inline BOOL ExecutionManager::IsCollectibleMethod(const METHODTOKEN& MethodToken) { WRAPPER_NO_CONTRACT; - return MethodToken.m_pRangeSection->flags & RangeSection::RANGE_SECTION_COLLECTIBLE; + return MethodToken.m_pRangeSection->_flags & RangeSection::RANGE_SECTION_COLLECTIBLE; } inline TADDR IJitManager::JitTokenToModuleBase(const METHODTOKEN& MethodToken) { - return MethodToken.m_pRangeSection->LowAddress; + return MethodToken.m_pRangeSection->_range.RangeStart(); } diff --git a/src/coreclr/vm/comdelegate.cpp b/src/coreclr/vm/comdelegate.cpp index aec64968e63703..32b5af3d0238f7 100644 --- a/src/coreclr/vm/comdelegate.cpp +++ b/src/coreclr/vm/comdelegate.cpp @@ -1568,6 +1568,25 @@ FCIMPLEND extern "C" void * _ReturnAddress(void); #endif // _MSC_VER && !TARGET_UNIX +uint32_t MethodDescToNumFixedArgs(MethodDesc *pMD) +{ + WRAPPER_NO_CONTRACT; + + SigParser sig = pMD->GetSigParser(); + + uint32_t data; + IfFailThrow(sig.GetCallingConvInfo(&data)); + if (data & IMAGE_CEE_CS_CALLCONV_GENERIC) + { + // Skip over generic argument count + IfFailThrow(sig.GetData(&data)); + } + + // Return argument count + IfFailThrow(sig.GetData(&data)); + return data; +} + // This is the single constructor for all Delegates. The compiler // doesn't provide an implementation of the Delegate constructor. We // provide that implementation through an ECall call to this method.
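MethodDescToNumFixedArgs above reads the argument count straight out of the method's metadata signature instead of constructing two MetaSig objects, presumably to keep DelegateConstruct cheaper since only the counts are compared. A method signature begins with the calling-convention value, an optional generic-parameter count, and then the fixed-parameter count, each stored as an ECMA-335 compressed integer. The sketch below decodes that prefix with a hand-rolled reader; the decoder and the sample blob are invented for illustration (only the 0x10 generic-calling-convention flag value is taken from the real headers), so this is not runtime code.

    // Rough stand-alone sketch (assumptions, not runtime code) of what the helper walks
    // past: <callconv> [genericParamCount] <paramCount>, each an ECMA-335 compressed integer.
    #include <cassert>
    #include <cstdint>

    const uint32_t CALLCONV_GENERIC_FLAG = 0x10; // same value as IMAGE_CEE_CS_CALLCONV_GENERIC

    // Minimal ECMA-335 compressed-integer decoder (1-, 2- or 4-byte encodings).
    uint32_t ReadCompressed(const uint8_t*& p)
    {
        if ((p[0] & 0x80) == 0) { return *p++; }
        if ((p[0] & 0xC0) == 0x80) { uint32_t v = ((p[0] & 0x3Fu) << 8) | p[1]; p += 2; return v; }
        uint32_t v = ((p[0] & 0x1Fu) << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
        p += 4;
        return v;
    }

    // Mirrors the flow of MethodDescToNumFixedArgs: calling convention, optional generic
    // argument count, then the fixed argument count.
    uint32_t NumFixedArgs(const uint8_t* sig)
    {
        uint32_t callConv = ReadCompressed(sig);
        if (callConv & CALLCONV_GENERIC_FLAG)
            ReadCompressed(sig);      // skip over the generic argument count
        return ReadCompressed(sig);   // the fixed argument count follows
    }

    int main()
    {
        // HASTHIS | GENERIC calling convention, 1 generic parameter, 2 fixed arguments.
        const uint8_t sig[] = { 0x30, 0x01, 0x02 /* return type and parameters follow */ };
        assert(NumFixedArgs(sig) == 2);
        return 0;
    }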
@@ -1635,10 +1654,8 @@ FCIMPL3(void, COMDelegate::DelegateConstruct, Object* refThisUNSAFE, Object* tar DelegateEEClass *pDelCls = (DelegateEEClass*)pDelMT->GetClass(); MethodDesc *pDelegateInvoke = COMDelegate::FindDelegateInvokeMethod(pDelMT); - MetaSig invokeSig(pDelegateInvoke); - MetaSig methodSig(pMeth); - UINT invokeArgCount = invokeSig.NumFixedArgs(); - UINT methodArgCount = methodSig.NumFixedArgs(); + UINT invokeArgCount = MethodDescToNumFixedArgs(pDelegateInvoke); + UINT methodArgCount = MethodDescToNumFixedArgs(pMeth); BOOL isStatic = pMeth->IsStatic(); if (!isStatic) { diff --git a/src/coreclr/vm/common.h b/src/coreclr/vm/common.h index 7e5d8300f621c0..6ebe0205951b88 100644 --- a/src/coreclr/vm/common.h +++ b/src/coreclr/vm/common.h @@ -117,6 +117,7 @@ typedef DPTR(class EEClass) PTR_EEClass; typedef DPTR(class DelegateEEClass) PTR_DelegateEEClass; typedef DPTR(struct DomainLocalModule) PTR_DomainLocalModule; typedef VPTR(class EECodeManager) PTR_EECodeManager; +typedef DPTR(class RangeSectionMap) PTR_RangeSectionMap; typedef DPTR(class EEConfig) PTR_EEConfig; typedef VPTR(class EEDbgInterfaceImpl) PTR_EEDbgInterfaceImpl; typedef VPTR(class DebugInfoManager) PTR_DebugInfoManager; diff --git a/src/coreclr/vm/gccover.cpp b/src/coreclr/vm/gccover.cpp index 71a49b63be540a..248c37350d167f 100644 --- a/src/coreclr/vm/gccover.cpp +++ b/src/coreclr/vm/gccover.cpp @@ -61,18 +61,20 @@ static MethodDesc* getTargetMethodDesc(PCODE target) _ASSERTE(token.IsValid()); return VirtualCallStubManager::GetInterfaceMethodDescFromToken(token); } - if (RangeSectionStubManager::GetStubKind(target) == STUB_CODE_BLOCK_PRECODE) + + auto stubKind = RangeSectionStubManager::GetStubKind(target); + if (stubKind == STUB_CODE_BLOCK_PRECODE) { // The address looks like a value stub, try to get the method descriptor. return MethodDesc::GetMethodDescFromStubAddr(target, TRUE); } - if (PrecodeStubManager::g_pManager->GetStubPrecodeRangeList()->IsInRange(target)) + if (stubKind == STUB_CODE_BLOCK_STUBPRECODE) { return (MethodDesc*)((StubPrecode*)PCODEToPINSTR(target))->GetMethodDesc(); } - if (PrecodeStubManager::g_pManager->GetFixupPrecodeRangeList()->IsInRange(target)) + if (stubKind == STUB_CODE_BLOCK_FIXUPPRECODE) { if (!FixupPrecode::IsFixupPrecodeByASM(target)) { diff --git a/src/coreclr/vm/genericdict.cpp b/src/coreclr/vm/genericdict.cpp index db18a4044f5674..52645f0d92cf50 100644 --- a/src/coreclr/vm/genericdict.cpp +++ b/src/coreclr/vm/genericdict.cpp @@ -690,10 +690,7 @@ Dictionary::PopulateEntry( ptr = SigPointer((PCCOR_SIGNATURE)signature); IfFailThrow(ptr.GetData(&kind)); - Module * pContainingZapModule = ExecutionManager::FindZapModule(dac_cast(signature)); - - zapSigContext = ZapSig::Context(CoreLibBinder::GetModule(), (void *)pContainingZapModule, ZapSig::NormalTokens); - pZapSigContext = (pContainingZapModule != NULL) ? &zapSigContext : NULL; + pZapSigContext = NULL; } ModuleBase * pLookupModule = (isReadyToRunModule) ? 
pZapSigContext->pInfoModule : CoreLibBinder::GetModule(); diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index cd0161a4aec634..e588f2893d5590 100644 --- a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -14346,10 +14346,10 @@ void EECodeInfo::Init(PCODE codeAddress, ExecutionManager::ScanFlag scanFlag) if (pRS == NULL) goto Invalid; - if (!pRS->pjit->JitCodeToMethodInfo(pRS, codeAddress, &m_pMD, this)) + if (!pRS->_pjit->JitCodeToMethodInfo(pRS, codeAddress, &m_pMD, this)) goto Invalid; - m_pJM = pRS->pjit; + m_pJM = pRS->_pjit; return; Invalid: diff --git a/src/coreclr/vm/loaderallocator.cpp b/src/coreclr/vm/loaderallocator.cpp index f1ba447b34c952..b49a100d105f49 100644 --- a/src/coreclr/vm/loaderallocator.cpp +++ b/src/coreclr/vm/loaderallocator.cpp @@ -17,7 +17,9 @@ UINT64 LoaderAllocator::cLoaderAllocatorsCreated = 1; -LoaderAllocator::LoaderAllocator() +LoaderAllocator::LoaderAllocator(bool collectible) : + m_stubPrecodeRangeList(STUB_CODE_BLOCK_STUBPRECODE, collectible), + m_fixupPrecodeRangeList(STUB_CODE_BLOCK_FIXUPPRECODE, collectible) { LIMITED_METHOD_CONTRACT; @@ -66,7 +68,7 @@ LoaderAllocator::LoaderAllocator() m_pLastUsedCodeHeap = NULL; m_pLastUsedDynamicCodeHeap = NULL; m_pJumpStubCache = NULL; - m_IsCollectible = false; + m_IsCollectible = collectible; m_pMarshalingData = NULL; @@ -1194,7 +1196,7 @@ void LoaderAllocator::Init(BaseDomain *pDomain, BYTE *pExecutableHeapMemory) m_pNewStubPrecodeHeap = new (&m_NewStubPrecodeHeapInstance) LoaderHeap(2 * GetOsPageSize(), 2 * GetOsPageSize(), - PrecodeStubManager::g_pManager->GetStubPrecodeRangeList(), + &m_stubPrecodeRangeList, UnlockedLoaderHeap::HeapKind::Interleaved, false /* fUnlocked */, StubPrecode::GenerateCodePage, @@ -1202,7 +1204,7 @@ void LoaderAllocator::Init(BaseDomain *pDomain, BYTE *pExecutableHeapMemory) m_pFixupPrecodeHeap = new (&m_FixupPrecodeHeapInstance) LoaderHeap(2 * GetOsPageSize(), 2 * GetOsPageSize(), - PrecodeStubManager::g_pManager->GetFixupPrecodeRangeList(), + &m_fixupPrecodeRangeList, UnlockedLoaderHeap::HeapKind::Interleaved, false /* fUnlocked */, FixupPrecode::GenerateCodePage, @@ -1687,17 +1689,6 @@ void DomainAssemblyIterator::operator++() pNextAssembly = pCurrentAssembly ? 
pCurrentAssembly->GetNextDomainAssemblyInSameALC() : NULL; } -void AssemblyLoaderAllocator::SetCollectible() -{ - CONTRACTL - { - NOTHROW; - } - CONTRACTL_END; - - m_IsCollectible = true; -} - #ifndef DACCESS_COMPILE void AssemblyLoaderAllocator::Init(AppDomain* pAppDomain) diff --git a/src/coreclr/vm/loaderallocator.hpp b/src/coreclr/vm/loaderallocator.hpp index c38e495087218f..365063b0b5220c 100644 --- a/src/coreclr/vm/loaderallocator.hpp +++ b/src/coreclr/vm/loaderallocator.hpp @@ -39,6 +39,150 @@ typedef SHash<PtrSetSHashTraits<LoaderAllocator *>> LoaderAllocatorSet; class CustomAssemblyBinder; + +// This implements the Add/Remove rangelist api on top of the CodeRangeMap in the code manager +class CodeRangeMapRangeList : public RangeList +{ +public: + VPTR_VTABLE_CLASS(CodeRangeMapRangeList, RangeList) + +#if defined(DACCESS_COMPILE) || !defined(TARGET_WINDOWS) + CodeRangeMapRangeList() : + _RangeListRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT), + _rangeListType(STUB_CODE_BLOCK_UNKNOWN), + _id(NULL), + _collectible(true) + {} +#endif + + CodeRangeMapRangeList(StubCodeBlockKind rangeListType, bool collectible) : + _RangeListRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT), + _rangeListType(rangeListType), + _id(NULL), + _collectible(collectible) + { + LIMITED_METHOD_CONTRACT; + } + + ~CodeRangeMapRangeList() + { + LIMITED_METHOD_CONTRACT; + RemoveRangesWorker(_id, NULL, NULL); + } + + StubCodeBlockKind GetCodeBlockKind() + { + LIMITED_METHOD_CONTRACT; + return _rangeListType; + } + +private: +#ifndef DACCESS_COMPILE + void AddRangeWorkerHelper(TADDR start, TADDR end, void* id) + { + SimpleWriteLockHolder lh(&_RangeListRWLock); + + _ASSERTE(id == _id || _id == NULL); + _id = id; + // Grow the array first, so that a failure cannot leave a range published in the map without its start recorded for removal + + RangeSection::RangeSectionFlags flags = RangeSection::RANGE_SECTION_RANGELIST; + if (_collectible) + { + _starts.Preallocate(_starts.GetCount() + 1); + flags = (RangeSection::RangeSectionFlags)(flags | RangeSection::RANGE_SECTION_COLLECTIBLE); + } + + ExecutionManager::AddCodeRange(start, end, ExecutionManager::GetEEJitManager(), flags, this); + + if (_collectible) + { + // This cannot fail as the array was Preallocated above. + _starts.Append(start); + } + } +#endif + +protected: + virtual BOOL AddRangeWorker(const BYTE *start, const BYTE *end, void *id) + { + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + } + CONTRACTL_END; + +#ifndef DACCESS_COMPILE + BOOL result = FALSE; + + EX_TRY + { + AddRangeWorkerHelper((TADDR)start, (TADDR)end, id); + result = TRUE; + } + EX_CATCH + { + } + EX_END_CATCH(SwallowAllExceptions) + + return result; +#else + return FALSE; +#endif // DACCESS_COMPILE + } + + virtual void RemoveRangesWorker(void *id, const BYTE *start, const BYTE *end) + { + CONTRACTL + { + NOTHROW; + GC_NOTRIGGER; + } + CONTRACTL_END; + +#ifndef DACCESS_COMPILE + // This implementation only works for the case where the RangeList is used in a single LoaderHeap + _ASSERTE(start == NULL); + _ASSERTE(end == NULL); + + SimpleWriteLockHolder lh(&_RangeListRWLock); + _ASSERTE(id == _id || (_id == NULL && _starts.IsEmpty())); + + // Iterate backwards to improve efficiency of removals + // as any linked lists in the RangeSectionMap code are in reverse order of insertion.
+ for (auto i = _starts.GetCount(); i > 0;) + { + --i; + if (_starts[i] != 0) + { + ExecutionManager::DeleteRange(_starts[i]); + _starts[i] = 0; + } + } +#endif // DACCESS_COMPILE + } + + virtual BOOL IsInRangeWorker(TADDR address, TADDR *pID = NULL) + { + WRAPPER_NO_CONTRACT; + RangeSection *pRS = ExecutionManager::FindCodeRange(address, ExecutionManager::ScanReaderLock); + if (pRS == NULL) + return FALSE; + if ((pRS->_flags & RangeSection::RANGE_SECTION_RANGELIST) == 0) + return FALSE; + + return (pRS->_pRangeList == this); + } + +private: + SimpleRWLock _RangeListRWLock; + StubCodeBlockKind _rangeListType; + SArray<TADDR> _starts; + void* _id; + bool _collectible; +}; + // Iterator over a DomainAssembly in the same ALC class DomainAssemblyIterator { @@ -197,6 +341,9 @@ class LoaderAllocator // IL stub cache with fabricated MethodTable parented by a random module in this LoaderAllocator. ILStubCache m_ILStubCache; + CodeRangeMapRangeList m_stubPrecodeRangeList; + CodeRangeMapRangeList m_fixupPrecodeRangeList; + #ifdef FEATURE_PGO // PgoManager to hold pgo data associated with this LoaderAllocator Volatile<PgoManager *> m_pgoManager; @@ -555,7 +702,7 @@ class LoaderAllocator OBJECTREF GetHandleValue(LOADERHANDLE handle); - LoaderAllocator(); + LoaderAllocator(bool collectible); virtual ~LoaderAllocator(); BaseDomain *GetDomain() { LIMITED_METHOD_CONTRACT; return m_pDomain; } virtual BOOL CanUnload() = 0; @@ -708,7 +855,7 @@ class GlobalLoaderAllocator : public LoaderAllocator public: void Init(BaseDomain *pDomain); - GlobalLoaderAllocator() : m_Id(LAT_Global, (void*)1) { LIMITED_METHOD_CONTRACT;}; + GlobalLoaderAllocator() : LoaderAllocator(false), m_Id(LAT_Global, (void*)1) { LIMITED_METHOD_CONTRACT;}; virtual LoaderAllocatorID* Id(); virtual BOOL CanUnload(); }; @@ -728,7 +875,7 @@ class AssemblyLoaderAllocator : public LoaderAllocator ShuffleThunkCache* m_pShuffleThunkCache; public: virtual LoaderAllocatorID* Id(); - AssemblyLoaderAllocator() : m_Id(LAT_Assembly), m_pShuffleThunkCache(NULL) + AssemblyLoaderAllocator() : LoaderAllocator(true), m_Id(LAT_Assembly), m_pShuffleThunkCache(NULL) #if !defined(DACCESS_COMPILE) , m_binderToRelease(NULL) #endif @@ -736,8 +883,6 @@ class AssemblyLoaderAllocator : public LoaderAllocator void Init(AppDomain *pAppDomain); virtual BOOL CanUnload(); - void SetCollectible(); - void AddDomainAssembly(DomainAssembly *pDomainAssembly) { WRAPPER_NO_CONTRACT; diff --git a/src/coreclr/vm/method.cpp b/src/coreclr/vm/method.cpp index be4d0de7ec78b2..d6f96bfee7febb 100644 --- a/src/coreclr/vm/method.cpp +++ b/src/coreclr/vm/method.cpp @@ -2108,17 +2108,6 @@ MethodDesc* NonVirtualEntry2MethodDesc(PCODE entryPoint) RangeSection* pRS = ExecutionManager::FindCodeRange(entryPoint, ExecutionManager::GetScanFlags()); if (pRS == NULL) { - TADDR pInstr = PCODEToPINSTR(entryPoint); - if (PrecodeStubManager::g_pManager->GetStubPrecodeRangeList()->IsInRange(entryPoint)) - { - return (MethodDesc*)((StubPrecode*)pInstr)->GetMethodDesc(); - } - - if (PrecodeStubManager::g_pManager->GetFixupPrecodeRangeList()->IsInRange(entryPoint)) - { - return (MethodDesc*)((FixupPrecode*)pInstr)->GetMethodDesc(); - } - // Is it an FCALL?
MethodDesc* pFCallMD = ECall::MapTargetBackToMethod(entryPoint); if (pFCallMD != NULL) @@ -2129,16 +2118,38 @@ MethodDesc* NonVirtualEntry2MethodDesc(PCODE entryPoint) return NULL; } + // Inlined fast path for fixup precode and stub precode from RangeList implementation + if (pRS->_flags == RangeSection::RANGE_SECTION_RANGELIST) + { + if (pRS->_pRangeList->GetCodeBlockKind() == STUB_CODE_BLOCK_FIXUPPRECODE) + { + return (MethodDesc*)((FixupPrecode*)PCODEToPINSTR(entryPoint))->GetMethodDesc(); + } + if (pRS->_pRangeList->GetCodeBlockKind() == STUB_CODE_BLOCK_STUBPRECODE) + { + return (MethodDesc*)((StubPrecode*)PCODEToPINSTR(entryPoint))->GetMethodDesc(); + } + } + MethodDesc* pMD; - if (pRS->pjit->JitCodeToMethodInfo(pRS, entryPoint, &pMD, NULL)) + if (pRS->_pjit->JitCodeToMethodInfo(pRS, entryPoint, &pMD, NULL)) return pMD; - if (pRS->pjit->GetStubCodeBlockKind(pRS, entryPoint) == STUB_CODE_BLOCK_PRECODE) - return MethodDesc::GetMethodDescFromStubAddr(entryPoint); + auto stubCodeBlockKind = pRS->_pjit->GetStubCodeBlockKind(pRS, entryPoint); - // We should never get here - _ASSERTE(!"NonVirtualEntry2MethodDesc failed for RangeSection"); - return NULL; + switch(stubCodeBlockKind) + { + case STUB_CODE_BLOCK_PRECODE: + return MethodDesc::GetMethodDescFromStubAddr(entryPoint); + case STUB_CODE_BLOCK_FIXUPPRECODE: + return (MethodDesc*)((FixupPrecode*)PCODEToPINSTR(entryPoint))->GetMethodDesc(); + case STUB_CODE_BLOCK_STUBPRECODE: + return (MethodDesc*)((StubPrecode*)PCODEToPINSTR(entryPoint))->GetMethodDesc(); + default: + // We should never get here + _ASSERTE(!"NonVirtualEntry2MethodDesc failed for RangeSection"); + return NULL; + } } //******************************************************************************* diff --git a/src/coreclr/vm/prestub.cpp b/src/coreclr/vm/prestub.cpp index 4b4373ac40e818..c8a95d77a18cd9 100644 --- a/src/coreclr/vm/prestub.cpp +++ b/src/coreclr/vm/prestub.cpp @@ -2399,12 +2399,6 @@ EXTERN_C PCODE STDCALL ExternalMethodFixupWorker(TransitionBlock * pTransitionBl } #endif - // FUTURE: Consider always passing in module and section index to avoid the lookups - if (pModule == NULL) - { - pModule = ExecutionManager::FindZapModule(pIndirection); - sectionIndex = (DWORD)-1; - } _ASSERTE(pModule != NULL); pEMFrame->SetCallSite(pModule, pIndirection); diff --git a/src/coreclr/vm/stubmgr.cpp b/src/coreclr/vm/stubmgr.cpp index 7376386b34079a..423465536c627c 100644 --- a/src/coreclr/vm/stubmgr.cpp +++ b/src/coreclr/vm/stubmgr.cpp @@ -1003,7 +1003,8 @@ BOOL PrecodeStubManager::CheckIsStub_Internal(PCODE stubStartAddress) } CONTRACTL_END; - return GetStubPrecodeRangeList()->IsInRange(stubStartAddress) || GetFixupPrecodeRangeList()->IsInRange(stubStartAddress); + auto stubKind = RangeSectionStubManager::GetStubKind(stubStartAddress); + return (stubKind == STUB_CODE_BLOCK_FIXUPPRECODE) || (stubKind == STUB_CODE_BLOCK_STUBPRECODE); } BOOL PrecodeStubManager::DoTraceStub(PCODE stubStartAddress, @@ -1551,7 +1552,7 @@ RangeSectionStubManager::GetStubKind(PCODE stubStartAddress) if (pRS == NULL) return STUB_CODE_BLOCK_UNKNOWN; - return pRS->pjit->GetStubCodeBlockKind(pRS, stubStartAddress); + return pRS->_pjit->GetStubCodeBlockKind(pRS, stubStartAddress); } // @@ -2382,8 +2383,6 @@ PrecodeStubManager::DoEnumMemoryRegions(CLRDataEnumMemoryFlags flags) WRAPPER_NO_CONTRACT; DAC_ENUM_VTHIS(); EMEM_OUT(("MEM: %p PrecodeStubManager\n", dac_cast(this))); - GetStubPrecodeRangeList()->EnumMemoryRegions(flags); - GetFixupPrecodeRangeList()->EnumMemoryRegions(flags); } void diff --git 
a/src/coreclr/vm/stubmgr.h b/src/coreclr/vm/stubmgr.h index 49e2e837704765..c4c80d0b17db61 100644 --- a/src/coreclr/vm/stubmgr.h +++ b/src/coreclr/vm/stubmgr.h @@ -399,28 +399,6 @@ class PrecodeStubManager : public StubManager ~PrecodeStubManager() {WRAPPER_NO_CONTRACT;} #endif - protected: - LockedRangeList m_stubPrecodeRangeList; - LockedRangeList m_fixupPrecodeRangeList; - - public: - // Get dac-ized pointer to rangelist. - PTR_RangeList GetStubPrecodeRangeList() - { - SUPPORTS_DAC; - - TADDR addr = PTR_HOST_MEMBER_TADDR(PrecodeStubManager, this, m_stubPrecodeRangeList); - return PTR_RangeList(addr); - } - - PTR_RangeList GetFixupPrecodeRangeList() - { - SUPPORTS_DAC; - - TADDR addr = PTR_HOST_MEMBER_TADDR(PrecodeStubManager, this, m_fixupPrecodeRangeList); - return PTR_RangeList(addr); - } - public: virtual BOOL CheckIsStub_Internal(PCODE stubStartAddress); diff --git a/src/coreclr/vm/threads.cpp b/src/coreclr/vm/threads.cpp index cce1d0ba28ffee..fcb79dc0f8eca6 100644 --- a/src/coreclr/vm/threads.cpp +++ b/src/coreclr/vm/threads.cpp @@ -3951,30 +3951,6 @@ DWORD Thread::Wait(CLREvent *pEvent, INT32 timeOut, PendingSync *syncInfo) return dwResult; } -void Thread::Wake(SyncBlock *psb) -{ - WRAPPER_NO_CONTRACT; - - CLREvent* hEvent = NULL; - WaitEventLink *walk = &m_WaitEventLink; - while (walk->m_Next) { - if (walk->m_Next->m_WaitSB == psb) { - hEvent = walk->m_Next->m_EventWait; - // We are guaranteed that only one thread can change walk->m_Next->m_WaitSB - // since the thread is helding the syncblock. - walk->m_Next->m_WaitSB = (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB | 1); - break; - } -#ifdef _DEBUG - else if ((SyncBlock*)((DWORD_PTR)walk->m_Next & ~1) == psb) { - _ASSERTE (!"Can not wake a thread on the same SyncBlock more than once"); - } -#endif - } - PREFIX_ASSUME (hEvent != NULL); - hEvent->Set(); -} - #define WAIT_INTERRUPT_THREADABORT 0x1 #define WAIT_INTERRUPT_INTERRUPT 0x2 #define WAIT_INTERRUPT_OTHEREXCEPTION 0x4 diff --git a/src/coreclr/vm/threads.h b/src/coreclr/vm/threads.h index c312981b707716..1665565c5e6406 100644 --- a/src/coreclr/vm/threads.h +++ b/src/coreclr/vm/threads.h @@ -3170,7 +3170,6 @@ class Thread // Support for Wait/Notify BOOL Block(INT32 timeOut, PendingSync *syncInfo); - void Wake(SyncBlock *psb); DWORD Wait(HANDLE *objs, int cntObjs, INT32 timeOut, PendingSync *syncInfo); DWORD Wait(CLREvent* pEvent, INT32 timeOut, PendingSync *syncInfo);
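Taken together, the loaderallocator and stub-manager hunks above move the precode range bookkeeping out of the global PrecodeStubManager lists and into per-LoaderAllocator CodeRangeMapRangeList instances, so stub classification now goes through the code range map (RangeSectionStubManager::GetStubKind) instead of separate locked range lists; the removal of Thread::Wake is independent dead-code cleanup. One detail worth calling out is the ordering inside AddRangeWorkerHelper: the _starts array is grown before the range is published, so the trailing Append cannot fail. The sketch below is a minimal stand-alone illustration of that pattern, using invented names and std::vector in place of the runtime's SArray; it is not runtime code.

    // Minimal stand-alone illustration (invented names, std::vector instead of SArray) of
    // the ordering AddRangeWorkerHelper relies on: reserve bookkeeping space before the
    // fallible publish step, so that once a range is visible the final record cannot fail.
    #include <cassert>
    #include <cstdint>
    #include <utility>
    #include <vector>

    static std::vector<uintptr_t> g_starts;                                // stand-in for _starts
    static std::vector<std::pair<uintptr_t, uintptr_t>> g_publishedRanges; // stand-in for the code range map

    // Stand-in for ExecutionManager::AddCodeRange; may throw on allocation failure.
    void PublishRange(uintptr_t start, uintptr_t end)
    {
        g_publishedRanges.emplace_back(start, end);
    }

    void AddRange(uintptr_t start, uintptr_t end)
    {
        g_starts.reserve(g_starts.size() + 1); // grow first; failing here publishes nothing
        PublishRange(start, end);              // fallible step runs while nothing is half-recorded
        g_starts.push_back(start);             // cannot fail: capacity was reserved above
    }

    int main()
    {
        AddRange(0x1000, 0x2000);
        assert(g_starts.size() == 1 && g_publishedRanges.size() == 1);
        return 0;
    }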