From ba4592cc5c9030d2cabb49991cc77f2bcdda3c01 Mon Sep 17 00:00:00 2001
From: Jan Vorlicek
Date: Wed, 1 Mar 2023 01:31:31 +0100
Subject: [PATCH] Remove cached RW mapping when the corresponding RX one is
 released

When an RX mapping in the ExecutableAllocator is released while an RW
mapping is still cached for it, the cached mapping incorrectly stays in
the cache. So when the same virtual address range that was just released
gets reserved again and a request for an RW mapping comes in, the stale
cached RW mapping is returned. But the new RX mapping most likely has a
different offset in the underlying file mapping, so the cached RW mapping
is unrelated to the new RX mapping. Using it results either in
overwriting a different block of memory or in silently dropping whatever
is written.

This was discovered when investigating a GC reliability framework crash.
It turned out to also be the culprit behind issue #75244, which I was
unable to reproduce and which kept occurring in the CI on x86 only for
quite some time.

In addition to the fix, I've found an off-by-one condition in
ExecutableAllocator::FindRWBlock and fixed it. I've also noticed that in
UnlockedLoaderHeap::UnlockedReservePages, if the allocation of the
LoaderHeapBlock failed, we would incorrectly leave the data block on the
m_pRangeList. I've fixed that too.

Close #75244
---
 src/coreclr/inc/executableallocator.h        |  3 +
 src/coreclr/utilcode/executableallocator.cpp | 71 +++++++++++++-------
 src/coreclr/utilcode/loaderheap.cpp          | 13 ++--
 3 files changed, 55 insertions(+), 32 deletions(-)

diff --git a/src/coreclr/inc/executableallocator.h b/src/coreclr/inc/executableallocator.h
index c229f5546aa7fa..a6f6caf9da21d1 100644
--- a/src/coreclr/inc/executableallocator.h
+++ b/src/coreclr/inc/executableallocator.h
@@ -114,6 +114,9 @@ class ExecutableAllocator
     // and replaces it by the passed in one.
     void UpdateCachedMapping(BlockRW *pBlock);
 
+    // Remove the cached mapping
+    void RemoveCachedMapping();
+
     // Find existing RW block that maps the whole specified range of RX memory.
     // Return NULL if no such block exists.
     void* FindRWBlock(void* baseRX, size_t size);
diff --git a/src/coreclr/utilcode/executableallocator.cpp b/src/coreclr/utilcode/executableallocator.cpp
index dbaf2f52ebac2f..046504050d8194 100644
--- a/src/coreclr/utilcode/executableallocator.cpp
+++ b/src/coreclr/utilcode/executableallocator.cpp
@@ -261,28 +261,36 @@ bool ExecutableAllocator::Initialize()
 
 #define ENABLE_CACHED_MAPPINGS
 
-void ExecutableAllocator::UpdateCachedMapping(BlockRW* pBlock)
+void ExecutableAllocator::RemoveCachedMapping()
 {
-    LIMITED_METHOD_CONTRACT;
 #ifdef ENABLE_CACHED_MAPPINGS
-    if (m_cachedMapping == NULL)
+    void* unmapAddress = NULL;
+    size_t unmapSize;
+
+    if (!RemoveRWBlock(m_cachedMapping->baseRW, &unmapAddress, &unmapSize))
     {
-        m_cachedMapping = pBlock;
-        pBlock->refCount++;
+        g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("The RW block to unmap was not found"));
     }
-    else if (m_cachedMapping != pBlock)
+    if (unmapAddress && !VMToOSInterface::ReleaseRWMapping(unmapAddress, unmapSize))
     {
-        void* unmapAddress = NULL;
-        size_t unmapSize;
+        g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Releasing the RW mapping failed"));
+    }
 
-        if (!RemoveRWBlock(m_cachedMapping->baseRW, &unmapAddress, &unmapSize))
-        {
-            g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("The RW block to unmap was not found"));
-        }
-        if (unmapAddress && !VMToOSInterface::ReleaseRWMapping(unmapAddress, unmapSize))
+    m_cachedMapping = NULL;
+#endif // ENABLE_CACHED_MAPPINGS
+}
+
+void ExecutableAllocator::UpdateCachedMapping(BlockRW* pBlock)
+{
+    LIMITED_METHOD_CONTRACT;
+#ifdef ENABLE_CACHED_MAPPINGS
+    if (m_cachedMapping != pBlock)
+    {
+        if (m_cachedMapping != NULL)
         {
-            g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Releasing the RW mapping failed"));
+            RemoveCachedMapping();
         }
+
         m_cachedMapping = pBlock;
         pBlock->refCount++;
     }
@@ -453,6 +461,15 @@ void ExecutableAllocator::Release(void* pRX)
 
         if (pBlock != NULL)
        {
+            if (m_cachedMapping != NULL)
+            {
+                // In case the cached mapping maps the region being released, it needs to be removed
+                if ((pBlock->baseRX <= m_cachedMapping->baseRX) && (m_cachedMapping->baseRX < ((BYTE*)pBlock->baseRX + pBlock->size)))
+                {
+                    RemoveCachedMapping();
+                }
+            }
+
             if (!VMToOSInterface::ReleaseDoubleMappedMemory(m_doubleMemoryMapperHandle, pRX, pBlock->offset, pBlock->size))
             {
                 g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Releasing the double mapped memory failed"));
@@ -467,6 +484,8 @@ void ExecutableAllocator::Release(void* pRX)
             // The block was not found, which should never happen.
             g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("The RX block to release was not found"));
         }
+
+        _ASSERTE(FindRWBlock(pRX, 1) == NULL);
     }
     else
     {
@@ -779,22 +798,22 @@ void* ExecutableAllocator::MapRW(void* pRX, size_t size)
     {
         if (pRX >= pBlock->baseRX && ((size_t)pRX + size) <= ((size_t)pBlock->baseRX + pBlock->size))
         {
-                // Offset of the RX address in the originally allocated block
-                size_t offset = (size_t)pRX - (size_t)pBlock->baseRX;
-                // Offset of the RX address that will start the newly mapped block
-                size_t mapOffset = ALIGN_DOWN(offset, Granularity());
-                // Size of the block we will map
-                size_t mapSize = ALIGN_UP(offset - mapOffset + size, Granularity());
+            // Offset of the RX address in the originally allocated block
+            size_t offset = (size_t)pRX - (size_t)pBlock->baseRX;
+            // Offset of the RX address that will start the newly mapped block
+            size_t mapOffset = ALIGN_DOWN(offset, Granularity());
+            // Size of the block we will map
+            size_t mapSize = ALIGN_UP(offset - mapOffset + size, Granularity());
 #ifdef LOG_EXECUTABLE_ALLOCATOR_STATISTICS
-                StopWatch sw2(&g_mapCreateTimeSum);
+            StopWatch sw2(&g_mapCreateTimeSum);
 #endif
 
-                void* pRW = VMToOSInterface::GetRWMapping(m_doubleMemoryMapperHandle, (BYTE*)pBlock->baseRX + mapOffset, pBlock->offset + mapOffset, mapSize);
+            void* pRW = VMToOSInterface::GetRWMapping(m_doubleMemoryMapperHandle, (BYTE*)pBlock->baseRX + mapOffset, pBlock->offset + mapOffset, mapSize);
 
-                if (pRW == NULL)
-                {
-                    g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Failed to create RW mapping for RX memory"));
-                }
+            if (pRW == NULL)
+            {
+                g_fatalErrorHandler(COR_E_EXECUTIONENGINE, W("Failed to create RW mapping for RX memory"));
+            }
 
             AddRWBlock(pRW, (BYTE*)pBlock->baseRX + mapOffset, mapSize);
 
diff --git a/src/coreclr/utilcode/loaderheap.cpp b/src/coreclr/utilcode/loaderheap.cpp
index 25fd4367b2bf4b..a4dff5783e3732 100644
--- a/src/coreclr/utilcode/loaderheap.cpp
+++ b/src/coreclr/utilcode/loaderheap.cpp
@@ -1198,6 +1198,12 @@ BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
         return FALSE;
     }
 
+    NewHolder<LoaderHeapBlock> pNewBlock = new (nothrow) LoaderHeapBlock;
+    if (pNewBlock == NULL)
+    {
+        return FALSE;
+    }
+
     // Record reserved range in range list, if one is specified
     // Do this AFTER the commit - otherwise we'll have bogus ranges included.
     if (m_pRangeList != NULL)
@@ -1210,14 +1216,9 @@ BOOL UnlockedLoaderHeap::UnlockedReservePages(size_t dwSizeToCommit)
         }
     }
 
-    LoaderHeapBlock *pNewBlock = new (nothrow) LoaderHeapBlock;
-    if (pNewBlock == NULL)
-    {
-        return FALSE;
-    }
-
     m_dwTotalAlloc += dwSizeToCommit;
 
+    pNewBlock.SuppressRelease();
     pData.SuppressRelease();
 
     pNewBlock->dwVirtualSize = dwSizeToReserve;
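
The sketch below is a simplified, standalone C++ model of the invalidation rule this patch adds to ExecutableAllocator::Release: before a reserved range goes back to the OS, any cached mapping whose RX base falls inside that range must be dropped, otherwise a later reservation of the same virtual addresses could reuse a mapping that now refers to an unrelated file offset. All names in it (Block, CachedMapping, MappingCache, InvalidateIfCovered) are invented for illustration; the real allocator also unmaps the RW view and maintains reference counts, which the sketch omits.

// Minimal illustration of "drop the cached RW mapping when its RX range is released".
#include <cstddef>
#include <cstdint>
#include <cassert>

struct Block
{
    uint8_t* baseRX;   // start of the executable (RX) range
    size_t   size;     // size of the range in bytes
};

struct CachedMapping
{
    uint8_t* baseRX;   // RX address the cached RW view corresponds to
    uint8_t* baseRW;   // writable alias of that RX address
};

class MappingCache
{
    CachedMapping* m_cached = nullptr;

public:
    void Update(CachedMapping* mapping) { m_cached = mapping; }

    // Mirrors the check added in Release(): invalidate the cached entry if its RX base
    // lies anywhere inside [block.baseRX, block.baseRX + block.size).
    void InvalidateIfCovered(const Block& block)
    {
        if (m_cached != nullptr &&
            block.baseRX <= m_cached->baseRX &&
            m_cached->baseRX < block.baseRX + block.size)
        {
            m_cached = nullptr;   // the real code also unmaps the RW view here
        }
    }

    const CachedMapping* Get() const { return m_cached; }
};

int main()
{
    uint8_t rx[4096];
    Block block{rx, sizeof(rx)};
    CachedMapping mapping{rx + 128, /*baseRW=*/nullptr};

    MappingCache cache;
    cache.Update(&mapping);

    // Releasing the block that the cached mapping points into must clear the cache,
    // so a future reservation of the same addresses cannot pick up the stale entry.
    cache.InvalidateIfCovered(block);
    assert(cache.Get() == nullptr);
    return 0;
}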