Mirror of https://github.com/VSadov/Satori.git (synced 2025-06-11 18:20:26 +09:00)

escape mark in write barriers

parent 3ad442c8ee
commit 7da8d69379

23 changed files with 545 additions and 174 deletions
@@ -319,7 +319,7 @@ FORCEINLINE void InlinedMemmoveGCRefsHelper(void *dest, const void *src, size_t
        InlinedBackwardGCSafeCopyHelper(dest, src, len);
    }

    InlinedSetCardsAfterBulkCopyHelper((Object**)dest, len);
    InlinedSetCardsAfterBulkCopyHelper((Object**)dest, (Object*)src, len);
}

#endif // !_ARRAYNATIVE_INL_
@@ -182,6 +182,7 @@ if (FEATURE_ENABLE_NO_ADDRESS_SPACE_RANDOMIZATION)
    add_definitions(-DFEATURE_ENABLE_NO_ADDRESS_SPACE_RANDOMIZATION)
endif(FEATURE_ENABLE_NO_ADDRESS_SPACE_RANDOMIZATION)
add_definitions(-DFEATURE_SVR_GC)
add_definitions(-DFEATURE_SATORI_GC)
add_definitions(-DFEATURE_SYMDIFF)
add_compile_definitions($<$<NOT:$<BOOL:$<TARGET_PROPERTY:CROSSGEN_COMPONENT>>>:FEATURE_TIERED_COMPILATION>)
if (CLR_CMAKE_TARGET_ARCH_AMD64)
@@ -2905,12 +2905,14 @@ ClrDataAccess::GetGCHeapData(struct DacpGcHeapData *gcheapData)
    // is GC_HEAP_INVALID, in which case we fail.
    ULONG32 gcHeapValue = g_heap_type;

    // GC_HEAP_TYPE has three possible values:
    // GC_HEAP_TYPE has four possible values:
    // GC_HEAP_INVALID = 0,
    // GC_HEAP_WKS = 1,
    // GC_HEAP_SVR = 2
    // GC_HEAP_SVR = 2,
    // GC_HEAP_SATORI = 3
    // If we get something other than that, we probably read the wrong location.
    _ASSERTE(gcHeapValue >= GC_HEAP_INVALID && gcHeapValue <= GC_HEAP_SVR);
    //TODO: Satori
    _ASSERTE(gcHeapValue >= GC_HEAP_INVALID && gcHeapValue <= GC_HEAP_SATORI);

    // we have GC_HEAP_INVALID if gcHeapValue == 0, so we're done - we haven't
    // initialized the heap yet.
@@ -317,4 +317,14 @@ inline bool IsServerHeap()
#endif // FEATURE_SVR_GC
}

inline bool IsSatoriHeap()
{
#ifdef FEATURE_SATORI_GC
    assert(g_gc_heap_type != GC_HEAP_INVALID);
    return g_gc_heap_type == GC_HEAP_SATORI;
#else // FEATURE_SVR_GC
    return false;
#endif // FEATURE_SVR_GC
}

#endif // __GC_H
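Note: a minimal usage sketch of the new heap-type query, for orientation only. The GC_HEAP_* values match the enumeration documented in the DAC change above; the standalone globals and main() below are illustrative stand-ins, not runtime code.

// Sketch only: mirrors the dispatch pattern of IsServerHeap()/IsSatoriHeap().
// g_gc_heap_type and the GC_HEAP_* values belong to the runtime; this file is hypothetical.
#include <cstdio>

enum GCHeapType { GC_HEAP_INVALID = 0, GC_HEAP_WKS = 1, GC_HEAP_SVR = 2, GC_HEAP_SATORI = 3 };
static GCHeapType g_gc_heap_type = GC_HEAP_SATORI;

static bool IsSatoriHeap() { return g_gc_heap_type == GC_HEAP_SATORI; }

int main()
{
    // A hypothetical caller picks heap/barrier code paths off the heap type.
    std::printf(IsSatoriHeap() ? "satori heap\n" : "wks/svr heap\n");
    return 0;
}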
|
@ -104,6 +104,7 @@ GC_Initialize(
|
|||
}
|
||||
else if (true)
|
||||
{
|
||||
//TODO: Satori
|
||||
g_gc_heap_type = GC_HEAP_SATORI;
|
||||
heap = new(nothrow) SatoriGCHeap();
|
||||
}
|
||||
|
|
|
@@ -538,6 +538,16 @@ void HndLogSetEvent(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value)
#endif
}

void MarkEscapeSatori(Object* ref)
{
    // mark the escape byte
    // TODO: VS, check if region is allocating?
    if (!((int8_t*)ref)[-5])
    {
        ((int8_t*)ref)[-5] = (int8_t)0xFF;
    }
}

#ifndef DACCESS_COMPILE
/*
 * HndWriteBarrierWorker

@@ -549,6 +559,8 @@ void HndWriteBarrierWorker(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value)
{
    _ASSERTE (value != NULL);

    MarkEscapeSatori(OBJECTREFToObject(value));

    // find the write barrier for this handle
    uint8_t *barrier = (uint8_t *)((uintptr_t)handle & HANDLE_SEGMENT_ALIGN_MASK);
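Note: MarkEscapeSatori assumes an escape flag stored five bytes below the object pointer (next to the sync block). A self-contained sketch of that byte manipulation, using a plain buffer as a stand-in for a heap object; the offset and the 0xFF value come from this diff, everything else is fabricated for illustration.

// Sketch, not runtime code: demonstrates the escape-byte convention used by
// MarkEscapeSatori, i.e. a flag byte at ((int8_t*)obj)[-5].
#include <cstdint>
#include <cstring>
#include <cassert>

static void MarkEscape(void* obj)
{
    int8_t* escapeByte = (int8_t*)obj - 5;   // byte 5 below the object start (assumed layout)
    if (!*escapeByte)
        *escapeByte = (int8_t)0xFF;          // 0 -> not escaped, 0xFF -> escaped
}

int main()
{
    alignas(8) uint8_t storage[32];
    std::memset(storage, 0, sizeof(storage));
    void* obj = storage + 16;                // pretend the object starts 16 bytes into the buffer

    MarkEscape(obj);
    assert(((int8_t*)obj)[-5] == (int8_t)0xFF);
    MarkEscape(obj);                         // second call is a no-op: already marked
    return 0;
}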
@@ -16,16 +16,21 @@ class SatoriHeap;

void SatoriAllocationContext::OnTerminateThread(SatoriHeap* heap)
{
    if (RegularRegion() != nullptr)
    {
        RegularRegion()->Deactivate(heap);
        RegularRegion() = nullptr;
    }
    if (RegularRegion() != nullptr)
    {
        this->alloc_bytes -= this->alloc_limit - this->alloc_ptr;

        if (LargeRegion() != nullptr)
        {
            LargeRegion()->Deactivate(heap);
            LargeRegion() = nullptr;
        }
        //TODO: VS also maybe allocated bytes accounting?
        //TODO: VS make parseable
        //TODO: VS check for emptiness, mark, sweep, compact, slice, ...

        RegularRegion()->Deactivate(heap);
        RegularRegion() = nullptr;
    }

    if (LargeRegion() != nullptr)
    {
        //TODO: VS check for emptiness, mark, sweep, compact, slice, ...
        LargeRegion()->Deactivate(heap);
        LargeRegion() = nullptr;
    }
}
@@ -45,7 +45,12 @@ void SatoriGCHeap::WaitUntilConcurrentGCComplete()

bool SatoriGCHeap::IsConcurrentGCInProgress()
{
    return false;
    // Satori may move thread local objects asyncronously,
    // but noone should see that (that is the point).
    //
    // The only thing that may get to TL objects is object verification.
    // Return "true" for now.
    return true;
}

void SatoriGCHeap::TemporaryEnableConcurrentGC()

@@ -209,29 +214,9 @@ size_t SatoriGCHeap::GetLastGCGenerationSize(int gen)
    return 0;
}

void InitWriteBarrier()
{
    WriteBarrierParameters args = {};
    args.operation = WriteBarrierOp::Initialize;
    args.is_runtime_suspended = true;
    args.requires_upper_bounds_check = false;
    args.card_table = nullptr;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    args.card_bundle_table = nullptr;
#endif

    args.lowest_address = (uint8_t*)-1;
    args.highest_address = (uint8_t*)-1;
    args.ephemeral_low = (uint8_t*)-1;
    args.ephemeral_high = (uint8_t*)-1;
    GCToEEInterface::StompWriteBarrier(&args);
}

HRESULT SatoriGCHeap::Initialize()
{
    m_perfCounterFrequency = GCToOSInterface::QueryPerformanceFrequency();
    InitWriteBarrier();
    SatoriUtil::Initialize();
    m_heap = SatoriHeap::Create();
    if (m_heap == nullptr)
@@ -12,6 +12,43 @@

#include "SatoriHeap.h"

void InitWriteBarrier(uint8_t* segmentTable, size_t highest_address)
{
    WriteBarrierParameters args = {};
    args.operation = WriteBarrierOp::Initialize;
    args.is_runtime_suspended = true;
    args.requires_upper_bounds_check = false;
    args.card_table = (uint32_t*)segmentTable;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    args.card_bundle_table = nullptr;
#endif

    args.lowest_address = (uint8_t*)1;
    args.highest_address = (uint8_t*)highest_address;
    args.ephemeral_low = (uint8_t*)-1;
    args.ephemeral_high = (uint8_t*)-1;
    GCToEEInterface::StompWriteBarrier(&args);
}

void UpdateWriteBarrier(uint8_t* segmentTable, size_t highest_address)
{
    WriteBarrierParameters args = {};
    args.operation = WriteBarrierOp::StompResize;
    args.is_runtime_suspended = false;
    args.requires_upper_bounds_check = false;
    args.card_table = (uint32_t*)segmentTable;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    args.card_bundle_table = nullptr;
#endif

    args.lowest_address = (uint8_t*)1;
    args.highest_address = (uint8_t*)highest_address;
    args.ephemeral_low = (uint8_t*)-1;
    args.ephemeral_high = (uint8_t*)-1;
    GCToEEInterface::StompWriteBarrier(&args);
}

SatoriHeap* SatoriHeap::Create()
{

@@ -35,6 +72,7 @@ SatoriHeap* SatoriHeap::Create()
    SatoriHeap* result = (SatoriHeap*)reserved;
    result->m_reservedMapSize = mapSize;
    result->m_committedMapSize = (int)(commitSize - ((size_t)&result->m_pageMap - (size_t)result));
    InitWriteBarrier(result->m_pageMap, result->m_committedMapSize * Satori::PAGE_SIZE_GRANULARITY);
    result->m_mapLock.Initialize();
    result->m_nextPageIndex = 1;

@@ -56,6 +94,7 @@ bool SatoriHeap::CommitMoreMap(int currentlyCommitted)
    {
        // we did the commit
        m_committedMapSize = min(currentlyCommitted + (int)commitSize, m_reservedMapSize);
        UpdateWriteBarrier(m_pageMap, m_committedMapSize * Satori::PAGE_SIZE_GRANULARITY);
    }
}

@@ -78,17 +117,15 @@ bool SatoriHeap::TryAddRegularPage(SatoriPage*& newPage)
    newPage = SatoriPage::InitializeAt(pageAddress, Satori::PAGE_SIZE_GRANULARITY);
    if (newPage)
    {
        // mark the map, before an object can be allocated in the new page and
        // may be seen in a GC barrier
        // SYNCRONIZATION:
        // A page map update must be seen by all threads befeore seeing objects allocated
        // in the new page or checked barriers may consider the objects not in the heap.
        //
        // If another thread checks if object is in heap, its read of the map element is dependent on object,
        // therefore the read will happen after the object is obtained.
        // Also the object must be published before other thread could see it, and publishing is a release.
        // Thus an ordinary write is ok even for weak memory cases.
        m_pageMap[i] = 1;

        // we also need to ensure that the other thread doing the barrier,
        // reads the object before reading the updated map.
        // on ARM this would require load fence in the barrier. We will do a processwide here instead.
#if defined(HOST_ARM64) || defined(HOST_ARM)
        GCToOSInterface::FlushProcessWriteBuffers();
#endif

        // ensure the next is advanced to at least i + 1
        while ((nextPageIndex = m_nextPageIndex) < i + 1 &&
            Interlocked::CompareExchange(&m_nextPageIndex, i + 1, nextPageIndex) != nextPageIndex);
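Note: InitWriteBarrier/UpdateWriteBarrier publish the heap's page map through the g_card_table slot, one byte per 1 GB page (address >> 30), and the checked barriers treat a nonzero byte as "address is in the Satori heap". A sketch of that lookup under those assumptions; it assumes a 64-bit process, and the addresses and map contents are fabricated.

// Sketch of the "is this address in the Satori heap" test implied by the diff:
// index the page map with (address >> PAGE_BITS); a nonzero byte marks a heap page.
#include <cstdint>
#include <cstdio>
#include <vector>

static const int PAGE_BITS = 30;                       // 1 GB pages, as in this commit
static std::vector<uint8_t> g_pageMap(64, 0);          // stand-in for the committed map
static uintptr_t g_highestAddress = (uintptr_t)64 << PAGE_BITS;

static bool IsInHeap(uintptr_t address)
{
    if (address >= g_highestAddress)
        return false;
    return g_pageMap[address >> PAGE_BITS] != 0;
}

int main()
{
    uintptr_t fakeHeapAddr = ((uintptr_t)3 << PAGE_BITS) + 0x1000;
    g_pageMap[3] = 1;                                  // page 3 is now a heap page
    std::printf("%d %d\n", IsInHeap(fakeHeapAddr), IsInHeap(0x500));  // prints: 1 0
    return 0;
}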
@@ -12,6 +12,7 @@

#include "SatoriHeap.h"
#include "SatoriRecycler.h"
#include "SatoriRegion.h"

void SatoriRecycler::Initialize(SatoriHeap* heap)
{

@@ -21,6 +22,7 @@ void SatoriRecycler::Initialize(SatoriHeap* heap)
void SatoriRecycler::AddRegion(SatoriRegion* region)
{
    // TODO: VS make end parsable?
    region->Publish();

    // TODO: VS leak the region for now
}
@@ -38,6 +38,7 @@ SatoriRegion* SatoriRegion::InitializeAt(SatoriPage* containingPage, size_t addr
        committed += toCommit;
    }

    result->m_state = SatoriRegionState::allocating;
    result->m_end = address + regionSize;
    result->m_committed = min(committed, address + regionSize);
    result->m_zeroInitedAfter = min(max(zeroInitedAfter, address + sizeof(SatoriRegion)), result->End());

@@ -48,7 +49,7 @@ SatoriRegion* SatoriRegion::InitializeAt(SatoriPage* containingPage, size_t addr

    result->m_allocStart = (size_t)&result->m_firstObject;
    // +1 for syncblock
    result->m_allocEnd = result->m_end + 1;
    result->m_allocEnd = result->End() + 1;

    if (zeroInitedAfter > (size_t)&result->m_index)
    {

@@ -61,6 +62,7 @@ SatoriRegion* SatoriRegion::InitializeAt(SatoriPage* containingPage, size_t addr

void SatoriRegion::MakeBlank()
{
    m_state = SatoriRegionState::allocating;
    m_allocStart = (size_t)&m_firstObject;
    // +1 for syncblock
    m_allocEnd = m_end + 1;

@@ -141,7 +143,7 @@ void SatoriRegion::Deactivate(SatoriHeap* heap)
        return;
    }

    // TODO: VS: if can be splitd, split and return tail
    // TODO: VS: if can be splitted, split and return tail
    heap->Recycler()->AddRegion(this);
}
@@ -13,6 +13,12 @@
#include "SatoriHeap.h"
#include "SatoriUtil.h"

enum class SatoriRegionState : int8_t
{
    allocating = 0,
    shared = 1,
};

class SatoriRegion
{
    friend class SatoriRegionQueue;

@@ -43,7 +49,7 @@ public:

    size_t Start()
    {
        return (size_t)&m_end;
        return (size_t)&m_state;
    }

    size_t End()

@@ -76,7 +82,14 @@ public:
        return (SatoriRegion*)(((size_t)obj) >> Satori::REGION_BITS);
    }

    void Publish()
    {
        _ASSERTE(m_state == SatoriRegionState::allocating);
        m_state = SatoriRegionState::shared;
    }

private:
    SatoriRegionState m_state;
    // end is edge exclusive
    size_t m_end;
    size_t m_committed;

@@ -89,7 +102,8 @@ private:

    // active allocation may happen in the following range.
    // the range may not be parseable as sequence of objects
    // NB: it is in terms of objects, if converting to size_t beware of sync blocks
    // NB: the range is in terms of objects,
    // there is embedded off-by-one error for syncblocks
    size_t m_allocStart;
    size_t m_allocEnd;
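Note: with m_state placed first (Start() now returns &m_state), the leading byte of each 2 MB-aligned region is its state, which is what the barrier reads after masking an object address with 0FFFFFFFFFFE00000h. A sketch of that address arithmetic, assuming REGION_BITS = 21 as declared here; the addresses are made up and a 64-bit process is assumed.

// Sketch: recovering a region base (and hence its leading state byte) from an object
// address by masking off the low REGION_BITS bits.
#include <cstdint>
#include <cstdio>

static const int REGION_BITS = 21;                                  // 2 MB regions
static const uintptr_t REGION_MASK = ~((uintptr_t(1) << REGION_BITS) - 1);

int main()
{
    uintptr_t obj = 0x7f12345678a0;                                 // hypothetical 64-bit object address
    uintptr_t regionBase = obj & REGION_MASK;                       // same as `and rax, 0FFFFFFFFFFE00000h`
    bool sameRegion = ((obj ^ (obj + 0x100)) >> REGION_BITS) == 0;  // the barrier's cross-region test

    std::printf("region base %#zx, same region: %d\n", (size_t)regionBase, (int)sameRegion);
    // In the runtime, *(int8_t*)regionBase would be SatoriRegion::m_state
    // (0 == allocating, 1 == shared), per this header.
    return 0;
}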
@@ -51,6 +51,8 @@ endif

extern JIT_InternalThrow:proc

ifndef FEATURE_SATORI_GC

; There is an even more optimized version of these helpers possible which takes
; advantage of knowledge of which way the ephemeral heap is growing to only do 1/2
; that check (this is more significant in the JIT_WriteBarrier case).

@@ -388,6 +390,157 @@ endif
    ret
LEAF_END_MARKED JIT_ByRefWriteBarrier, _TEXT


else

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; void JIT_CheckedWriteBarrier(Object** dst, Object* src)
LEAF_ENTRY JIT_CheckedWriteBarrier, _TEXT

    ; When WRITE_BARRIER_CHECK is defined _NotInHeap will write the reference
    ; but if it isn't then it will just return.
    ;

    ; See if this is in GCHeap
    cmp rcx, [g_highest_address]
    jnb NotInHeap

    mov rax, rcx
    shr rax, 30 ; round to page size ( >> PAGE_BITS )
    add rax, [g_card_table]
    cmp byte ptr [rax], 0
    jnz JIT_WriteBarrier

NotInHeap:
    ; See comment above about possible AV
    mov [rcx], rdx
    ret
LEAF_END_MARKED JIT_CheckedWriteBarrier, _TEXT


; Mark start of the code region that we patch at runtime
LEAF_ENTRY JIT_PatchedCodeStart, _TEXT
    ret
LEAF_END JIT_PatchedCodeStart, _TEXT

; This is used by the mechanism to hold either the JIT_WriteBarrier_PreGrow
; or JIT_WriteBarrier_PostGrow code (depending on the state of the GC). It _WILL_
; change at runtime as the GC changes. Initially it should simply be a copy of the
; larger of the two functions (JIT_WriteBarrier_PostGrow) to ensure we have created
; enough space to copy that code in.
LEAF_ENTRY JIT_WriteBarrier, _TEXT
    align 16

    mov [rcx], rdx

    mov rax, rdx
    xor rax, rcx
    shr rax, 21
    jnz CrossRegion
    REPRET ; assignment is within the same region

CrossRegion:
    cmp rdx, 0
    je Exit ; assigning null

    mov rax, rdx
    and rax, 0FFFFFFFFFFE00000h ; region
    cmp byte ptr [rax], 0 ; check status, 0 -> allocating

    jne EscapeChecked ; object is not from allocating region
                      ; this is optimization, it is ok to mark, just noone cares
                      ; TODO: VS is this really an optimization?

    cmp byte ptr [rdx - 5], 00h
    jne EscapeChecked ; already escaped

    ; mark the escape byte
    mov byte ptr [rdx - 5], 0FFh

EscapeChecked:
    ; cross generational referencing would be recorded here
Exit:
    ret

    ; make sure this is bigger than any of the others
    align 16
    nop
LEAF_END_MARKED JIT_WriteBarrier, _TEXT

; Mark start of the code region that we patch at runtime
LEAF_ENTRY JIT_PatchedCodeLast, _TEXT
    ret
LEAF_END JIT_PatchedCodeLast, _TEXT

; JIT_ByRefWriteBarrier has weird symantics, see usage in StubLinkerX86.cpp
;
; Entry:
;   RDI - address of ref-field (assigned to)
;   RSI - address of the data (source)
;   RCX is trashed
;   RAX is trashed when FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP is defined
; Exit:
;   RDI, RSI are incremented by SIZEOF(LPVOID)
LEAF_ENTRY JIT_ByRefWriteBarrier, _TEXT
    mov rcx, [rsi]

    ; do the assignment
    mov [rdi], rcx

    ; See if this is in GCHeap
    cmp rdi, [g_highest_address]
    jnb Exit ; not in heap

    mov rax, rdi
    shr rax, 30 ; round to page size ( >> PAGE_BITS )
    add rax, [g_card_table]
    cmp byte ptr [rax], 0
    je Exit ; not in heap

    ; check if this is a cross-region assignment (TODO: VS perhaps check before "in heap")
    mov rax, rdi
    xor rax, rcx
    shr rax, 21
    jz Exit ; assignment is within the same region

CrossRegion:
    cmp rcx, 0
    je Exit ; assigning null

    mov rax, rcx
    and rax, 0FFFFFFFFFFE00000h ; region
    cmp byte ptr [rax], 0 ; check status, 0 -> allocating

    jne EscapeChecked ; object is not from allocating region
                      ; this is optimization, it is ok to mark, just noone cares
                      ; TODO: VS is this really an optimization?

    cmp byte ptr [rcx - 5], 00h
    jne EscapeChecked ; already escaped

    ; mark the escape byte
    mov byte ptr [rcx - 5], 0FFh

EscapeChecked:
    ; cross generational referencing would be recorded here

    align 16
Exit:
    ; Increment the registers before leaving
    add rdi, 8h
    add rsi, 8h
    ret
LEAF_END_MARKED JIT_ByRefWriteBarrier, _TEXT


;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

endif

; The following helper will access ("probe") a word on each page of the stack
; starting with the page right beneath rsp down to the one pointed to by r11.
; The procedure is needed to make sure that the "guard" page is pushed down below the allocated stack frame.
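Note: a C rendering of the new JIT_WriteBarrier control flow above, intended only as a reviewing aid. The layout assumptions (REGION_BITS = 21, region state byte at the region base, escape byte at object - 5) are taken from elsewhere in this commit; this is not the code the JIT emits.

// C sketch of JIT_WriteBarrier's logic from the asm above, for review purposes only.
#include <cstdint>
#include <cstddef>

static const int REGION_BITS = 21;

static void WriteBarrierSketch(void** dst, void* ref)
{
    *dst = ref;                                                    // the store itself

    if ((((uintptr_t)dst ^ (uintptr_t)ref) >> REGION_BITS) == 0)
        return;                                                    // same region: nothing to do

    if (ref == nullptr)
        return;                                                    // storing null: nothing escapes

    int8_t* region = (int8_t*)((uintptr_t)ref & ~(((uintptr_t)1 << REGION_BITS) - 1));
    if (*region != 0)
        return;                                                    // source region not in the allocating state

    int8_t* escapeByte = (int8_t*)ref - 5;
    if (*escapeByte == 0)
        *escapeByte = (int8_t)0xFF;                                // mark the referenced object as escaped

    // cross-generational referencing would be recorded here (not implemented in this commit)
}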
@@ -203,6 +203,43 @@ LEAF_END_MARKED JIT_WriteBarrier_SVR64, _TEXT

endif

ifdef FEATURE_SATORI_GC

LEAF_ENTRY JIT_WriteBarrier_SATORI, _TEXT
    align 8
    mov [rcx], rdx

    mov rax, rdx
    xor rax, rcx
    shr rax, 21
    jnz CrossRegion
    REPRET ; assignment is within the same region

CrossRegion:
    cmp rdx, 0
    je Exit ; assigning null

    mov rax, rdx
    and rax, 0FFFFFFFFFFE00000h ; region
    cmp byte ptr [rax], 0 ; check status, 0 -> allocating

    jne EscapeChecked ; object is not from allocating region
                      ; this is optimization, it is ok to mark, just noone cares
                      ; TODO: VS is this really an optimization?

    cmp byte ptr [rdx - 5], 00h
    jne EscapeChecked ; already escaped

    ; mark the escape byte
    mov byte ptr [rdx - 5], 0FFh

EscapeChecked:
    ; cross generational referencing would be recorded here
Exit:
    ret
LEAF_END_MARKED JIT_WriteBarrier_SATORI, _TEXT

endif

ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
@@ -50,6 +50,11 @@ EXTERN_C void JIT_WriteBarrier_SVR64_PatchLabel_CardBundleTable();
EXTERN_C void JIT_WriteBarrier_SVR64_End();
#endif // FEATURE_SVR_GC

#ifdef FEATURE_SATORI_GC
EXTERN_C void JIT_WriteBarrier_SATORI(Object** dst, Object* ref);
EXTERN_C void JIT_WriteBarrier_SATORI_End();
#endif // FEATURE_SATORI_GC

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
EXTERN_C void JIT_WriteBarrier_WriteWatch_PreGrow64(Object **dst, Object *ref);
EXTERN_C void JIT_WriteBarrier_WriteWatch_PreGrow64_Patch_Label_WriteWatchTable();

@@ -214,6 +219,10 @@ PCODE WriteBarrierManager::GetCurrentWriteBarrierCode()
        case WRITE_BARRIER_SVR64:
            return GetEEFuncEntryPoint(JIT_WriteBarrier_SVR64);
#endif // FEATURE_SVR_GC
#ifdef FEATURE_SATORI_GC
        case WRITE_BARRIER_SATORI:
            return GetEEFuncEntryPoint(JIT_WriteBarrier_SATORI);
#endif // FEATURE_SATORI_GC
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        case WRITE_BARRIER_WRITE_WATCH_PREGROW64:
            return GetEEFuncEntryPoint(JIT_WriteBarrier_WriteWatch_PreGrow64);

@@ -246,6 +255,10 @@ size_t WriteBarrierManager::GetSpecificWriteBarrierSize(WriteBarrierType writeBa
        case WRITE_BARRIER_SVR64:
            return MARKED_FUNCTION_SIZE(JIT_WriteBarrier_SVR64);
#endif // FEATURE_SVR_GC
#ifdef FEATURE_SATORI_GC
        case WRITE_BARRIER_SATORI:
            return MARKED_FUNCTION_SIZE(JIT_WriteBarrier_SATORI);
#endif // FEATURE_SATORI_GC
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        case WRITE_BARRIER_WRITE_WATCH_PREGROW64:
            return MARKED_FUNCTION_SIZE(JIT_WriteBarrier_WriteWatch_PreGrow64);

@@ -297,6 +310,10 @@ int WriteBarrierManager::ChangeWriteBarrierTo(WriteBarrierType newWriteBarrier,

    switch (newWriteBarrier)
    {
#ifdef FEATURE_SATORI_GC
        case WRITE_BARRIER_SATORI:
            return stompWBCompleteActions;
#endif
        case WRITE_BARRIER_PREGROW64:
        {
            m_pLowerBoundImmediate = CALC_PATCH_LOCATION(JIT_WriteBarrier_PreGrow64, Patch_Label_Lower, 2);

@@ -432,11 +449,20 @@ void WriteBarrierManager::Initialize()
    // write barrier implementations.
    size_t cbWriteBarrierBuffer = GetSpecificWriteBarrierSize(WRITE_BARRIER_BUFFER);

    //TODO: Satori
#ifdef FEATURE_SATORI_GC
    _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", cbWriteBarrierBuffer >= GetSpecificWriteBarrierSize(WRITE_BARRIER_SATORI));

#else

    _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", cbWriteBarrierBuffer >= GetSpecificWriteBarrierSize(WRITE_BARRIER_PREGROW64));
    _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", cbWriteBarrierBuffer >= GetSpecificWriteBarrierSize(WRITE_BARRIER_POSTGROW64));
#ifdef FEATURE_SVR_GC
    _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", cbWriteBarrierBuffer >= GetSpecificWriteBarrierSize(WRITE_BARRIER_SVR64));
#endif // FEATURE_SVR_GC
#ifdef FEATURE_SATORI_GC
    _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", cbWriteBarrierBuffer >= GetSpecificWriteBarrierSize(WRITE_BARRIER_SATORI));
#endif // FEATURE_SATORI_GC
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", cbWriteBarrierBuffer >= GetSpecificWriteBarrierSize(WRITE_BARRIER_WRITE_WATCH_PREGROW64));
    _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", cbWriteBarrierBuffer >= GetSpecificWriteBarrierSize(WRITE_BARRIER_WRITE_WATCH_POSTGROW64));

@@ -445,6 +471,8 @@ void WriteBarrierManager::Initialize()
#endif // FEATURE_SVR_GC
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

#endif //FEATURE_SATORI_GC

#if !defined(CODECOVERAGE)
    Validate();
#endif

@@ -473,7 +501,8 @@ bool WriteBarrierManager::NeedDifferentWriteBarrier(bool bReqUpperBoundsCheck, W
            }
#endif

            writeBarrierType = GCHeapUtilities::IsServerHeap() ? WRITE_BARRIER_SVR64 : WRITE_BARRIER_PREGROW64;
            //TODO: Satori
            writeBarrierType = GCHeapUtilities::IsServerHeap() ? WRITE_BARRIER_SATORI : WRITE_BARRIER_PREGROW64;
            continue;

        case WRITE_BARRIER_PREGROW64:

@@ -491,6 +520,11 @@ bool WriteBarrierManager::NeedDifferentWriteBarrier(bool bReqUpperBoundsCheck, W
            break;
#endif // FEATURE_SVR_GC

#ifdef FEATURE_SATORI_GC
        case WRITE_BARRIER_SATORI:
            break;
#endif // FEATURE_SATORI_GC

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        case WRITE_BARRIER_WRITE_WATCH_PREGROW64:
            if (bReqUpperBoundsCheck)

@@ -573,6 +607,12 @@ int WriteBarrierManager::UpdateEphemeralBounds(bool isRuntimeSuspended)
        }
#endif // FEATURE_SVR_GC

#ifdef FEATURE_SATORI_GC
        case WRITE_BARRIER_SATORI:
        {
            break;
        }
#endif
        default:
            UNREACHABLE_MSG("unexpected m_currentWriteBarrier in UpdateEphemeralBounds");
    }
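Note: the WriteBarrierManager changes above are plumbing: a WRITE_BARRIER_SATORI entry that maps to the marked stub, is selected wholesale, and needs no ephemeral-bounds patching. A toy sketch of that dispatch shape; apart from the barrier-type names, everything here is simplified and hypothetical, not the real manager.

// Toy sketch of the barrier-selection shape added in this file.
#include <cstdio>

enum WriteBarrierType { WRITE_BARRIER_PREGROW64, WRITE_BARRIER_SVR64, WRITE_BARRIER_SATORI };

static WriteBarrierType ChooseBarrier(bool satoriEnabled, bool serverHeap)
{
    if (satoriEnabled)
        return WRITE_BARRIER_SATORI;        // one fixed stub, no pre/post-grow variants
    return serverHeap ? WRITE_BARRIER_SVR64 : WRITE_BARRIER_PREGROW64;
}

static void UpdateEphemeralBounds(WriteBarrierType t)
{
    if (t == WRITE_BARRIER_SATORI)
        return;                             // nothing to patch: the Satori stub has no bounds
    std::printf("patch ephemeral bounds for barrier %d\n", (int)t);
}

int main()
{
    WriteBarrierType t = ChooseBarrier(/*satoriEnabled*/ true, /*serverHeap*/ false);
    UpdateEphemeralBounds(t);
    return 0;
}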
@@ -438,8 +438,10 @@ void InitializeStartupFlags()
    else
        g_IGCconcurrent = 0;

    //TODO: Satori
    // g_heap_type = ((flags & STARTUP_SERVER_GC) && GetCurrentProcessCpuCount() > 1) ? GC_HEAP_SVR : GC_HEAP_WKS;
    g_heap_type = GC_HEAP_SATORI;

    g_heap_type = ((flags & STARTUP_SERVER_GC) && GetCurrentProcessCpuCount() > 1) ? GC_HEAP_SVR : GC_HEAP_WKS;
    g_IGCHoardVM = (flags & STARTUP_HOARD_GC_VM) == 0 ? 0 : 1;
}
#endif // CROSSGEN_COMPILE
@@ -991,8 +991,7 @@ void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
        assert(g_card_table == nullptr);
        assert(g_lowest_address == nullptr);
        assert(g_highest_address == nullptr);
        //TODO: Satori
        //assert(args->card_table != nullptr);
        assert(args->card_table != nullptr);
        assert(args->lowest_address != nullptr);
        assert(args->highest_address != nullptr);
        assert(args->ephemeral_low != nullptr);
@@ -1125,6 +1125,8 @@ extern "C" HCIMPL2_RAW(VOID, JIT_CheckedWriteBarrier, Object **dst, Object *ref)
    if (((BYTE*)dst < g_lowest_address) || ((BYTE*)dst >= g_highest_address))
        return;

    TODO: Satori , is this dead code?

#ifdef FEATURE_COUNT_GC_WRITE_BARRIERS
    CheckedAfterHeapFilter++;
#endif
@@ -1252,43 +1254,81 @@ HCIMPLEND_RAW
// This function sets the card table with the granularity of 1 byte, to avoid ghost updates
// that could occur if multiple threads were trying to set different bits in the same card.

static const int PAGE_BITS = 30;
static const size_t PAGE_SIZE_GRANULARITY = (size_t)1 << PAGE_BITS;

#include <optsmallperfcritical.h>

bool IsInHeapSatori(Object** start)
{
    if ((uint8_t*)start > (uint8_t*)g_highest_address)
    {
        return false;
    }

    uint8_t* pages = (uint8_t*)g_card_table;
    size_t page = (size_t)start >> PAGE_BITS;
    return pages[page];
}

void CheckAndMarkEscapeSatori(Object** dst, Object* ref)
{
    if (ref && ((size_t)dst ^ (size_t)(ref)) >> 21)
    {
        // mark the escape byte
        // TODO: VS, check if region is allocating?
        if (!((int8_t*)ref)[-5])
        {
            ((int8_t*)ref)[-5] = (int8_t)0xFF;
        }
    }
}

void ErectWriteBarrier(OBJECTREF *dst, OBJECTREF ref)
{
    STATIC_CONTRACT_MODE_COOPERATIVE;
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

    // if the dst is outside of the heap (unboxed value classes) then we
    // simply exit
    if (((BYTE*)dst < g_lowest_address) || ((BYTE*)dst >= g_highest_address))
    if (!IsInHeapSatori((Object**)dst))
    {
        return;

#ifdef WRITE_BARRIER_CHECK
    updateGCShadow((Object**) dst, OBJECTREFToObject(ref));     // support debugging write barrier
#endif

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    if (GCHeapUtilities::SoftwareWriteWatchIsEnabled())
    {
        GCHeapUtilities::SoftwareWriteWatchSetDirty(dst, sizeof(*dst));
    }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

    if ((BYTE*) OBJECTREFToObject(ref) >= g_ephemeral_low && (BYTE*) OBJECTREFToObject(ref) < g_ephemeral_high)
    {
        // VolatileLoadWithoutBarrier() is used here to prevent fetch of g_card_table from being reordered
        // with g_lowest/highest_address check above. See comment in StompWriteBarrier.
        BYTE* pCardByte = (BYTE*)VolatileLoadWithoutBarrier(&g_card_table) + card_byte((BYTE *)dst);
        if (*pCardByte != 0xFF)
        {
            *pCardByte = 0xFF;
    CheckAndMarkEscapeSatori((Object**)dst, OBJECTREFToObject(ref));

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
            SetCardBundleByte((BYTE*)dst);
#endif
        }
    }
    //TODO: Satori
    //    // if the dst is outside of the heap (unboxed value classes) then we
    //    // simply exit
    //    if (((BYTE*)dst < g_lowest_address) || ((BYTE*)dst >= g_highest_address))
    //        return;
    //
    //#ifdef WRITE_BARRIER_CHECK
    //    updateGCShadow((Object**) dst, OBJECTREFToObject(ref));     // support debugging write barrier
    //#endif
    //
    //#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    //    if (GCHeapUtilities::SoftwareWriteWatchIsEnabled())
    //    {
    //        GCHeapUtilities::SoftwareWriteWatchSetDirty(dst, sizeof(*dst));
    //    }
    //#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    //
    //    if ((BYTE*) OBJECTREFToObject(ref) >= g_ephemeral_low && (BYTE*) OBJECTREFToObject(ref) < g_ephemeral_high)
    //    {
    //        // VolatileLoadWithoutBarrier() is used here to prevent fetch of g_card_table from being reordered
    //        // with g_lowest/highest_address check above. See comment in StompWriteBarrier.
    //        BYTE* pCardByte = (BYTE*)VolatileLoadWithoutBarrier(&g_card_table) + card_byte((BYTE *)dst);
    //        if (*pCardByte != 0xFF)
    //        {
    //            *pCardByte = 0xFF;
    //
    //#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    //            SetCardBundleByte((BYTE*)dst);
    //#endif
    //        }
    //    }
}
#include <optdefault.h>

@@ -1353,15 +1393,15 @@ void ErectWriteBarrierForMT(MethodTable **dst, MethodTable *ref)
#pragma optimize("y", on)        // Small critical routines, don't put in EBP frame
#endif //_MSC_VER && TARGET_X86

void
SetCardsAfterBulkCopy(Object **start, size_t len)
{
    // If the size is smaller than a pointer, no write barrier is required.
    if (len >= sizeof(uintptr_t))
    {
        InlinedSetCardsAfterBulkCopyHelper(start, len);
    }
}
//void
//SetCardsAfterBulkCopy(Object **start, size_t len)
//{
//    // If the size is smaller than a pointer, no write barrier is required.
//    if (len >= sizeof(uintptr_t))
//    {
//        InlinedSetCardsAfterBulkCopyHelper(start, len);
//    }
//}

#if defined(_MSC_VER) && defined(TARGET_X86)
#pragma optimize("", on)        // Go back to command line default optimizations
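Note: CheckAndMarkEscapeSatori only marks when the destination and the referenced object are in different 2 MB regions. That predicate is pure address arithmetic, so it can be demonstrated standalone; the addresses below are fabricated, and REGION_BITS = 21 is the value used throughout this commit.

// Sketch of the escape predicate used by CheckAndMarkEscapeSatori.
#include <cstdint>
#include <cstdio>

static bool CrossesRegion(uintptr_t dst, uintptr_t ref)
{
    return ref != 0 && (((dst ^ ref) >> 21) != 0);   // 21 == REGION_BITS in this commit
}

int main()
{
    uintptr_t region = (uintptr_t)7 << 21;
    std::printf("%d\n", (int)CrossesRegion(region + 0x100, region + 0x200));              // 0: same region
    std::printf("%d\n", (int)CrossesRegion(region + 0x100, region + (1 << 21) + 0x40));   // 1: crosses regions
    std::printf("%d\n", (int)CrossesRegion(region + 0x100, 0));                           // 0: storing null
    return 0;
}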
@@ -66,5 +66,8 @@ extern void ThrowOutOfMemoryDimensionsExceeded();
//========================================================================

void ErectWriteBarrier(OBJECTREF* dst, OBJECTREF ref);
void SetCardsAfterBulkCopy(Object **start, size_t len);
bool IsInHeapSatori(Object** start);
void CheckAndMarkEscapeSatori(Object** dst, Object* ref);

//void SetCardsAfterBulkCopy(Object **start, size_t len);
#endif // _GCHELPERS_H_
@@ -19,90 +19,105 @@
//========================================================================

#if defined(HOST_64BIT)
static const int card_byte_shift = 11;
static const int card_bundle_byte_shift = 21;
static const int card_byte_shift = 11;
static const int card_bundle_byte_shift = 21;
#else
static const int card_byte_shift = 10;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
#error Manually managed card bundles are currently only implemented for AMD64.
#endif
#endif

FORCEINLINE void InlinedSetCardsAfterBulkCopyHelper(Object **start, size_t len)
{
    // Check whether the writes were even into the heap. If not there's no card update required.
    // Also if the size is smaller than a pointer, no write barrier is required.
    _ASSERTE(len >= sizeof(uintptr_t));
    if ((BYTE*)start < g_lowest_address || (BYTE*)start >= g_highest_address)
    {
        return;
    }

    // Don't optimize the Generation 0 case if we are checking for write barrier violations
    // since we need to update the shadow heap even in the generation 0 case.
#if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
    if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK)
    {
        for(unsigned i=0; i < len / sizeof(Object*); i++)
        {
            updateGCShadow(&start[i], start[i]);
        }
    }
#endif //WRITE_BARRIER_CHECK && !SERVER_GC

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    if (GCHeapUtilities::SoftwareWriteWatchIsEnabled())
    {
        GCHeapUtilities::SoftwareWriteWatchSetDirtyRegion(start, len);
    }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

    size_t startAddress = (size_t)start;
    size_t endAddress = startAddress + len;
    size_t startingClump = startAddress >> card_byte_shift;
    size_t endingClump = (endAddress + (1 << card_byte_shift) - 1) >> card_byte_shift;

    // calculate the number of clumps to mark (round_up(end) - start)
    size_t clumpCount = endingClump - startingClump;

    // VolatileLoadWithoutBarrier() is used here to prevent fetch of g_card_table from being reordered
    // with g_lowest/highest_address check above. See comment in StompWriteBarrier.
    BYTE* card = (BYTE*)VolatileLoadWithoutBarrier(&g_card_table) + startingClump;

    // Fill the cards. To avoid cache line thrashing we check whether the cards have already been set before
    // writing.
    do
    {
        if (*card != 0xff)
        {
            *card = 0xff;
        }

        card++;
        clumpCount--;
    }
    while (clumpCount != 0);
static const int card_byte_shift = 10;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    size_t startBundleByte = startAddress >> card_bundle_byte_shift;
    size_t endBundleByte = (endAddress + (1 << card_bundle_byte_shift) - 1) >> card_bundle_byte_shift;
    size_t bundleByteCount = endBundleByte - startBundleByte;

    uint8_t* pBundleByte = ((uint8_t*)VolatileLoadWithoutBarrier(&g_card_bundle_table)) + startBundleByte;

    do
    {
        if (*pBundleByte != 0xFF)
        {
            *pBundleByte = 0xFF;
        }

        pBundleByte++;
        bundleByteCount--;
    }
    while (bundleByteCount != 0);
#error Manually managed card bundles are currently only implemented for AMD64.
#endif
#endif

const static int REGION_BITS = 21;
const static size_t REGION_SIZE_GRANULARITY = 1 << REGION_BITS;

FORCEINLINE void InlinedSetCardsAfterBulkCopyHelper(Object** dst, Object* src, size_t len)
{
    // TODO: VS dst can't be stack?
    _ASSERTE(IsInHeapSatori(dst));

    for (int i = 0; i < len; i++)
    {
        CheckAndMarkEscapeSatori(&dst[i], &src[i]);
    }
}

//TODO: Satori
//FORCEINLINE void InlinedSetCardsAfterBulkCopyHelper(Object **start, size_t len)
//{
//    // Check whether the writes were even into the heap. If not there's no card update required.
//    // Also if the size is smaller than a pointer, no write barrier is required.
//    _ASSERTE(len >= sizeof(uintptr_t));
//    if ((BYTE*)start < g_lowest_address || (BYTE*)start >= g_highest_address)
//    {
//        return;
//    }
//
//    // Don't optimize the Generation 0 case if we are checking for write barrier violations
//    // since we need to update the shadow heap even in the generation 0 case.
//#if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
//    if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK)
//    {
//        for(unsigned i=0; i < len / sizeof(Object*); i++)
//        {
//            updateGCShadow(&start[i], start[i]);
//        }
//    }
//#endif //WRITE_BARRIER_CHECK && !SERVER_GC
//
//#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
//    if (GCHeapUtilities::SoftwareWriteWatchIsEnabled())
//    {
//        GCHeapUtilities::SoftwareWriteWatchSetDirtyRegion(start, len);
//    }
//#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
//
//    size_t startAddress = (size_t)start;
//    size_t endAddress = startAddress + len;
//    size_t startingClump = startAddress >> card_byte_shift;
//    size_t endingClump = (endAddress + (1 << card_byte_shift) - 1) >> card_byte_shift;
//
//    // calculate the number of clumps to mark (round_up(end) - start)
//    size_t clumpCount = endingClump - startingClump;
//
//    // VolatileLoadWithoutBarrier() is used here to prevent fetch of g_card_table from being reordered
//    // with g_lowest/highest_address check above. See comment in StompWriteBarrier.
//    BYTE* card = (BYTE*)VolatileLoadWithoutBarrier(&g_card_table) + startingClump;
//
//    // Fill the cards. To avoid cache line thrashing we check whether the cards have already been set before
//    // writing.
//    do
//    {
//        if (*card != 0xff)
//        {
//            *card = 0xff;
//        }
//
//        card++;
//        clumpCount--;
//    }
//    while (clumpCount != 0);
//
//#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
//    size_t startBundleByte = startAddress >> card_bundle_byte_shift;
//    size_t endBundleByte = (endAddress + (1 << card_bundle_byte_shift) - 1) >> card_bundle_byte_shift;
//    size_t bundleByteCount = endBundleByte - startBundleByte;
//
//    uint8_t* pBundleByte = ((uint8_t*)VolatileLoadWithoutBarrier(&g_card_bundle_table)) + startBundleByte;
//
//    do
//    {
//        if (*pBundleByte != 0xFF)
//        {
//            *pBundleByte = 0xFF;
//        }
//
//        pBundleByte++;
//        bundleByteCount--;
//    }
//    while (bundleByteCount != 0);
//#endif
//}

#endif // !_GCHELPERS_INL_
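Note: the bulk-copy path now visits each copied slot and applies the per-slot escape check instead of setting cards (the old helper, kept above in comments, treated len as a byte count and divided by sizeof(Object*)). A sketch of that loop shape over a plain array; the stubbed check and the explicit element count are illustrative simplifications, and the diff's helper passes the source slot address (&src[i]) rather than the stored reference.

// Sketch of the new bulk-copy barrier shape: one escape check per copied slot.
#include <cstddef>
#include <cstdio>

struct Object;                                        // opaque, as in the runtime

static void CheckAndMarkEscape(Object** dst, Object* ref)
{
    // Placeholder for CheckAndMarkEscapeSatori: cross-region test plus escape-byte marking.
    std::printf("check slot %p -> %p\n", (void*)dst, (void*)ref);
}

static void SetAfterBulkCopySketch(Object** dst, Object** src, size_t elementCount)
{
    for (size_t i = 0; i < elementCount; i++)
        CheckAndMarkEscape(&dst[i], src[i]);
}

int main()
{
    Object* srcSlots[3] = { nullptr, nullptr, nullptr };
    Object* dstSlots[3] = { nullptr, nullptr, nullptr };
    SetAfterBulkCopySketch(dstSlots, srcSlots, 3);
    return 0;
}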
@@ -264,6 +264,9 @@ public:
#ifdef FEATURE_SVR_GC
        WRITE_BARRIER_SVR64,
#endif // FEATURE_SVR_GC
#ifdef FEATURE_SATORI_GC
        WRITE_BARRIER_SATORI,
#endif // FEATURE_SATORI_GC
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        WRITE_BARRIER_WRITE_WATCH_PREGROW64,
        WRITE_BARRIER_WRITE_WATCH_POSTGROW64,
@@ -1305,8 +1305,9 @@ void* __cdecl GCSafeMemCpy(void * dest, const void * src, size_t len)
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_FORBID_FAULT;

    if (!(((*(BYTE**)&dest) <  g_lowest_address ) ||
          ((*(BYTE**)&dest) >= g_highest_address)))
    if (IsInHeapSatori((Object**)dest))
    //if (!(((*(BYTE**)&dest) <  g_lowest_address ) ||
    //      ((*(BYTE**)&dest) >= g_highest_address)))
    {
        Thread* pThread = GetThread();
@@ -3973,8 +3973,15 @@ public:
        if((val & ~3) != (size_t) ref || (val & 3) != 1)
            return(true);
        // If the pointer lives in the GC heap, than it is protected, and thus valid.
        if (dac_cast<TADDR>(g_lowest_address) <= val && val < dac_cast<TADDR>(g_highest_address))
            return(true);
        //if (dac_cast<TADDR>(g_lowest_address) <= val && val < dac_cast<TADDR>(g_highest_address))
        //    return(true);

        // TODO: Satori
        if (IsInHeapSatori((Object**)val))
        {
            return true;
        }

        return(false);
    }