
Merge branch 'main' into Net8ChangesSecondAttempt

Viktor Hofer 2022-11-29 15:21:43 +01:00 committed by GitHub
commit b0c8d0b03e
Signed by: github
GPG key ID: 4AEE18F83AFDEB23
433 changed files with 7636 additions and 6682 deletions

View file

@ -12088,6 +12088,17 @@
}
]
},
{
"operator": "not",
"operands": [
{
"name": "hasLabel",
"parameters": {
"label": "area-System.IO.Ports"
}
}
]
},
{
"operator": "not",
"operands": [
@ -12257,6 +12268,12 @@
"label": "area-System.DateTime"
}
},
{
"name": "hasLabel",
"parameters": {
"label": "area-System.IO.Ports"
}
},
{
"name": "hasLabel",
"parameters": {
@ -12368,6 +12385,12 @@
"label": "area-System.DateTime"
}
},
{
"name": "labelAdded",
"parameters": {
"label": "area-System.IO.Ports"
}
},
{
"name": "labelAdded",
"parameters": {
@ -12524,6 +12547,12 @@
"label": "area-System.DateTime"
}
},
{
"name": "hasLabel",
"parameters": {
"label": "area-System.IO.Ports"
}
},
{
"name": "hasLabel",
"parameters": {
@ -13452,6 +13481,17 @@
}
]
},
{
"operator": "not",
"operands": [
{
"name": "hasLabel",
"parameters": {
"label": "area-System.IO.Ports"
}
}
]
},
{
"operator": "not",
"operands": [
@ -13603,6 +13643,12 @@
"label": "area-System.DateTime"
}
},
{
"name": "hasLabel",
"parameters": {
"label": "area-System.IO.Ports"
}
},
{
"name": "hasLabel",
"parameters": {
@ -13866,6 +13912,12 @@
"label": "area-System.DateTime"
}
},
{
"name": "hasLabel",
"parameters": {
"label": "area-System.IO.Ports"
}
},
{
"name": "hasLabel",
"parameters": {
@ -14064,6 +14116,12 @@
"label": "area-System.DateTime"
}
},
{
"name": "hasLabel",
"parameters": {
"label": "area-System.IO.Ports"
}
},
{
"name": "hasLabel",
"parameters": {
@ -14254,6 +14312,12 @@
"label": "area-System.DateTime"
}
},
{
"name": "hasLabel",
"parameters": {
"label": "area-System.IO.Ports"
}
},
{
"name": "hasLabel",
"parameters": {
@ -14444,6 +14508,12 @@
"label": "area-System.DateTime"
}
},
{
"name": "hasLabel",
"parameters": {
"label": "area-System.IO.Ports"
}
},
{
"name": "hasLabel",
"parameters": {
@ -14634,6 +14704,12 @@
"label": "area-System.DateTime"
}
},
{
"name": "hasLabel",
"parameters": {
"label": "area-System.IO.Ports"
}
},
{
"name": "hasLabel",
"parameters": {

View file

@ -708,7 +708,7 @@ License for fastmod (https://github.com/lemire/fastmod) and ibm-fpgen (https://g
License for sse4-strstr (https://github.com/WojciechMula/sse4-strstr)
--------------------------------------
Copyright (c) 2008-2016, Wojciech Muła
Copyright (c) 2008-2016, Wojciech Mula
All rights reserved.
Redistribution and use in source and binary forms, with or without

View file

@ -1007,6 +1007,20 @@
],
"label": "area-System.IO.Pipelines"
},
{
"lead": "jeffhandley",
"pod": "eirik-krzysztof-layomi-tarek",
"owners": [
"eiriktsarpalis",
"ericstj",
"jeffhandley",
"krwq",
"layomia",
"tarekgh",
"dotnet/area-system-io-ports"
],
"label": "area-System.IO.Ports"
},
{
"lead": "jeffhandley",
"pod": "eirik-krzysztof-layomi-tarek",

View file

@ -97,6 +97,7 @@ Note: Editing this file doesn't update the mapping used by `@msftbot` for area-s
| area-System.IO.Compression | @jeffhandley | @dotnet/area-system-io-compression | Included:<ul><li>System.Formats.Tar</li><li>System.IO.Packaging</li></ul> |
| area-System.IO.Hashing | @jeffhandley | @dotnet/area-system-io-hashing | APIs within the System.IO.Hashing namespace, which align more with cryptography than with I/O |
| area-System.IO.Pipelines | @adityamandaleeka | @davidfowl @halter73 | |
| area-System.IO.Ports | @jeffhandley | @dotnet/area-system-io-ports | |
| area-System.Linq | @jeffhandley | @dotnet/area-system-linq | |
| area-System.Linq.Expressions | @jaredpar | @cston @333fred | Archived component - limited churn/contributions (see [#27790](https://github.com/dotnet/runtime/issues/27790)) |
| area-System.Linq.Parallel | @jeffhandley | @dotnet/area-system-linq-parallel | Consultants: @stephentoub @kouvel |

View file

@ -28,7 +28,7 @@ Such transport packages represent the set of libraries which are produced in dot
To add a library to the target's shared framework, that library should be listed in the `AspNetCoreAppLibrary` or `WindowsDesktopAppLibrary` section in `NetCoreAppLibrary.props`.
Source generators and analyzers can be included in the package by adding them to the `Microsoft.Internal.Runtime.**TARGET**.Transport.proj` as an AnalyzerReference. The analyzer projects should specify `AnalyzerLanguage` as mentioned [below](#analyzers--source-generators).
Source generators and analyzers can be included in the package by adding them to the `Microsoft.Internal.Runtime.**TARGET**.Transport.proj` as a ProjectReference with the `ReferenceOutputAssembly=false` and `PackAsAnalyzer=true` metadata set. The analyzer projects should specify `AnalyzerLanguage` as mentioned [below](#analyzers--source-generators).
Libraries included in this transport package should ensure all direct and transitive assembly references are also included in either the target's shared framework or the Microsoft.NETCore.App shared framework. This is not validated in dotnet/runtime at the moment: https://github.com/dotnet/runtime/issues/52562
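As a rough illustration of the two steps above (the library name, generator project, and paths below are hypothetical placeholders, and the exact layout of `NetCoreAppLibrary.props` and the transport project may differ):
```xml
<!-- NetCoreAppLibrary.props: list the library in the target's shared framework section.
     Sketch only - assumes the section is a semicolon-delimited property list. -->
<PropertyGroup>
  <AspNetCoreAppLibrary>
    System.Banana;
  </AspNetCoreAppLibrary>
</PropertyGroup>
```
```xml
<!-- Microsoft.Internal.Runtime.**TARGET**.Transport.proj: include a source generator
     with the metadata described above. The generator path is a placeholder. -->
<ItemGroup>
  <ProjectReference Include="$(LibrariesProjectRoot)System.Banana\gen\System.Banana.Generators.csproj"
                    ReferenceOutputAssembly="false"
                    PackAsAnalyzer="true" />
</ItemGroup>
```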
@ -70,10 +70,13 @@ Build props and targets may be needed in NuGet packages. To define these, author
Some packages may wish to include a companion analyzer or source generator with their library. Analyzers are quite different from normal library references: their dependencies shouldn't be treated as NuGet package dependencies, and their TargetFramework isn't applicable to the project they are consumed in (since they run in the compiler). To facilitate this, we've defined some common infrastructure for packaging analyzers.
To include an analyzer in a package, simply add an `AnalyzerReference` item to the project that produces the package that should contain the analyzer and set the `Pack` metadata to true. If you just want to include the analyzer but not consume it, set the `ReferenceAnalyzer` metadata to false.
To include an analyzer in a package, simply add a `ProjectReference` item to the project that produces the package that should contain the analyzer and set the `ReferenceOutputAssembly` metadata to false and the `PackAsAnalyzer` metadata to true. If you also want to consume the analyzer, set the `OutputItemType` metadata to `Analyzer`.
```xml
<ItemGroup>
<AnalyzerReference Include="..\gen\System.Banana.Generators.csproj" Pack="true" ReferenceAnalyzer="false" />
<!-- Includes the analyzer in the package without consuming it. -->
<ProjectReference Include="..\gen\System.Banana.Generators.csproj" ReferenceOutputAssembly="false" PackAsAnalyzer="true" />
<!-- Includes the analyzer in the package and consumes it. -->
<ProjectReference Include="..\gen\System.Banana.Generators.csproj" ReferenceOutputAssembly="false" OutputItemType="Analyzer" PackAsAnalyzer="true" />
</ItemGroup>
```

View file

@ -188,7 +188,7 @@ All test outputs should be under
## gen
In the gen directory any source generator related to the assembly should exist. This does not mean the source generator is used only for that assembly, only that it is conceptually a part of that assembly. For example, the assembly may provide attributes or low-level types the source generator uses.
To consume a source generator, simply add an `<AnalyzerReference Include="..." />` item to the project, usually next to the `References` and `ProjectReferences` items.
To consume a source generator, simply add a `<ProjectReference Include="..." ReferenceOutputAssembly="false" OutputItemType="Analyzer" />` item to the project, usually next to the `Reference` and `ProjectReference` items.
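A minimal sketch of such a reference (the generator path is a placeholder, mirroring the `gen` directory layout described above):
```xml
<ItemGroup>
  <!-- Consume the source generator from the sibling gen directory without referencing its output assembly. -->
  <ProjectReference Include="..\gen\System.Banana.Generators.csproj"
                    ReferenceOutputAssembly="false"
                    OutputItemType="Analyzer" />
</ItemGroup>
```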
## Facades
Facades are unique in that they don't have any code and instead are generated by finding a contract reference assembly with the matching identity and generating type forwards for all the types to where they live in the implementation assemblies (aka facade seeds). There are also partial facades which contain some type forwards as well as some code definitions. All the various build configurations should be contained in a single csproj file per library.

View file

@ -43,7 +43,7 @@ Install the following packages for the toolchain:
* ninja-build (optional, enables building native code with ninja instead of make)
```bash
sudo apt install -y cmake llvm lld clang build-essential
sudo apt install -y cmake llvm lld clang build-essential \
python-is-python3 curl git lldb libicu-dev liblttng-ust-dev \
libssl-dev libnuma-dev libkrb5-dev zlib1g-dev ninja-build
```

View file

@ -84,13 +84,13 @@
<Uri>https://github.com/dotnet/command-line-api</Uri>
<Sha>5618b2d243ccdeb5c7e50a298b33b13036b4351b</Sha>
</Dependency>
<Dependency Name="Microsoft.DotNet.Cecil" Version="0.11.4-alpha.22524.1">
<Dependency Name="Microsoft.DotNet.Cecil" Version="0.11.4-alpha.22571.1">
<Uri>https://github.com/dotnet/cecil</Uri>
<Sha>4a51257b6ac207cb7b0a51b34bfb3eab5d0dfae8</Sha>
<Sha>60a4b756026f3ff7164f47aa09ece9e7c4e0ffca</Sha>
</Dependency>
<Dependency Name="Microsoft.DotNet.Cecil.Pdb" Version="0.11.4-alpha.22524.1">
<Dependency Name="Microsoft.DotNet.Cecil.Pdb" Version="0.11.4-alpha.22571.1">
<Uri>https://github.com/dotnet/cecil</Uri>
<Sha>4a51257b6ac207cb7b0a51b34bfb3eab5d0dfae8</Sha>
<Sha>60a4b756026f3ff7164f47aa09ece9e7c4e0ffca</Sha>
</Dependency>
</ProductDependencies>
<ToolsetDependencies>

View file

@ -120,14 +120,13 @@
<SystemBuffersVersion>4.5.1</SystemBuffersVersion>
<SystemCollectionsImmutableVersion>6.0.0</SystemCollectionsImmutableVersion>
<SystemComponentModelAnnotationsVersion>5.0.0</SystemComponentModelAnnotationsVersion>
<SystemDataSqlClientVersion>4.8.3</SystemDataSqlClientVersion>
<SystemDataSqlClientVersion>4.8.5</SystemDataSqlClientVersion>
<SystemDataDataSetExtensionsVersion>4.5.0</SystemDataDataSetExtensionsVersion>
<SystemIOFileSystemAccessControlVersion>5.0.0</SystemIOFileSystemAccessControlVersion>
<SystemIOPipesAccessControlVersion>5.0.0</SystemIOPipesAccessControlVersion>
<SystemMemoryVersion>4.5.5</SystemMemoryVersion>
<SystemNumericsVectorsVersion>4.5.0</SystemNumericsVectorsVersion>
<SystemReflectionMetadataVersion>6.0.1</SystemReflectionMetadataVersion>
<SystemReflectionMetadataLoadContextVersion>6.0.0</SystemReflectionMetadataLoadContextVersion>
<SystemReflectionEmitVersion>4.7.0</SystemReflectionEmitVersion>
<SystemReflectionEmitILGenerationVersion>4.7.0</SystemReflectionEmitILGenerationVersion>
<SystemReflectionEmitLightweightVersion>4.7.0</SystemReflectionEmitLightweightVersion>
@ -175,7 +174,7 @@
<MicrosoftBuildVersion>17.3.2</MicrosoftBuildVersion>
<MicrosoftBuildTasksCoreVersion>$(MicrosoftBuildVersion)</MicrosoftBuildTasksCoreVersion>
<NugetProjectModelVersion>6.2.2</NugetProjectModelVersion>
<NugetPackagingVersion>6.2.1</NugetPackagingVersion>
<NugetPackagingVersion>6.2.2</NugetPackagingVersion>
<!-- Testing -->
<MicrosoftNETCoreCoreDisToolsVersion>1.1.0</MicrosoftNETCoreCoreDisToolsVersion>
<MicrosoftNETTestSdkVersion>17.4.0-preview-20220707-01</MicrosoftNETTestSdkVersion>
@ -210,8 +209,8 @@
<MicrosoftNETILLinkTasksVersion>7.0.100-1.22552.1</MicrosoftNETILLinkTasksVersion>
<MicrosoftNETILLinkAnalyzerPackageVersion>$(MicrosoftNETILLinkTasksVersion)</MicrosoftNETILLinkAnalyzerPackageVersion>
<!-- Mono Cecil -->
<MicrosoftDotNetCecilVersion>0.11.4-alpha.22524.1</MicrosoftDotNetCecilVersion>
<MicrosoftDotNetCecilPdbVersion>$(MicrosoftDotNetCecilVersion)</MicrosoftDotNetCecilPdbVersion>
<MicrosoftDotNetCecilVersion>0.11.4-alpha.22571.1</MicrosoftDotNetCecilVersion>
<MicrosoftDotNetCecilPdbVersion>0.11.4-alpha.22571.1</MicrosoftDotNetCecilPdbVersion>
<!-- ILLink dependencies -->
<MicrosoftBuildFrameworkVersion>17.0.0-preview-21267-01</MicrosoftBuildFrameworkVersion>
<MicrosoftBuildUtilitiesCoreVersion>17.0.0-preview-21267-01</MicrosoftBuildUtilitiesCoreVersion>

View file

@ -41,18 +41,10 @@
That is required as the EnabledGenerators condition checks on the Reference and ProjectReference items and hence can't be a property condition. -->
<ItemGroup Condition="'@(EnabledGenerators)' != '' and
@(EnabledGenerators->AnyHaveMetadataValue('Identity', 'LibraryImportGenerator'))">
<AnalyzerReference Include="$(LibrariesProjectRoot)System.Runtime.InteropServices\gen\LibraryImportGenerator\LibraryImportGenerator.csproj;
$(LibrariesProjectRoot)System.Runtime.InteropServices\gen\Microsoft.Interop.SourceGeneration\Microsoft.Interop.SourceGeneration.csproj" />
</ItemGroup>
<!-- AnalyzerReference items are transformed to ProjectReferences with the required analyzer metadata. -->
<ItemGroup>
<ProjectReference Include="@(AnalyzerReference)"
<ProjectReference Include="$(LibrariesProjectRoot)System.Runtime.InteropServices\gen\LibraryImportGenerator\LibraryImportGenerator.csproj;
$(LibrariesProjectRoot)System.Runtime.InteropServices\gen\Microsoft.Interop.SourceGeneration\Microsoft.Interop.SourceGeneration.csproj"
ReferenceOutputAssembly="false"
OutputItemType="Analyzer"
Pack="false" />
<ProjectReference Update="@(AnalyzerReference->WithMetadataValue('ReferenceAnalyzer', 'false'))"
OutputItemType="" />
OutputItemType="Analyzer" />
</ItemGroup>
<Target Name="ConfigureGenerators"

View file

@ -136,8 +136,8 @@
These will be returned as items with identity pointing to the built file, and PackagePath metadata
set to their location in the package. IsSymbol metadata will be set to distinguish symbols. -->
<Target Name="IncludeAnalyzersInPackage"
Condition="'@(AnalyzerReference)' != '' and @(AnalyzerReference->AnyHaveMetadataValue('Pack', 'true'))">
<MSBuild Projects="@(AnalyzerReference->WithMetadataValue('Pack', 'true'))"
Condition="'@(ProjectReference)' != '' and @(ProjectReference->AnyHaveMetadataValue('PackAsAnalyzer', 'true'))">
<MSBuild Projects="@(ProjectReference->WithMetadataValue('PackAsAnalyzer', 'true'))"
Targets="GetAnalyzerPackFiles">
<Output TaskParameter="TargetOutputs" ItemName="_AnalyzerFile" />
</MSBuild>
@ -161,8 +161,8 @@
<!-- In packages that contain Analyzers, include a .targets file that will select the correct analyzer. -->
<Target Name="IncludeMultiTargetRoslynComponentTargetsInPackage"
AfterTargets="IncludeAnalyzersInPackage"
Condition="'@(AnalyzerReference)' != '' and
@(AnalyzerReference->AnyHaveMetadataValue('Pack', 'true')) and
Condition="'@(ProjectReference)' != '' and
@(ProjectReference->AnyHaveMetadataValue('PackAsAnalyzer', 'true')) and
'$(IncludeMultiTargetRoslynComponentTargets)' == 'true'"
DependsOnTargets="GenerateMultiTargetRoslynComponentTargetsFile">
<ItemGroup>

View file

@ -39,10 +39,10 @@ jobs:
parameters:
${{ if eq(parameters.hostedOs, '') }}:
name: ${{ format('build_{0}{1}_{2}_{3}_{4}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig, parameters.nameSuffix) }}
displayName: ${{ format('Build {0}{1} {2} {3} {4} {5}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig, parameters.nameSuffix, parameters.runtimeVariant) }}
displayName: ${{ format('{0}{1} {2} {3} {4} {5}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig, parameters.nameSuffix, parameters.runtimeVariant) }}
${{ if ne(parameters.hostedOs, '') }}:
name: ${{ format('build_{0}{1}_{2}_{3}_{4}_{5}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.hostedOs, parameters.buildConfig, parameters.nameSuffix) }}
displayName: ${{ format('Build {0}{1} {2} {3} {4} {5} {6}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.hostedOs, parameters.buildConfig, parameters.nameSuffix, parameters.runtimeVariant) }}
displayName: ${{ format('{0}{1} {2} {3} {4} {5} {6}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.hostedOs, parameters.buildConfig, parameters.nameSuffix, parameters.runtimeVariant) }}
pool: ${{ parameters.pool }}
container: ${{ parameters.container }}
condition: and(succeeded(), ${{ parameters.condition }})

View file

@ -75,6 +75,11 @@ jobs:
crossBuild: ${{ parameters.crossBuild }}
gatherAssetManifests: true
# Component governance does not work on musl machines
${{ if eq(parameters.osSubGroup, '_musl') }}:
disableComponentGovernance: true
variables:
- ${{ each variable in parameters.variables }}:
- ${{ variable }}

View file

@ -22,6 +22,35 @@
<FirefoxStampFile>$([MSBuild]::NormalizePath($(FirefoxDir), '.install-firefox-$(FirefoxRevision).stamp'))</FirefoxStampFile>
</PropertyGroup>
<!--
We use https://omahaproxy.appspot.com/all.json to get details about the
latest stable Chrome versions. The `branch_base_position` field in that data is
used to locate the closest snapshots that can be installed for testing.
But this `branch_base_position` is sometimes incorrect and can
cause failures like:
`Could not find a chrome snapshot folder under
https://storage.googleapis.com/chromium-browser-snapshots/Win_x64, for
branch positions 1202 to 1232, for version 107.0.5304.122`
For now, use the last branch position from the last working stable
version - `107.0.5304.110` - until we find a better way to do this.
Refer to the `GetChromeVersions` task in `src/tasks` to see how we find
these snapshot URLs.
-->
<PropertyGroup Label="Use specific version of chrome" Condition="$([MSBuild]::IsOSPlatform('linux'))">
<ChromeVersion>107.0.5304.110</ChromeVersion>
<ChromeRevision>1047731</ChromeRevision>
<_ChromeBaseSnapshotUrl>https://storage.googleapis.com/chromium-browser-snapshots/Linux_x64/1047731</_ChromeBaseSnapshotUrl>
</PropertyGroup>
<PropertyGroup Label="Use specific version of chrome" Condition="$([MSBuild]::IsOSPlatform('windows'))">
<ChromeVersion>107.0.5304.107</ChromeVersion>
<ChromeRevision>1047731</ChromeRevision>
<_ChromeBaseSnapshotUrl>https://storage.googleapis.com/chromium-browser-snapshots/Win_x64/1047737</_ChromeBaseSnapshotUrl>
</PropertyGroup>
<PropertyGroup Condition="'$(BrowserHost)' != 'windows'">
<FirefoxRevision>97.0.1</FirefoxRevision>
<FirefoxUrl>https://ftp.mozilla.org/pub/firefox/releases/$(FirefoxRevision)/linux-x86_64/en-US/firefox-$(FirefoxRevision).tar.bz2</FirefoxUrl>

View file

@ -72,7 +72,7 @@
</PropertyGroup>
<PropertyGroup>
<WorkItemTimeout>2:30</WorkItemTimeout>
<WorkItemTimeout>6:00</WorkItemTimeout>
<WorkItemTimeout Condition="'$(OnlySanityCheck)' == 'true'">1:30</WorkItemTimeout>
</PropertyGroup>

View file

@ -100,6 +100,7 @@
<Target Name="GetChromeVersion" AfterTargets="Build" Condition="'$(InstallChromeForTests)' == 'true'">
<GetChromeVersions
Condition="'$(ChromeVersion)' == ''"
OSIdentifier="$(ChromeOSIdentifier)"
OSPrefix="$(_ChromeOSPrefix)"
Channel="$(ChromeChannel)"

View file

@ -614,9 +614,6 @@ namespace System
if (attrib == null || attrib.Length == 0)
return null;
if (attrib.Length == 0)
return null;
if (attrib.Length == 1)
return attrib[0];

View file

@ -677,7 +677,7 @@ namespace System
/// If pinned is set to true, <typeparamref name="T"/> must not be a reference type or a type that contains object references.
/// </remarks>
[MethodImpl(MethodImplOptions.AggressiveInlining)] // forced to ensure no perf drop for small memory buffers (hot path)
public static T[] AllocateUninitializedArray<T>(int length, bool pinned = false) // T[] rather than T?[] to match `new T[length]` behavior
public static unsafe T[] AllocateUninitializedArray<T>(int length, bool pinned = false) // T[] rather than T?[] to match `new T[length]` behavior
{
if (!pinned)
{
@ -689,10 +689,13 @@ namespace System
// for debug builds we always want to call AllocateNewArray to detect AllocateNewArray bugs
#if !DEBUG
// small arrays are allocated using `new[]` as that is generally faster.
if (length < 2048 / Unsafe.SizeOf<T>())
#pragma warning disable 8500 // sizeof of managed types
if (length < 2048 / sizeof(T))
#pragma warning restore 8500
{
return new T[length];
}
#endif
}
else if (RuntimeHelpers.IsReferenceOrContainsReferences<T>())

View file

@ -237,13 +237,12 @@ namespace System.Runtime
[MethodImpl(MethodImplOptions.InternalCall)]
private static extern object? InternalGetTarget(IntPtr dependentHandle);
#else
private static unsafe object? InternalGetTarget(IntPtr dependentHandle)
{
// This optimization is the same that is used in GCHandle in RELEASE mode.
// This is not used in DEBUG builds as the runtime performs additional checks.
// The logic below is the inlined copy of ObjectFromHandle in the unmanaged runtime.
return Unsafe.As<IntPtr, object>(ref *(IntPtr*)(nint)dependentHandle);
}
#pragma warning disable 8500 // address of managed types
private static unsafe object? InternalGetTarget(IntPtr dependentHandle) => *(object*)dependentHandle;
#pragma warning restore 8500
#endif
[MethodImpl(MethodImplOptions.InternalCall)]

View file

@ -18,8 +18,9 @@ namespace System.Runtime.InteropServices
[MethodImpl(MethodImplOptions.InternalCall)]
internal static extern object? InternalGet(IntPtr handle);
#else
internal static unsafe object? InternalGet(IntPtr handle) =>
Unsafe.As<IntPtr, object>(ref *(IntPtr*)(nint)handle);
#pragma warning disable 8500 // address of managed types
internal static unsafe object? InternalGet(IntPtr handle) => *(object*)handle;
#pragma warning restore 8500
#endif
[MethodImpl(MethodImplOptions.InternalCall)]

View file

@ -24169,12 +24169,17 @@ BOOL ref_p (uint8_t* r)
return (straight_ref_p (r) || partial_object_p (r));
}
mark_queue_t::mark_queue_t() : curr_slot_index(0)
mark_queue_t::mark_queue_t()
#ifdef MARK_PHASE_PREFETCH
: curr_slot_index(0)
#endif //MARK_PHASE_PREFETCH
{
#ifdef MARK_PHASE_PREFETCH
for (size_t i = 0; i < slot_count; i++)
{
slot_table[i] = nullptr;
}
#endif //MARK_PHASE_PREFETCH
}
// place an object in the mark queue
@ -24184,6 +24189,7 @@ mark_queue_t::mark_queue_t() : curr_slot_index(0)
FORCEINLINE
uint8_t *mark_queue_t::queue_mark(uint8_t *o)
{
#ifdef MARK_PHASE_PREFETCH
Prefetch (o);
// while the prefetch is taking effect, park our object in the queue
@ -24196,6 +24202,9 @@ uint8_t *mark_queue_t::queue_mark(uint8_t *o)
curr_slot_index = (slot_index + 1) % slot_count;
if (old_o == nullptr)
return nullptr;
#else //MARK_PHASE_PREFETCH
uint8_t* old_o = o;
#endif //MARK_PHASE_PREFETCH
// this causes us to access the method table pointer of the old object
BOOL already_marked = marked (old_o);
@ -24247,6 +24256,7 @@ uint8_t *mark_queue_t::queue_mark(uint8_t *o, int condemned_gen)
// returns nullptr if there is no such object
uint8_t* mark_queue_t::get_next_marked()
{
#ifdef MARK_PHASE_PREFETCH
size_t slot_index = curr_slot_index;
size_t empty_slot_count = 0;
while (empty_slot_count < slot_count)
@ -24266,15 +24276,18 @@ uint8_t* mark_queue_t::get_next_marked()
}
empty_slot_count++;
}
#endif //MARK_PHASE_PREFETCH
return nullptr;
}
void mark_queue_t::verify_empty()
{
#ifdef MARK_PHASE_PREFETCH
for (size_t slot_index = 0; slot_index < slot_count; slot_index++)
{
assert(slot_table[slot_index] == nullptr);
}
#endif //MARK_PHASE_PREFETCH
}
void gc_heap::mark_object_simple1 (uint8_t* oo, uint8_t* start THREAD_NUMBER_DCL)
@ -25971,6 +25984,7 @@ BOOL gc_heap::process_mark_overflow(int condemned_gen_number)
BOOL overflow_p = FALSE;
recheck:
drain_mark_queue();
if ((! (max_overflow_address == 0) ||
! (min_overflow_address == MAX_PTR)))
{
@ -26235,7 +26249,8 @@ void gc_heap::scan_dependent_handles (int condemned_gen_number, ScanContext *sc,
if (process_mark_overflow(condemned_gen_number))
fUnscannedPromotions = true;
drain_mark_queue();
// mark queue must be empty after process_mark_overflow
mark_queue.verify_empty();
// Perform the scan and set the flag if any promotions resulted.
if (GCScan::GcDhReScan(sc))
@ -26866,7 +26881,9 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
// handle table has been fully promoted.
GCScan::GcDhInitialScan(GCHeap::Promote, condemned_gen_number, max_generation, &sc);
scan_dependent_handles(condemned_gen_number, &sc, true);
drain_mark_queue();
// mark queue must be empty after scan_dependent_handles
mark_queue.verify_empty();
fire_mark_event (ETW::GC_ROOT_DH_HANDLES, current_promoted_bytes, last_promoted_bytes);
#ifdef MULTIPLE_HEAPS
@ -26956,7 +26973,9 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
// Scan dependent handles again to promote any secondaries associated with primaries that were promoted
// for finalization. As before scan_dependent_handles will also process any mark stack overflow.
scan_dependent_handles(condemned_gen_number, &sc, false);
drain_mark_queue();
// mark queue must be empty after scan_dependent_handles
mark_queue.verify_empty();
fire_mark_event (ETW::GC_ROOT_DH_HANDLES, current_promoted_bytes, last_promoted_bytes);
#endif //FEATURE_PREMORTEM_FINALIZATION

View file

@ -62,6 +62,7 @@ inline void FATAL_GC_ERROR()
// + creates some ro segs
// We can add more mechanisms here.
//#define STRESS_REGIONS
#define MARK_PHASE_PREFETCH
#endif //USE_REGIONS
// FEATURE_STRUCTALIGN was added by Midori. In CLR we are not interested
@ -1222,9 +1223,11 @@ enum bookkeeping_element
class mark_queue_t
{
#ifdef MARK_PHASE_PREFETCH
static const size_t slot_count = 16;
uint8_t* slot_table[slot_count];
size_t curr_slot_index;
#endif //MARK_PHASE_PREFETCH
public:
mark_queue_t();

View file

@ -3212,6 +3212,7 @@ public:
CORINFO_FIELD_HANDLE field,
uint8_t *buffer,
int bufferSize,
int valueOffset = 0,
bool ignoreMovableObjects = true
) = 0;

View file

@ -625,6 +625,7 @@ bool getReadonlyStaticFieldValue(
CORINFO_FIELD_HANDLE field,
uint8_t* buffer,
int bufferSize,
int valueOffset,
bool ignoreMovableObjects) override;
CORINFO_CLASS_HANDLE getStaticFieldCurrentClass(

View file

@ -43,11 +43,11 @@ typedef const GUID *LPCGUID;
#define GUID_DEFINED
#endif // !GUID_DEFINED
constexpr GUID JITEEVersionIdentifier = { /* da097b39-7f43-458a-990a-0b65406d5ff3 */
0xda097b39,
0x7f43,
0x458a,
{0x99, 0xa, 0xb, 0x65, 0x40, 0x6d, 0x5f, 0xf3}
constexpr GUID JITEEVersionIdentifier = { /* 0330a175-dd05-4760-840f-a1a4c47284d3 */
0x330a175,
0xdd05,
0x4760,
{0x84, 0xf, 0xa1, 0xa4, 0xc4, 0x72, 0x84, 0xd3}
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////

View file

@ -1488,10 +1488,11 @@ bool WrapICorJitInfo::getReadonlyStaticFieldValue(
CORINFO_FIELD_HANDLE field,
uint8_t* buffer,
int bufferSize,
int valueOffset,
bool ignoreMovableObjects)
{
API_ENTER(getReadonlyStaticFieldValue);
bool temp = wrapHnd->getReadonlyStaticFieldValue(field, buffer, bufferSize, ignoreMovableObjects);
bool temp = wrapHnd->getReadonlyStaticFieldValue(field, buffer, bufferSize, valueOffset, ignoreMovableObjects);
API_LEAVE(getReadonlyStaticFieldValue);
return temp;
}

View file

@ -1141,12 +1141,12 @@ void Compiler::optPrintAssertion(AssertionDsc* curAssertion, AssertionIndex asse
if (curAssertion->op1.kind == O1K_EXACT_TYPE)
{
printf("Exact Type MT(%08X)", dspPtr(curAssertion->op2.u1.iconVal));
assert(curAssertion->op2.u1.iconFlags != GTF_EMPTY);
assert(curAssertion->op2.HasIconFlag());
}
else if (curAssertion->op1.kind == O1K_SUBTYPE)
{
printf("MT(%08X)", dspPtr(curAssertion->op2.u1.iconVal));
assert(curAssertion->op2.u1.iconFlags != GTF_EMPTY);
assert(curAssertion->op2.HasIconFlag());
}
else if ((curAssertion->op1.kind == O1K_BOUND_OPER_BND) ||
(curAssertion->op1.kind == O1K_BOUND_LOOP_BND) ||
@ -1183,7 +1183,7 @@ void Compiler::optPrintAssertion(AssertionDsc* curAssertion, AssertionIndex asse
}
else
{
if ((curAssertion->op2.u1.iconFlags & GTF_ICON_HDL_MASK) != 0)
if (curAssertion->op2.HasIconFlag())
{
printf("[%08p]", dspPtr(curAssertion->op2.u1.iconVal));
}
@ -1500,7 +1500,7 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1,
assertion.op2.kind = O2K_CONST_INT;
assertion.op2.vn = ValueNumStore::VNForNull();
assertion.op2.u1.iconVal = 0;
assertion.op2.u1.iconFlags = GTF_EMPTY;
assertion.op2.SetIconFlag(GTF_EMPTY);
}
//
// Are we making an assertion about a local variable?
@ -1554,7 +1554,7 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1,
assertion.op1.lcl.ssaNum = op1->AsLclVarCommon()->GetSsaNum();
assertion.op2.u1.iconVal = op2->AsIntCon()->gtIconVal;
assertion.op2.vn = optConservativeNormalVN(op2);
assertion.op2.u1.iconFlags = op2->GetIconHandleFlag();
assertion.op2.SetIconFlag(op2->GetIconHandleFlag());
//
// Ok everything has been set and the assertion looks good
@ -1643,7 +1643,7 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1,
#endif // TARGET_ARM
assertion.op2.u1.iconVal = iconVal;
assertion.op2.u1.iconFlags = op2->GetIconHandleFlag();
assertion.op2.SetIconFlag(op2->GetIconHandleFlag(), op2->AsIntCon()->gtFieldSeq);
}
else if (op2->gtOper == GT_CNS_LNG)
{
@ -1792,7 +1792,7 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1,
/* iconFlags should only contain bits in GTF_ICON_HDL_MASK */
assert((iconFlags & ~GTF_ICON_HDL_MASK) == 0);
assertion.op2.u1.iconFlags = iconFlags;
assertion.op2.SetIconFlag(iconFlags);
}
// JIT case
else if (optIsTreeKnownIntValue(!optLocalAssertionProp, op2, &cnsValue, &iconFlags))
@ -1804,7 +1804,7 @@ AssertionIndex Compiler::optCreateAssertion(GenTree* op1,
/* iconFlags should only contain bits in GTF_ICON_HDL_MASK */
assert((iconFlags & ~GTF_ICON_HDL_MASK) == 0);
assertion.op2.u1.iconFlags = iconFlags;
assertion.op2.SetIconFlag(iconFlags);
}
else
{
@ -2105,13 +2105,11 @@ void Compiler::optDebugCheckAssertion(AssertionDsc* assertion)
case O2K_CONST_INT:
{
// The only flags that can be set are those in the GTF_ICON_HDL_MASK.
assert((assertion->op2.u1.iconFlags & ~GTF_ICON_HDL_MASK) == 0);
switch (assertion->op1.kind)
{
case O1K_EXACT_TYPE:
case O1K_SUBTYPE:
assert(assertion->op2.u1.iconFlags != GTF_EMPTY);
assert(assertion->op2.HasIconFlag());
break;
case O1K_LCLVAR:
assert((lvaGetDesc(assertion->op1.lcl.lclNum)->lvType != TYP_REF) ||
@ -2130,7 +2128,7 @@ void Compiler::optDebugCheckAssertion(AssertionDsc* assertion)
{
// All handles should be represented by O2K_CONST_INT,
// so no handle bits should be set here.
assert((assertion->op2.u1.iconFlags & GTF_ICON_HDL_MASK) == 0);
assert(!assertion->op2.HasIconFlag());
}
break;
@ -2342,7 +2340,7 @@ AssertionInfo Compiler::optCreateJTrueBoundsAssertion(GenTree* tree)
dsc.op2.kind = O2K_CONST_INT;
dsc.op2.vn = vnStore->VNZeroForType(op2->TypeGet());
dsc.op2.u1.iconVal = 0;
dsc.op2.u1.iconFlags = GTF_EMPTY;
dsc.op2.SetIconFlag(GTF_EMPTY);
AssertionIndex index = optAddAssertion(&dsc);
optCreateComplementaryAssertion(index, nullptr, nullptr);
return index;
@ -2359,7 +2357,7 @@ AssertionInfo Compiler::optCreateJTrueBoundsAssertion(GenTree* tree)
dsc.op2.kind = O2K_CONST_INT;
dsc.op2.vn = vnStore->VNZeroForType(op2->TypeGet());
dsc.op2.u1.iconVal = 0;
dsc.op2.u1.iconFlags = GTF_EMPTY;
dsc.op2.SetIconFlag(GTF_EMPTY);
AssertionIndex index = optAddAssertion(&dsc);
optCreateComplementaryAssertion(index, nullptr, nullptr);
return index;
@ -2376,7 +2374,7 @@ AssertionInfo Compiler::optCreateJTrueBoundsAssertion(GenTree* tree)
dsc.op2.kind = O2K_CONST_INT;
dsc.op2.vn = vnStore->VNZeroForType(op2->TypeGet());
dsc.op2.u1.iconVal = 0;
dsc.op2.u1.iconFlags = GTF_EMPTY;
dsc.op2.SetIconFlag(GTF_EMPTY);
AssertionIndex index = optAddAssertion(&dsc);
optCreateComplementaryAssertion(index, nullptr, nullptr);
return index;
@ -2393,7 +2391,7 @@ AssertionInfo Compiler::optCreateJTrueBoundsAssertion(GenTree* tree)
dsc.op2.kind = O2K_CONST_INT;
dsc.op2.vn = vnStore->VNZeroForType(TYP_INT);
dsc.op2.u1.iconVal = 0;
dsc.op2.u1.iconFlags = GTF_EMPTY;
dsc.op2.SetIconFlag(GTF_EMPTY);
AssertionIndex index = optAddAssertion(&dsc);
optCreateComplementaryAssertion(index, nullptr, nullptr);
return index;
@ -2436,7 +2434,7 @@ AssertionInfo Compiler::optCreateJTrueBoundsAssertion(GenTree* tree)
dsc.op2.kind = O2K_CONST_INT;
dsc.op2.vn = vnStore->VNZeroForType(op2->TypeGet());
dsc.op2.u1.iconVal = 0;
dsc.op2.u1.iconFlags = GTF_EMPTY;
dsc.op2.SetIconFlag(GTF_EMPTY);
AssertionIndex index = optAddAssertion(&dsc);
optCreateComplementaryAssertion(index, nullptr, nullptr);
return index;
@ -2453,7 +2451,7 @@ AssertionInfo Compiler::optCreateJTrueBoundsAssertion(GenTree* tree)
dsc.op2.kind = O2K_CONST_INT;
dsc.op2.vn = vnStore->VNZeroForType(TYP_INT);
dsc.op2.u1.iconVal = 0;
dsc.op2.u1.iconFlags = GTF_EMPTY;
dsc.op2.SetIconFlag(GTF_EMPTY);
AssertionIndex index = optAddAssertion(&dsc);
optCreateComplementaryAssertion(index, nullptr, nullptr);
return index;
@ -2467,7 +2465,7 @@ AssertionInfo Compiler::optCreateJTrueBoundsAssertion(GenTree* tree)
dsc.op2.kind = O2K_CONST_INT;
dsc.op2.vn = vnStore->VNZeroForType(TYP_INT);
dsc.op2.u1.iconVal = 0;
dsc.op2.u1.iconFlags = GTF_EMPTY;
dsc.op2.SetIconFlag(GTF_EMPTY);
AssertionIndex index = optAddAssertion(&dsc);
optCreateComplementaryAssertion(index, nullptr, nullptr);
return index;
@ -2562,8 +2560,8 @@ AssertionInfo Compiler::optAssertionGenJtrue(GenTree* tree)
dsc.op1.bnd.vnLen = op1VN;
dsc.op2.vn = vnStore->VNConservativeNormalValue(op2->gtVNPair);
dsc.op2.kind = O2K_CONST_INT;
dsc.op2.u1.iconFlags = GTF_EMPTY;
dsc.op2.u1.iconVal = 0;
dsc.op2.SetIconFlag(GTF_EMPTY);
// when con is not zero, create an assertion on the arr.Length == con edge
// when con is zero, create an assertion on the arr.Length != 0 edge
@ -3338,6 +3336,7 @@ GenTree* Compiler::optConstantAssertionProp(AssertionDsc* curAssertion,
}
GenTree* newTree = tree;
bool propagateType = false;
// Update 'newTree' with the new value from our table
// Typically newTree == tree and we are updating the node in place
@ -3366,10 +3365,17 @@ GenTree* Compiler::optConstantAssertionProp(AssertionDsc* curAssertion,
case O2K_CONST_INT:
// Don't propagate handles if we need to report relocs.
if (opts.compReloc && ((curAssertion->op2.u1.iconFlags & GTF_ICON_HDL_MASK) != 0))
if (opts.compReloc && curAssertion->op2.HasIconFlag() && curAssertion->op2.u1.iconVal != 0)
{
if (curAssertion->op2.GetIconFlag() == GTF_ICON_STATIC_HDL)
{
propagateType = true;
}
else
{
return nullptr;
}
}
// We assume that we do not try to do assertion prop on mismatched
// accesses (note that we widen normalize-on-load local accesses
@ -3381,11 +3387,11 @@ GenTree* Compiler::optConstantAssertionProp(AssertionDsc* curAssertion,
assert(!varTypeIsSmall(tree) || (curAssertion->op2.u1.iconVal ==
optCastConstantSmall(curAssertion->op2.u1.iconVal, tree->TypeGet())));
if (curAssertion->op2.u1.iconFlags & GTF_ICON_HDL_MASK)
if (curAssertion->op2.HasIconFlag())
{
// Here we have to allocate a new 'large' node to replace the old one
newTree = gtNewIconHandleNode(curAssertion->op2.u1.iconVal,
curAssertion->op2.u1.iconFlags & GTF_ICON_HDL_MASK);
newTree = gtNewIconHandleNode(curAssertion->op2.u1.iconVal, curAssertion->op2.GetIconFlag(),
curAssertion->op2.u1.fieldSeq);
// Make sure we don't retype const gc handles to TYP_I_IMPL
// Although, it's possible for e.g. GTF_ICON_STATIC_HDL
@ -3396,6 +3402,11 @@ GenTree* Compiler::optConstantAssertionProp(AssertionDsc* curAssertion,
// Conservatively don't allow propagation of ICON TYP_REF into BYREF
return nullptr;
}
propagateType = true;
}
if (propagateType)
{
newTree->ChangeType(tree->TypeGet());
}
}

View file

@ -1107,7 +1107,11 @@ AGAIN:
if (op2->IsIntCnsFitsInI32() && (op2->gtType != TYP_REF) && FitsIn<INT32>(cns + op2->AsIntConCommon()->IconValue()))
{
// We should not be building address modes out of non-foldable constants
assert(op2->AsIntConCommon()->ImmedValCanBeFolded(compiler, addr->OperGet()));
if (!op2->AsIntConCommon()->ImmedValCanBeFolded(compiler, addr->OperGet()))
{
assert(compiler->opts.compReloc);
return false;
}
/* We're adding a constant */

View file

@ -5705,8 +5705,6 @@ private:
unsigned* indexOut,
unsigned* simdSizeOut,
bool ignoreUsedInSIMDIntrinsic = false);
GenTree* fgMorphFieldAssignToSimdSetElement(GenTree* tree);
GenTree* fgMorphFieldToSimdGetElement(GenTree* tree);
bool fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt);
void impMarkContiguousSIMDFieldAssignments(Statement* stmt);
@ -7058,7 +7056,7 @@ public:
O1K_COUNT
};
enum optOp2Kind
enum optOp2Kind : uint16_t
{
O2K_INVALID,
O2K_LCLVAR_COPY,
@ -7096,6 +7094,9 @@ public:
struct AssertionDscOp2
{
optOp2Kind kind; // a const or copy assignment
private:
uint16_t m_encodedIconFlags; // encoded icon gtFlags, don't use directly
public:
ValueNum vn;
struct IntVal
{
@ -7103,7 +7104,7 @@ public:
#if !defined(HOST_64BIT)
unsigned padding; // unused; ensures iconFlags does not overlap lconVal
#endif
GenTreeFlags iconFlags; // gtFlags
FieldSeq* fieldSeq;
};
union {
SsaVar lcl;
@ -7112,6 +7113,29 @@ public:
double dconVal;
IntegralRange u2;
};
bool HasIconFlag()
{
assert(m_encodedIconFlags <= 0xFF);
return m_encodedIconFlags != 0;
}
GenTreeFlags GetIconFlag()
{
// number of trailing zeros in GTF_ICON_HDL_MASK
const uint16_t iconMaskTzc = 24;
static_assert_no_msg((0xFF000000 == GTF_ICON_HDL_MASK) && (GTF_ICON_HDL_MASK >> iconMaskTzc) == 0xFF);
GenTreeFlags flags = (GenTreeFlags)(m_encodedIconFlags << iconMaskTzc);
assert((flags & ~GTF_ICON_HDL_MASK) == 0);
return flags;
}
void SetIconFlag(GenTreeFlags flags, FieldSeq* fieldSeq = nullptr)
{
const uint16_t iconMaskTzc = 24;
assert((flags & ~GTF_ICON_HDL_MASK) == 0);
m_encodedIconFlags = flags >> iconMaskTzc;
u1.fieldSeq = fieldSeq;
}
} op2;
bool IsCheckedBoundArithBound()
@ -7220,7 +7244,7 @@ public:
{
case O2K_IND_CNS_INT:
case O2K_CONST_INT:
return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.u1.iconFlags == that->op2.u1.iconFlags));
return ((op2.u1.iconVal == that->op2.u1.iconVal) && (op2.GetIconFlag() == that->op2.GetIconFlag()));
case O2K_CONST_LONG:
return (op2.lconVal == that->op2.lconVal);

View file

@ -17622,6 +17622,12 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeq** pFldSe
baseAddr = AsOp()->gtOp1;
fldSeq = AsOp()->gtOp2->AsIntCon()->gtFieldSeq;
offset = AsOp()->gtOp2->AsIntCon()->IconValue();
if ((fldSeq != nullptr) && (fldSeq->GetKind() == FieldSeq::FieldKind::SimpleStaticKnownAddress))
{
// fldSeq represents a known address (not a small offset) - bail out.
return false;
}
}
else
{
@ -17630,17 +17636,15 @@ bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeq** pFldSe
}
else if (IsIconHandle(GTF_ICON_STATIC_HDL))
{
baseAddr = this;
fldSeq = AsIntCon()->gtFieldSeq;
offset = AsIntCon()->IconValue();
assert((fldSeq == nullptr) || (fldSeq->GetKind() == FieldSeq::FieldKind::SimpleStaticKnownAddress));
}
else
{
return false;
}
assert(baseAddr != nullptr);
if (fldSeq == nullptr)
{
return false;

View file

@ -272,7 +272,8 @@ public:
{
Instance = 0, // An instance field.
SimpleStatic = 1, // Simple static field - the handle represents a unique location.
SharedStatic = 2, // Static field on a shared generic type: "Class<__Canon>.StaticField".
SimpleStaticKnownAddress = 2, // Simple static field - the handle represents a known location.
SharedStatic = 3, // Static field on a shared generic type: "Class<__Canon>.StaticField".
};
private:
@ -310,7 +311,8 @@ public:
bool IsStaticField() const
{
return (GetKind() == FieldKind::SimpleStatic) || (GetKind() == FieldKind::SharedStatic);
return (GetKind() == FieldKind::SimpleStatic) || (GetKind() == FieldKind::SharedStatic) ||
(GetKind() == FieldKind::SimpleStaticKnownAddress);
}
bool IsSharedStaticField() const

View file

@ -561,7 +561,10 @@ GenTree* Compiler::getArgForHWIntrinsic(var_types argType,
arg = impPopStack().val;
assert(varTypeIsArithmetic(arg->TypeGet()) || ((argType == TYP_BYREF) && arg->TypeIs(TYP_BYREF)));
assert(genActualType(arg->gtType) == genActualType(argType));
if (!impCheckImplicitArgumentCoercion(argType, arg->gtType))
{
BADCODE("the hwintrinsic argument has a type that can't be implicitly converted to the signature type");
}
}
return arg;

View file

@ -4381,12 +4381,16 @@ GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedT
}
else
{
bool hasConstAddr = (pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_ADDRESS) ||
bool hasKnownAddr = (pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_ADDRESS) ||
(pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_RVA_ADDRESS);
ssize_t offset;
if (hasConstAddr)
if (hasKnownAddr)
{
// Change SimpleStatic to SimpleStaticKnownAddress
assert(fieldKind == FieldSeq::FieldKind::SimpleStatic);
fieldKind = FieldSeq::FieldKind::SimpleStaticKnownAddress;
offset = reinterpret_cast<ssize_t>(info.compCompHnd->getFieldAddress(pResolvedToken->hField));
assert(offset != 0);
}
@ -10458,6 +10462,14 @@ void Compiler::impImportBlockCode(BasicBlock* block)
GenTree* boxPayloadAddress =
gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, boxPayloadOffset);
GenTree* nullcheck = gtNewNullCheck(op1, block);
// Add an ordering dependency between the null
// check and forming the byref; the JIT assumes
// in many places that the only legal null
// byref is literally 0, and since the byref
// leaks out here, we need to ensure it is
// nullchecked.
nullcheck->gtFlags |= GTF_ORDER_SIDEEFF;
boxPayloadAddress->gtFlags |= GTF_ORDER_SIDEEFF;
GenTree* result = gtNewOperNode(GT_COMMA, TYP_BYREF, nullcheck, boxPayloadAddress);
impPushOnStack(result, tiRetVal);
break;

View file

@ -2728,6 +2728,11 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
// Prepare result
var_types resultType = JITtype2varType(sig->retType);
assert(resultType == result->TypeGet());
// Add an ordering dependency between the bounds check and
// forming the byref to prevent these from being reordered. The
// JIT is not allowed to create arbitrary illegal byrefs.
boundsCheck->gtFlags |= GTF_ORDER_SIDEEFF;
result->gtFlags |= GTF_ORDER_SIDEEFF;
retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
break;

View file

@ -34,16 +34,16 @@ class LocalAddressVisitor final : public GenTreeVisitor<LocalAddressVisitor>
//
class Value
{
GenTree* m_node;
GenTree** m_use;
unsigned m_lclNum;
unsigned m_offset;
bool m_address;
INDEBUG(bool m_consumed;)
INDEBUG(bool m_consumed);
public:
// Produce an unknown value associated with the specified node.
Value(GenTree* node)
: m_node(node)
Value(GenTree** use)
: m_use(use)
, m_lclNum(BAD_VAR_NUM)
, m_offset(0)
, m_address(false)
@ -53,10 +53,16 @@ class LocalAddressVisitor final : public GenTreeVisitor<LocalAddressVisitor>
{
}
// Get the use for the node that produced this value.
GenTree** Use() const
{
return m_use;
}
// Get the node that produced this value.
GenTree* Node() const
{
return m_node;
return *m_use;
}
// Does this value represent a location?
@ -294,6 +300,10 @@ class LocalAddressVisitor final : public GenTreeVisitor<LocalAddressVisitor>
None,
Nop,
BitCast,
#ifdef FEATURE_HW_INTRINSICS
GetElement,
WithElement,
#endif // FEATURE_HW_INTRINSICS
LclVar,
LclFld
};
@ -418,7 +428,7 @@ public:
}
}
PushValue(node);
PushValue(use);
return Compiler::WALK_CONTINUE;
}
@ -557,9 +567,9 @@ public:
}
private:
void PushValue(GenTree* node)
void PushValue(GenTree** use)
{
m_valueStack.Push(node);
m_valueStack.Push(use);
}
Value& TopValue(unsigned index)
@ -909,13 +919,46 @@ private:
case IndirTransform::BitCast:
indir->ChangeOper(GT_BITCAST);
indir->gtGetOp1()->ChangeOper(GT_LCL_VAR);
indir->gtGetOp1()->ChangeType(varDsc->TypeGet());
indir->gtGetOp1()->AsLclVar()->SetLclNum(lclNum);
lclNode = indir->gtGetOp1()->AsLclVarCommon();
lclNode = BashToLclVar(indir->gtGetOp1(), lclNum);
break;
#ifdef FEATURE_HW_INTRINSICS
case IndirTransform::GetElement:
{
var_types elementType = indir->TypeGet();
assert(elementType == TYP_FLOAT);
lclNode = BashToLclVar(indir->gtGetOp1(), lclNum);
GenTree* indexNode = m_compiler->gtNewIconNode(val.Offset() / genTypeSize(elementType));
GenTree* hwiNode = m_compiler->gtNewSimdGetElementNode(elementType, lclNode, indexNode,
CORINFO_TYPE_FLOAT, genTypeSize(varDsc),
/* isSimdAsHWIntrinsic */ false);
indir = hwiNode;
*val.Use() = hwiNode;
}
break;
case IndirTransform::WithElement:
{
assert(user->OperIs(GT_ASG) && (user->gtGetOp1() == indir));
var_types elementType = indir->TypeGet();
assert(elementType == TYP_FLOAT);
lclNode = BashToLclVar(indir, lclNum);
GenTree* simdLclNode = m_compiler->gtNewLclvNode(lclNum, varDsc->TypeGet());
GenTree* indexNode = m_compiler->gtNewIconNode(val.Offset() / genTypeSize(elementType));
GenTree* elementNode = user->gtGetOp2();
user->AsOp()->gtOp2 =
m_compiler->gtNewSimdWithElementNode(varDsc->TypeGet(), simdLclNode, indexNode, elementNode,
CORINFO_TYPE_FLOAT, genTypeSize(varDsc),
/* isSimdAsHWIntrinsic */ false);
user->ChangeType(varDsc->TypeGet());
}
break;
#endif // FEATURE_HW_INTRINSICS
case IndirTransform::LclVar:
// TODO-ADDR: use "BashToLclVar" here.
if (indir->TypeGet() != varDsc->TypeGet())
{
assert(genTypeSize(indir) == genTypeSize(varDsc)); // BOOL <-> UBYTE.
@ -996,14 +1039,6 @@ private:
return IndirTransform::LclVar;
}
if (varTypeIsSIMD(varDsc))
{
// TODO-ADDR: skip SIMD variables for now, fgMorphFieldAssignToSimdSetElement and
// fgMorphFieldToSimdGetElement need to be updated to recognize LCL_FLDs or moved
// here.
return IndirTransform::None;
}
// Bool and ubyte are the same type.
if ((indir->TypeIs(TYP_BOOL) && (varDsc->TypeGet() == TYP_UBYTE)) ||
(indir->TypeIs(TYP_UBYTE) && (varDsc->TypeGet() == TYP_BOOL)))
@ -1011,9 +1046,10 @@ private:
return IndirTransform::LclVar;
}
bool isDef = user->OperIs(GT_ASG) && (user->gtGetOp1() == indir);
// For small locals on the LHS we can ignore the signed/unsigned diff.
if (user->OperIs(GT_ASG) && (user->gtGetOp1() == indir) &&
(varTypeToSigned(indir) == varTypeToSigned(varDsc)))
if (isDef && (varTypeToSigned(indir) == varTypeToSigned(varDsc)))
{
assert(varTypeIsSmall(indir));
return IndirTransform::LclVar;
@ -1024,6 +1060,14 @@ private:
return IndirTransform::LclFld;
}
#ifdef FEATURE_HW_INTRINSICS
if (varTypeIsSIMD(varDsc) && indir->TypeIs(TYP_FLOAT) && ((val.Offset() % genTypeSize(TYP_FLOAT)) == 0) &&
m_compiler->IsBaselineSimdIsaSupported())
{
return isDef ? IndirTransform::WithElement : IndirTransform::GetElement;
}
#endif // FEATURE_HW_INTRINSICS
// Turn this into a bitcast if we can.
if ((genTypeSize(indir) == genTypeSize(varDsc)) && (varTypeIsFloating(indir) || varTypeIsFloating(varDsc)))
{
@ -1139,16 +1183,23 @@ private:
// the promoted local would look like "{ int a, B }", while the IR would contain "FIELD"
// nodes for the outer struct "A".
//
if (indir->TypeIs(TYP_STRUCT))
{
// TODO-1stClassStructs: delete this once "IND<struct>" nodes are no more.
if (indir->OperIs(GT_IND))
if (indir->OperIs(GT_IND) && indir->TypeIs(TYP_STRUCT))
{
// We do not have a layout for this node.
return;
}
ClassLayout* layout = indir->GetLayout(m_compiler);
ClassLayout* layout = indir->TypeIs(TYP_STRUCT) ? indir->GetLayout(m_compiler) : nullptr;
unsigned indSize = indir->TypeIs(TYP_STRUCT) ? layout->GetSize() : genTypeSize(indir);
if (indSize > genTypeSize(fieldType))
{
// Retargeting this indirection to reference the promoted field would make it
// "wide", address-exposing the whole parent struct (with all of its fields).
return;
}
if (indir->TypeIs(TYP_STRUCT))
{
indir->SetOper(GT_OBJ);
indir->AsBlk()->SetLayout(layout);
indir->AsBlk()->gtBlkOpKind = GenTreeBlk::BlkOpKindInvalid;
@ -1298,6 +1349,27 @@ private:
{
return (user == nullptr) || (user->OperIs(GT_COMMA) && (user->AsOp()->gtGetOp1() == node));
}
//------------------------------------------------------------------------
// BashToLclVar: Bash node to a LCL_VAR.
//
// Arguments:
// node - the node to bash
// lclNum - the local's number
//
// Return Value:
// The bashed node.
//
GenTreeLclVar* BashToLclVar(GenTree* node, unsigned lclNum)
{
LclVarDsc* varDsc = m_compiler->lvaGetDesc(lclNum);
node->ChangeOper(GT_LCL_VAR);
node->ChangeType(varDsc->lvNormalizeOnLoad() ? varDsc->TypeGet() : genActualType(varDsc));
node->AsLclVar()->SetLclNum(lclNum);
return node->AsLclVar();
}
};
//------------------------------------------------------------------------
@ -1314,6 +1386,7 @@ private:
//
PhaseStatus Compiler::fgMarkAddressExposedLocals()
{
bool madeChanges = false;
LocalAddressVisitor visitor(this);
for (BasicBlock* const block : Blocks())
@ -1323,27 +1396,129 @@ PhaseStatus Compiler::fgMarkAddressExposedLocals()
for (Statement* const stmt : block->Statements())
{
visitor.VisitStmt(stmt);
}
}
return visitor.MadeChanges() ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING;
}
//------------------------------------------------------------------------
// fgMarkAddressExposedLocals: Traverses the specified statement and marks address
// exposed locals.
//
// Arguments:
// stmt - the statement to traverse
//
// Notes:
// Trees such as IND(ADDR(LCL_VAR)), that morph is expected to fold
// to just LCL_VAR, do not result in the involved local being marked
// address exposed.
//
void Compiler::fgMarkAddressExposedLocals(Statement* stmt)
#ifdef FEATURE_SIMD
if (opts.OptimizationEnabled() && stmt->GetRootNode()->TypeIs(TYP_FLOAT) &&
stmt->GetRootNode()->OperIs(GT_ASG))
{
LocalAddressVisitor visitor(this);
madeChanges |= fgMorphCombineSIMDFieldAssignments(block, stmt);
}
#endif
visitor.VisitStmt(stmt);
}
}
madeChanges |= visitor.MadeChanges();
return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING;
}
#ifdef FEATURE_SIMD
//-----------------------------------------------------------------------------------
// fgMorphCombineSIMDFieldAssignments:
// If the RHS of the input stmt is a read of SIMD vector field X, then this
// function keeps reading the next few stmts based on the vector size (2, 3, 4).
// If the LHSs of the next stmts are located contiguously and the RHSs are also
// located contiguously, then we replace those statements with one store.
//
// Argument:
// block - BasicBlock*. block which stmt belongs to
// stmt - Statement*. the stmt node we want to check
//
// Return Value:
// Whether the assignments were successfully coalesced.
//
bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt)
{
GenTree* tree = stmt->GetRootNode();
assert(tree->OperGet() == GT_ASG);
GenTree* originalLHS = tree->AsOp()->gtOp1;
GenTree* prevLHS = tree->AsOp()->gtOp1;
GenTree* prevRHS = tree->AsOp()->gtOp2;
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(prevRHS, &simdBaseJitType, &index, &simdSize, true);
if ((simdStructNode == nullptr) || (index != 0) || (simdBaseJitType != CORINFO_TYPE_FLOAT))
{
// if the RHS is not from a SIMD vector field X, then there is no need to check further.
return false;
}
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
var_types simdType = getSIMDTypeForSize(simdSize);
int assignmentsCount = simdSize / genTypeSize(simdBaseType) - 1;
int remainingAssignments = assignmentsCount;
Statement* curStmt = stmt->GetNextStmt();
Statement* lastStmt = stmt;
while (curStmt != nullptr && remainingAssignments > 0)
{
GenTree* exp = curStmt->GetRootNode();
if (exp->OperGet() != GT_ASG)
{
break;
}
GenTree* curLHS = exp->gtGetOp1();
GenTree* curRHS = exp->gtGetOp2();
if (!areArgumentsContiguous(prevLHS, curLHS) || !areArgumentsContiguous(prevRHS, curRHS))
{
break;
}
remainingAssignments--;
prevLHS = curLHS;
prevRHS = curRHS;
lastStmt = curStmt;
curStmt = curStmt->GetNextStmt();
}
if (remainingAssignments > 0)
{
// If any assignments remain unmatched, the statements are not assigning to
// contiguous memory locations from the same vector, so there is nothing to combine.
return false;
}
JITDUMP("\nFound contiguous assignments from a SIMD vector to memory.\n");
JITDUMP("From " FMT_BB ", " FMT_STMT " to " FMT_STMT "\n", block->bbNum, stmt->GetID(), lastStmt->GetID());
for (int i = 0; i < assignmentsCount; i++)
{
fgRemoveStmt(block, stmt->GetNextStmt());
}
GenTree* dstNode;
if (originalLHS->OperIs(GT_LCL_FLD))
{
dstNode = originalLHS;
dstNode->gtType = simdType;
}
else
{
GenTree* copyBlkDst = createAddressNodeForSIMDInit(originalLHS, simdSize);
dstNode = gtNewOperNode(GT_IND, simdType, copyBlkDst);
}
JITDUMP("\n" FMT_BB " " FMT_STMT " (before):\n", block->bbNum, stmt->GetID());
DISPSTMT(stmt);
assert(!simdStructNode->CanCSE() && varTypeIsSIMD(simdStructNode));
simdStructNode->ClearDoNotCSE();
tree = gtNewAssignNode(dstNode, simdStructNode);
stmt->SetRootNode(tree);
JITDUMP("\nReplaced " FMT_BB " " FMT_STMT " (after):\n", block->bbNum, stmt->GetID());
DISPSTMT(stmt);
return true;
}
#endif // FEATURE_SIMD

View file

@ -317,7 +317,7 @@ private:
GenTree* LowerSignedDivOrMod(GenTree* node);
void LowerBlockStore(GenTreeBlk* blkNode);
void LowerBlockStoreCommon(GenTreeBlk* blkNode);
void ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr);
void ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr, GenTree* addrParent);
void LowerPutArgStkOrSplit(GenTreePutArgStk* putArgNode);
#ifdef TARGET_XARCH
void LowerPutArgStk(GenTreePutArgStk* putArgStk);

View file

@ -566,7 +566,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
src->AsIntCon()->SetIconValue(fill);
ContainBlockStoreAddress(blkNode, size, dstAddr);
ContainBlockStoreAddress(blkNode, size, dstAddr, nullptr);
}
else
{
@ -637,10 +637,10 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
if (src->OperIs(GT_IND))
{
ContainBlockStoreAddress(blkNode, size, src->AsIndir()->Addr());
ContainBlockStoreAddress(blkNode, size, src->AsIndir()->Addr(), src->AsIndir());
}
ContainBlockStoreAddress(blkNode, size, dstAddr);
ContainBlockStoreAddress(blkNode, size, dstAddr, nullptr);
}
else
{
@ -658,8 +658,9 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
// blkNode - the block store node
// size - the block size
// addr - the address node to try to contain
// addrParent - the parent of addr, in case this is checking containment of the source address.
//
void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr)
void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr, GenTree* addrParent)
{
assert(blkNode->OperIs(GT_STORE_BLK) && (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll));
assert(size < INT32_MAX);
@ -692,7 +693,7 @@ void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenT
}
#endif // !TARGET_ARM
if (!IsSafeToContainMem(blkNode, addr))
if (!IsSafeToContainMem(blkNode, addrParent, addr))
{
return;
}

View file

@ -252,7 +252,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
}
src->AsIntCon()->SetIconValue(fill);
ContainBlockStoreAddress(blkNode, size, dstAddr);
ContainBlockStoreAddress(blkNode, size, dstAddr, nullptr);
}
else
{
@ -307,10 +307,10 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
if (src->OperIs(GT_IND))
{
ContainBlockStoreAddress(blkNode, size, src->AsIndir()->Addr());
ContainBlockStoreAddress(blkNode, size, src->AsIndir()->Addr(), src->AsIndir());
}
ContainBlockStoreAddress(blkNode, size, dstAddr);
ContainBlockStoreAddress(blkNode, size, dstAddr, nullptr);
}
else
{
@ -328,8 +328,10 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
// blkNode - the block store node
// size - the block size
// addr - the address node to try to contain
// addrParent - the parent of addr, in case this is checking containment of the source address.
//
void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr)
void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr, GenTree* addrParent)
{
assert(blkNode->OperIs(GT_STORE_BLK) && (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll));
assert(size < INT32_MAX);
@ -354,7 +356,7 @@ void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenT
return;
}
if (!IsSafeToContainMem(blkNode, addr))
if (!IsSafeToContainMem(blkNode, addrParent, addr))
{
return;
}

View file

@ -373,7 +373,7 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
src->AsIntCon()->SetIconValue(fill);
ContainBlockStoreAddress(blkNode, size, dstAddr);
ContainBlockStoreAddress(blkNode, size, dstAddr, nullptr);
}
}
else
@ -478,10 +478,10 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
if (src->OperIs(GT_IND))
{
ContainBlockStoreAddress(blkNode, size, src->AsIndir()->Addr());
ContainBlockStoreAddress(blkNode, size, src->AsIndir()->Addr(), src->AsIndir());
}
ContainBlockStoreAddress(blkNode, size, dstAddr);
ContainBlockStoreAddress(blkNode, size, dstAddr, nullptr);
}
else
{
@ -504,8 +504,9 @@ void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
// blkNode - the block store node
// size - the block size
// addr - the address node to try to contain
// addrParent - the parent of addr, in case this is checking containment of the source address.
//
void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr)
void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr, GenTree* addrParent)
{
assert(blkNode->OperIs(GT_STORE_BLK) && (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll));
assert(size < INT32_MAX);
@ -536,7 +537,7 @@ void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenT
// Note that the parentNode is always the block node, even if we're dealing with the source address.
// The source address is not directly used by the block node but by an IND node and that IND node is
// always contained.
if (!IsSafeToContainMem(blkNode, addrMode))
if (!IsSafeToContainMem(blkNode, addrParent, addrMode))
{
return;
}

View file

@ -4608,6 +4608,13 @@ GenTree* Compiler::fgMorphIndexAddr(GenTreeIndexAddr* indexAddr)
// Prepend the bounds check and the assignment trees that were created (if any).
if (boundsCheck != nullptr)
{
// This is changing a value dependency (INDEX_ADDR node) into a flow
// dependency, so make sure this dependency remains visible. Also, the
// JIT is not allowed to create arbitrary byrefs, so we must make sure
// the address is not reordered with the bounds check.
boundsCheck->gtFlags |= GTF_ORDER_SIDEEFF;
addr->gtFlags |= GTF_ORDER_SIDEEFF;
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), boundsCheck, tree);
fgSetRngChkTarget(boundsCheck);
}
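
As a rough C# illustration (not part of the commit) of why both nodes get GTF_ORDER_SIDEEFF: array indexing produces a bounds check plus an interior byref, and the byref must not float above the check that makes it legal.

```csharp
static ref int ElementRef(int[] a, int i)
{
    // Conceptually the JIT expands this to:
    //   COMMA(bounds-check(i, a.Length), byref = &a + dataOffset + i * sizeof(int))
    // Flagging both halves with GTF_ORDER_SIDEEFF keeps later phases from hoisting
    // the byref computation above the check, which would create an illegal byref
    // when i is out of range.
    return ref a[i];
}
```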
@ -4988,30 +4995,6 @@ GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac)
((tree->gtFlags & GTF_GLOB_REF) != 0));
}
#ifdef FEATURE_SIMD
// if this field belongs to simd struct, translate it to simd intrinsic.
if ((mac == nullptr) && tree->OperIs(GT_FIELD))
{
if (IsBaselineSimdIsaSupported())
{
GenTree* newTree = fgMorphFieldToSimdGetElement(tree);
if (newTree != tree)
{
newTree = fgMorphTree(newTree);
return newTree;
}
}
}
else if ((objRef != nullptr) && (objRef->OperGet() == GT_ADDR) && varTypeIsSIMD(objRef->gtGetOp1()))
{
GenTreeLclVarCommon* lcl = objRef->IsLocalAddrExpr();
if (lcl != nullptr)
{
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField));
}
}
#endif
MorphAddrContext indMAC(MACK_Ind);
MorphAddrContext addrMAC(MACK_Addr);
bool isAddr = tree->OperIs(GT_FIELD_ADDR);
@ -5190,6 +5173,8 @@ GenTree* Compiler::fgMorphExpandInstanceField(GenTree* tree, MorphAddrContext* m
GenTree* lclVar = gtNewLclvNode(lclNum, objRefType);
GenTree* nullchk = gtNewNullCheck(lclVar, compCurBB);
nullchk->gtFlags |= GTF_ORDER_SIDEEFF;
if (asg != nullptr)
{
// Create the "comma" node.
@ -5201,6 +5186,10 @@ GenTree* Compiler::fgMorphExpandInstanceField(GenTree* tree, MorphAddrContext* m
}
addr = gtNewLclvNode(lclNum, objRefType); // Use "tmpLcl" to create "addr" node.
// Ensure the creation of the byref does not get reordered with the
// null check, as that could otherwise create an illegal byref.
addr->gtFlags |= GTF_ORDER_SIDEEFF;
}
else
{
@ -5408,7 +5397,7 @@ GenTree* Compiler::fgMorphExpandStaticField(GenTree* tree)
{
// Only simple statics get imported as GT_FIELDs.
fieldSeq = GetFieldSeqStore()->Create(fieldHandle, reinterpret_cast<size_t>(fldAddr),
FieldSeq::FieldKind::SimpleStatic);
FieldSeq::FieldKind::SimpleStaticKnownAddress);
}
// TODO-CQ: enable this optimization for 32 bit targets.
@ -8865,129 +8854,6 @@ GenTree* Compiler::getSIMDStructFromField(GenTree* tree,
return nullptr;
}
/*****************************************************************************
* If a read operation tries to access simd struct field, then transform the operation
* to the SimdGetElementNode, and return the new tree. Otherwise, return the old tree.
* Argument:
* tree - GenTree*. If this pointer points to simd struct which is used for simd
* intrinsic, we will morph it as simd intrinsic NI_Vector128_GetElement.
* Return:
* A GenTree* which points to the new tree. If the tree is not for simd intrinsic,
* return nullptr.
*/
GenTree* Compiler::fgMorphFieldToSimdGetElement(GenTree* tree)
{
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(tree, &simdBaseJitType, &index, &simdSize);
if (simdStructNode != nullptr)
{
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
GenTree* op2 = gtNewIconNode(index, TYP_INT);
assert(simdSize <= 32);
assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
if (!compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
return tree;
}
break;
}
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
{
if (!compOpportunisticallyDependsOn(InstructionSet_SSE2))
{
return tree;
}
break;
}
default:
{
unreached();
}
}
#elif defined(TARGET_ARM64)
if (!compOpportunisticallyDependsOn(InstructionSet_AdvSimd))
{
return tree;
}
#endif // !TARGET_XARCH && !TARGET_ARM64
tree = gtNewSimdGetElementNode(simdBaseType, simdStructNode, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
}
return tree;
}
/*****************************************************************************
* Transform an assignment of a SIMD struct field to SimdWithElementNode, and
* return a new tree. If it is not such an assignment, then return the old tree.
* Argument:
* tree - GenTree*. If this pointer points to simd struct which is used for simd
* intrinsic, we will morph it as simd intrinsic set.
* Return:
* A GenTree* which points to the new tree. If the tree is not for simd intrinsic,
* return nullptr.
*/
GenTree* Compiler::fgMorphFieldAssignToSimdSetElement(GenTree* tree)
{
assert(tree->OperGet() == GT_ASG);
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(tree->gtGetOp1(), &simdBaseJitType, &index, &simdSize);
if (simdStructNode != nullptr)
{
var_types simdType = simdStructNode->gtType;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(simdSize <= 32);
assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));
GenTree* op2 = gtNewIconNode(index, TYP_INT);
GenTree* op3 = tree->gtGetOp2();
NamedIntrinsic intrinsicId = NI_Vector128_WithElement;
GenTree* target = gtClone(simdStructNode);
assert(target != nullptr);
GenTree* simdTree = gtNewSimdWithElementNode(simdType, simdStructNode, op2, op3, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
tree->AsOp()->gtOp1 = target;
tree->AsOp()->gtOp2 = simdTree;
#ifdef DEBUG
tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
return tree;
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------------
@ -9201,26 +9067,6 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
noway_assert(op1 == tree->AsOp()->gtOp1);
op2 = tree->AsOp()->gtOp2;
#ifdef FEATURE_SIMD
if (IsBaselineSimdIsaSupported())
{
// We should check whether op2 should be assigned to a SIMD field or not.
// If it is, we should translate the tree to simd intrinsic.
assert(!fgGlobalMorph || ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0));
GenTree* newTree = fgMorphFieldAssignToSimdSetElement(tree);
typ = tree->TypeGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2();
#ifdef DEBUG
assert((tree == newTree) && (tree->OperGet() == oper));
if ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) != 0)
{
tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
}
#endif // DEBUG
}
#endif
// Location nodes cannot be CSEd.
op1->gtFlags |= GTF_DONT_CSE;
break;
@ -10646,7 +10492,7 @@ DONE_MORPHING_CHILDREN:
// could result in an invalid value number for the newly generated GT_IND node.
if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph)
{
// Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)).
// Perform the transform IND(COMMA(x, ..., z)) -> COMMA(x, ..., IND(z)).
// TBD: this transformation is currently necessary for correctness -- it might
// be good to analyze the failures that result if we don't do this, and fix them
// in other ways. Ideally, this should be optional.
@ -10679,9 +10525,12 @@ DONE_MORPHING_CHILDREN:
// TODO-1stClassStructs: we often create a struct IND without a handle, fix it.
op1 = gtNewIndir(typ, addr);
// Determine flags on the indir.
// GTF_GLOB_EFFECT flags can be recomputed from the child
// nodes. GTF_ORDER_SIDEEFF may be set already and indicate no
// reordering is allowed with sibling nodes, so we cannot
// recompute that.
//
op1->gtFlags |= treeFlags & ~GTF_ALL_EFFECT;
op1->gtFlags |= treeFlags & ~GTF_GLOB_EFFECT;
op1->gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
// if this was a non-faulting indir, clear GTF_EXCEPT,
@ -14243,13 +14092,6 @@ void Compiler::fgMorphStmts(BasicBlock* block)
fgRemoveStmt(block, stmt);
continue;
}
#ifdef FEATURE_SIMD
if (opts.OptimizationEnabled() && stmt->GetRootNode()->TypeGet() == TYP_FLOAT &&
stmt->GetRootNode()->OperGet() == GT_ASG)
{
fgMorphCombineSIMDFieldAssignments(block, stmt);
}
#endif
fgMorphStmt = stmt;
compCurStmt = stmt;
@ -15861,181 +15703,6 @@ void Compiler::fgMarkDemotedImplicitByRefArgs()
#endif // FEATURE_IMPLICIT_BYREFS
}
#ifdef FEATURE_SIMD
//-----------------------------------------------------------------------------------
// fgMorphCombineSIMDFieldAssignments:
// If the RHS of the input stmt is a read for simd vector X Field, then this function
// will keep reading next few stmts based on the vector size(2, 3, 4).
// If the next stmts LHS are located contiguous and RHS are also located
// contiguous, then we replace those statements with a copyblk.
//
// Argument:
// block - BasicBlock*. block which stmt belongs to
// stmt - Statement*. the stmt node we want to check
//
// return value:
// if this function successfully optimized the stmts, then return true. Otherwise
// return false;
bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt)
{
GenTree* tree = stmt->GetRootNode();
assert(tree->OperGet() == GT_ASG);
GenTree* originalLHS = tree->AsOp()->gtOp1;
GenTree* prevLHS = tree->AsOp()->gtOp1;
GenTree* prevRHS = tree->AsOp()->gtOp2;
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(prevRHS, &simdBaseJitType, &index, &simdSize, true);
if (simdStructNode == nullptr || index != 0 || simdBaseJitType != CORINFO_TYPE_FLOAT)
{
// if the RHS is not from a SIMD vector field X, then there is no need to check further.
return false;
}
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
var_types simdType = getSIMDTypeForSize(simdSize);
int assignmentsCount = simdSize / genTypeSize(simdBaseType) - 1;
int remainingAssignments = assignmentsCount;
Statement* curStmt = stmt->GetNextStmt();
Statement* lastStmt = stmt;
while (curStmt != nullptr && remainingAssignments > 0)
{
GenTree* exp = curStmt->GetRootNode();
if (exp->OperGet() != GT_ASG)
{
break;
}
GenTree* curLHS = exp->gtGetOp1();
GenTree* curRHS = exp->gtGetOp2();
if (!areArgumentsContiguous(prevLHS, curLHS) || !areArgumentsContiguous(prevRHS, curRHS))
{
break;
}
remainingAssignments--;
prevLHS = curLHS;
prevRHS = curRHS;
lastStmt = curStmt;
curStmt = curStmt->GetNextStmt();
}
if (remainingAssignments > 0)
{
// if the left assignments number is bigger than zero, then this means
// that the assignments are not assigning to the contiguously memory
// locations from same vector.
return false;
}
#ifdef DEBUG
if (verbose)
{
printf("\nFound contiguous assignments from a SIMD vector to memory.\n");
printf("From " FMT_BB ", stmt ", block->bbNum);
printStmtID(stmt);
printf(" to stmt");
printStmtID(lastStmt);
printf("\n");
}
#endif
for (int i = 0; i < assignmentsCount; i++)
{
fgRemoveStmt(block, stmt->GetNextStmt());
}
GenTree* dstNode;
if (originalLHS->OperIs(GT_LCL_FLD))
{
dstNode = originalLHS;
dstNode->gtType = simdType;
dstNode->AsLclFld()->SetLayout(nullptr);
// This may have changed a partial local field into full local field
if (dstNode->IsPartialLclFld(this))
{
dstNode->gtFlags |= GTF_VAR_USEASG;
}
else
{
dstNode->gtFlags &= ~GTF_VAR_USEASG;
}
}
else
{
GenTree* copyBlkDst = createAddressNodeForSIMDInit(originalLHS, simdSize);
if (simdStructNode->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(simdStructNode);
}
GenTreeLclVarCommon* localDst = copyBlkDst->IsLocalAddrExpr();
if (localDst != nullptr)
{
setLclRelatedToSIMDIntrinsic(localDst);
}
if (simdStructNode->TypeGet() == TYP_BYREF)
{
assert(simdStructNode->OperIsLocal());
assert(lvaIsImplicitByRefLocal(simdStructNode->AsLclVarCommon()->GetLclNum()));
simdStructNode = gtNewIndir(simdType, simdStructNode);
}
else
{
assert(varTypeIsSIMD(simdStructNode));
}
dstNode = gtNewOperNode(GT_IND, simdType, copyBlkDst);
}
#ifdef DEBUG
if (verbose)
{
printf("\n" FMT_BB " stmt ", block->bbNum);
printStmtID(stmt);
printf("(before)\n");
gtDispStmt(stmt);
}
#endif
assert(!simdStructNode->CanCSE());
simdStructNode->ClearDoNotCSE();
tree = gtNewAssignNode(dstNode, simdStructNode);
stmt->SetRootNode(tree);
// Since we generated a new address node which didn't exist before,
// we should expose this address manually here.
// TODO-ADDR: Remove this when LocalAddressVisitor transforms all
// local field access into LCL_FLDs, at that point we would be
// combining 2 existing LCL_FLDs or 2 FIELDs that do not reference
// a local and thus cannot result in a new address exposed local.
fgMarkAddressExposedLocals(stmt);
#ifdef DEBUG
if (verbose)
{
printf("\nReplaced " FMT_BB " stmt", block->bbNum);
printStmtID(stmt);
printf("(after)\n");
gtDispStmt(stmt);
}
#endif
return true;
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// fgCheckStmtAfterTailCall: check that statements after the tail call stmt
// candidate are in one of the expected forms that are described below.

View file

@ -4794,6 +4794,17 @@ bool Compiler::optIfConvert(BasicBlock* block)
return false;
}
// Evaluating op1/op2 unconditionally effectively has the same effect as
// reordering them with the condition (for example, the condition could be
// an explicit bounds check and the operand could read an array element).
// Disallow this except for some common cases that we know are always side
// effect free.
if (((cond->gtFlags & GTF_ORDER_SIDEEFF) != 0) && !asgNode->gtGetOp2()->IsInvariant() &&
!asgNode->gtGetOp2()->OperIsLocal())
{
return false;
}
#ifdef DEBUG
if (verbose)
{

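For intuition, a hedged C# illustration (not taken from the commit) of the hazard this new check avoids: when the condition is effectively a bounds check, if-converting would evaluate the array load unconditionally, i.e. reordered with respect to that check.

```csharp
static int TryLoad(int[] data, int i)
{
    int result = 0;
    // After range-check elimination the load below has no check of its own; the
    // guarding condition is the only thing keeping data[i] in bounds. Turning this
    // into a conditional move would execute the load even when the guard is false,
    // which is exactly the reordering the GTF_ORDER_SIDEEFF test above rejects.
    if ((uint)i < (uint)data.Length)
        result = data[i];
    return result;
}
```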
View file

@ -1748,7 +1748,11 @@ GenTree* Compiler::createAddressNodeForSIMDInit(GenTree* tree, unsigned simdSize
byrefNode = gtNewOperNode(GT_COMMA, arrayRef->TypeGet(), arrBndsChk, gtCloneExpr(arrayRef));
}
GenTree* address = gtNewOperNode(GT_ADD, TYP_BYREF, byrefNode, gtNewIconNode(offset, TYP_I_IMPL));
GenTree* address = byrefNode;
if (offset != 0)
{
address = gtNewOperNode(GT_ADD, TYP_BYREF, address, gtNewIconNode(offset, TYP_I_IMPL));
}
return address;
}

View file

@ -213,6 +213,12 @@ GenTree* Compiler::impSimdAsHWIntrinsic(NamedIntrinsic intrinsic,
if (retType == TYP_STRUCT)
{
simdBaseJitType = getBaseJitTypeAndSizeOfSIMDType(sig->retTypeSigClass, &simdSize);
if ((simdBaseJitType == CORINFO_TYPE_UNDEF) || !varTypeIsArithmetic(JitType2PreciseVarType(simdBaseJitType)) ||
(simdSize == 0))
{
// Unsupported type
return nullptr;
}
retType = getSIMDTypeForSize(simdSize);
}
else if (numArgs != 0)

View file

@ -2113,7 +2113,7 @@ ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN)
{
uint8_t buffer[TARGET_POINTER_SIZE] = {0};
if (m_pComp->info.compCompHnd->getReadonlyStaticFieldValue(field, buffer,
TARGET_POINTER_SIZE, false))
TARGET_POINTER_SIZE, 0, false))
{
// In case of 64bit jit emitting 32bit codegen this handle will be 64bit
// value holding 32bit handle with upper half zeroed (hence, "= NULL").
@ -8494,6 +8494,55 @@ void Compiler::fgValueNumberSsaVarDef(GenTreeLclVarCommon* lcl)
}
}
//----------------------------------------------------------------------------------
// fgGetStaticFieldSeqAndAddress: Try to obtain a constant address with a FieldSeq from the
// given tree. It can be either INT_CNS or e.g. ADD(INT_CNS, ADD(INT_CNS, INT_CNS))
// tree where only one of the constants is expected to have a field sequence.
//
// Arguments:
// tree - tree node to inspect
// pAddress - [Out] resulting address with all offsets combined
// pFseq - [Out] field sequence
//
// Return Value:
// true if the given tree is a static field address
//
static bool fgGetStaticFieldSeqAndAddress(GenTree* tree, ssize_t* pAddress, FieldSeq** pFseq)
{
ssize_t val = 0;
// Accumulate final offset
while (tree->OperIs(GT_ADD))
{
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
if (op1->IsCnsIntOrI() && (op1->AsIntCon()->gtFieldSeq == nullptr))
{
val += op1->AsIntCon()->IconValue();
tree = op2;
}
else if (op2->IsCnsIntOrI() && (op2->AsIntCon()->gtFieldSeq == nullptr))
{
val += op2->AsIntCon()->IconValue();
tree = op1;
}
else
{
// We only inspect constants and additions
return false;
}
}
// Base address is expected to be static field's address
if ((tree->IsCnsIntOrI()) && (tree->AsIntCon()->gtFieldSeq != nullptr) &&
(tree->AsIntCon()->gtFieldSeq->GetKind() == FieldSeq::FieldKind::SimpleStaticKnownAddress))
{
*pFseq = tree->AsIntCon()->gtFieldSeq;
*pAddress = tree->AsIntCon()->IconValue() + val;
return true;
}
return false;
}
//----------------------------------------------------------------------------------
// fgValueNumberConstLoad: Try to detect const_immutable_array[cns_index] tree
// and apply a constant VN representing given element at cns_index in that array.
@ -8506,9 +8555,104 @@ void Compiler::fgValueNumberSsaVarDef(GenTreeLclVarCommon* lcl)
//
bool Compiler::fgValueNumberConstLoad(GenTreeIndir* tree)
{
if (!tree->gtVNPair.BothEqual())
{
return false;
}
// First, let's check if we can detect RVA[const_index] pattern to fold, e.g.:
//
// static ReadOnlySpan<sbyte> RVA => new sbyte[] { -100, 100 }
//
// sbyte GetVal() => RVA[1]; // fold to '100'
//
ssize_t address = 0;
FieldSeq* fieldSeq = nullptr;
if (fgGetStaticFieldSeqAndAddress(tree->gtGetOp1(), &address, &fieldSeq))
{
assert(fieldSeq->GetKind() == FieldSeq::FieldKind::SimpleStaticKnownAddress);
CORINFO_FIELD_HANDLE fieldHandle = fieldSeq->GetFieldHandle();
ssize_t byteOffset = address - fieldSeq->GetOffset();
int size = (int)genTypeSize(tree->TypeGet());
const int maxElementSize = sizeof(int64_t);
if ((fieldHandle != nullptr) && (size > 0) && (size <= maxElementSize) && ((size_t)byteOffset < INT_MAX))
{
uint8_t buffer[maxElementSize] = {0};
if (info.compCompHnd->getReadonlyStaticFieldValue(fieldHandle, (uint8_t*)&buffer, size, (int)byteOffset))
{
// For now we only support these primitives, we can extend this list to FP, SIMD and structs in future.
switch (tree->TypeGet())
{
#define READ_VALUE(typ) \
typ val = 0; \
memcpy(&val, buffer, sizeof(typ));
case TYP_BOOL:
case TYP_UBYTE:
{
READ_VALUE(uint8_t);
tree->gtVNPair.SetBoth(vnStore->VNForIntCon(val));
return true;
}
case TYP_BYTE:
{
READ_VALUE(int8_t);
tree->gtVNPair.SetBoth(vnStore->VNForIntCon(val));
return true;
}
case TYP_SHORT:
{
READ_VALUE(int16_t);
tree->gtVNPair.SetBoth(vnStore->VNForIntCon(val));
return true;
}
case TYP_USHORT:
{
READ_VALUE(uint16_t);
tree->gtVNPair.SetBoth(vnStore->VNForIntCon(val));
return true;
}
case TYP_INT:
{
READ_VALUE(int32_t);
tree->gtVNPair.SetBoth(vnStore->VNForIntCon(val));
return true;
}
case TYP_UINT:
{
READ_VALUE(uint32_t);
tree->gtVNPair.SetBoth(vnStore->VNForIntCon(val));
return true;
}
case TYP_LONG:
{
READ_VALUE(int64_t);
tree->gtVNPair.SetBoth(vnStore->VNForLongCon(val));
return true;
}
case TYP_ULONG:
{
READ_VALUE(uint64_t);
tree->gtVNPair.SetBoth(vnStore->VNForLongCon(val));
return true;
}
default:
break;
}
}
}
}
// Throughput check, the logic below is only for USHORT (char)
if (!tree->TypeIs(TYP_USHORT))
{
return false;
}
ValueNum addrVN = tree->gtGetOp1()->gtVNPair.GetLiberal();
VNFuncApp funcApp;
if (!tree->TypeIs(TYP_USHORT) || !tree->gtVNPair.BothEqual() || !vnStore->GetVNFunc(addrVN, &funcApp))
if (!vnStore->GetVNFunc(addrVN, &funcApp))
{
return false;
}
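
To make the pattern concrete, here is a small C# sketch (illustrative only; type and member names are hypothetical) of the kind of code the fold above targets: a readonly RVA-backed span indexed by a constant, which value numbering can now fold to the element value at compile time.

```csharp
using System;

static class RvaFoldSample
{
    // The C# compiler emits this initializer as an RVA static blob in the data section.
    private static ReadOnlySpan<sbyte> Rva => new sbyte[] { -100, 100 };

    // With the change above, the JIT can read the blob at the constant offset
    // and replace the load with the constant 100.
    public static sbyte GetVal() => Rva[1];
}
```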

View file

@ -82,7 +82,8 @@ The .NET Foundation licenses this file to you under the MIT license.
<NativeObject>$(NativeIntermediateOutputPath)$(TargetName)$(NativeObjectExt)</NativeObject>
<NativeBinary>$(NativeOutputPath)$(TargetName)$(NativeBinaryExt)</NativeBinary>
<ExportsFile Condition="$(NativeLib) == 'Shared' and $(ExportsFile) == ''">$(NativeIntermediateOutputPath)$(TargetName)$(ExportsFileExt)</ExportsFile>
<IlcExportUnmanagedEntrypoints Condition="$(NativeLib) == 'Shared'">true</IlcExportUnmanagedEntrypoints>
<ExportsFile Condition="$(IlcExportUnmanagedEntrypoints) == 'true' and $(ExportsFile) == ''">$(NativeIntermediateOutputPath)$(TargetName)$(ExportsFileExt)</ExportsFile>
<IlcCompileOutput>$(NativeObject)</IlcCompileOutput>

View file

@ -132,7 +132,7 @@ namespace Internal.Runtime.CompilerHelpers
IntPtr staticsSection = RuntimeImports.RhGetModuleSection(typeManager, ReadyToRunSectionType.GCStaticRegion, out length);
if (staticsSection != IntPtr.Zero)
{
Debug.Assert(length % IntPtr.Size == 0);
Debug.Assert(length % (MethodTable.SupportsRelativePointers ? sizeof(int) : sizeof(nint)) == 0);
object[] spine = InitializeStatics(staticsSection, length);
@ -170,32 +170,40 @@ namespace Internal.Runtime.CompilerHelpers
private static unsafe void RunInitializers(TypeManagerHandle typeManager, ReadyToRunSectionType section)
{
var initializers = (delegate*<void>*)RuntimeImports.RhGetModuleSection(typeManager, section, out int length);
Debug.Assert(length % IntPtr.Size == 0);
int count = length / IntPtr.Size;
for (int i = 0; i < count; i++)
var pInitializers = (byte*)RuntimeImports.RhGetModuleSection(typeManager, section, out int length);
Debug.Assert(length % (MethodTable.SupportsRelativePointers ? sizeof(int) : sizeof(nint)) == 0);
for (byte* pCurrent = pInitializers;
pCurrent < (pInitializers + length);
pCurrent += MethodTable.SupportsRelativePointers ? sizeof(int) : sizeof(nint))
{
initializers[i]();
var initializer = MethodTable.SupportsRelativePointers ? (delegate*<void>)ReadRelPtr32(pCurrent) : (delegate*<void>)pCurrent;
initializer();
}
static void* ReadRelPtr32(void* address)
=> (byte*)address + *(int*)address;
}
private static unsafe object[] InitializeStatics(IntPtr gcStaticRegionStart, int length)
{
IntPtr gcStaticRegionEnd = (IntPtr)((byte*)gcStaticRegionStart + length);
byte* gcStaticRegionEnd = (byte*)gcStaticRegionStart + length;
object[] spine = new object[length / IntPtr.Size];
object[] spine = new object[length / (MethodTable.SupportsRelativePointers ? sizeof(int) : sizeof(nint))];
ref object rawSpineData = ref Unsafe.As<byte, object>(ref Unsafe.As<RawArrayData>(spine).Data);
int currentBase = 0;
for (IntPtr* block = (IntPtr*)gcStaticRegionStart; block < (IntPtr*)gcStaticRegionEnd; block++)
for (byte* block = (byte*)gcStaticRegionStart;
block < gcStaticRegionEnd;
block += MethodTable.SupportsRelativePointers ? sizeof(int) : sizeof(nint))
{
// Gc Static regions can be shared by modules linked together during compilation. To ensure each
// is initialized once, the static region pointer is stored with lowest bit set in the image.
// The first time we initialize the static region its pointer is replaced with an object reference
// whose lowest bit is no longer set.
IntPtr* pBlock = (IntPtr*)*block;
nint blockAddr = *pBlock;
IntPtr* pBlock = MethodTable.SupportsRelativePointers ? (IntPtr*)ReadRelPtr32(block) : *(IntPtr**)block;
nint blockAddr = MethodTable.SupportsRelativePointers ? (nint)ReadRelPtr32(pBlock) : *pBlock;
if ((blockAddr & GCStaticRegionConstants.Uninitialized) == GCStaticRegionConstants.Uninitialized)
{
object? obj = null;
@ -215,7 +223,7 @@ namespace Internal.Runtime.CompilerHelpers
// which are pointer relocs to GC objects in frozen segment.
// It actually has all GC fields including non-preinitialized fields and we simply copy over the
// entire blob to this object, overwriting everything.
IntPtr pPreInitDataAddr = *(pBlock + 1);
void* pPreInitDataAddr = MethodTable.SupportsRelativePointers ? ReadRelPtr32((int*)pBlock + 1) : (void*)*(pBlock + 1);
RuntimeImports.RhBulkMoveWithWriteBarrier(ref obj.GetRawData(), ref *(byte *)pPreInitDataAddr, obj.GetRawObjectDataSize());
}
@ -231,6 +239,9 @@ namespace Internal.Runtime.CompilerHelpers
}
return spine;
static void* ReadRelPtr32(void* address)
=> (byte*)address + *(int*)address;
}
private static unsafe void RehydrateData(IntPtr dehydratedData, int length)

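A minimal, self-contained C# sketch of the 32-bit relative-pointer convention the rewritten loops rely on (assuming the same encoding as the ReadRelPtr32 local function above: each 4-byte slot stores the signed distance from the slot to its target).

```csharp
using System;

unsafe class RelPtr32Sample
{
    // Same shape as ReadRelPtr32 above: target = slot address + slot value.
    static void* ReadRelPtr32(void* slot) => (byte*)slot + *(int*)slot;

    static void Main()
    {
        byte* block = stackalloc byte[16];
        int* target = (int*)(block + 8);
        *target = 42;

        int* slot = (int*)block;                     // stand-in for a slot in a module section
        *slot = (int)((byte*)target - (byte*)slot);  // encode the relative pointer (here: +8)

        Console.WriteLine(*(int*)ReadRelPtr32(slot)); // prints 42
    }
}
```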
View file

@ -902,6 +902,9 @@ namespace Internal.Runtime
return DynamicTemplateType->DispatchMap;
}
if (SupportsRelativePointers)
return (DispatchMap*)FollowRelativePointer((int*)TypeManager.DispatchMap + idxDispatchMap);
else
return ((DispatchMap**)TypeManager.DispatchMap)[idxDispatchMap];
}
}

View file

@ -15,7 +15,9 @@ namespace System.Runtime
private static unsafe IntPtr RhpCidResolve(IntPtr callerTransitionBlockParam, IntPtr pCell)
{
IntPtr locationOfThisPointer = callerTransitionBlockParam + TransitionBlock.GetThisOffset();
object pObject = Unsafe.As<IntPtr, object>(ref *(IntPtr*)locationOfThisPointer);
#pragma warning disable 8500 // address of managed types
object pObject = *(object*)locationOfThisPointer;
#pragma warning restore 8500
IntPtr dispatchResolveTarget = RhpCidResolve_Worker(pObject, pCell);
return dispatchResolveTarget;
}

View file

@ -41,7 +41,9 @@ namespace System.Runtime.InteropServices
// The runtime performs additional checks in debug builds
return InternalCalls.RhHandleGet(_handle);
#else
return Unsafe.As<IntPtr, object>(ref *(IntPtr*)_handle);
#pragma warning disable 8500 // address of managed types
return *(object*)_handle;
#pragma warning restore 8500
#endif
}

View file

@ -5,11 +5,12 @@
#define DEBUG
using System;
using System.Text;
using System.Runtime;
using System.Diagnostics;
using System.Diagnostics.Contracts;
using System.IO;
using System.Reflection;
using System.Runtime;
using System.Text;
using Internal.Runtime.Augments;
@ -65,8 +66,8 @@ namespace Internal.DeveloperExperience
}
StringBuilder sb = new StringBuilder();
string fileNameWithoutExtension = GetFileNameWithoutExtension(moduleFullFileName);
int rva = (int)(ip.ToInt64() - moduleBase.ToInt64());
ReadOnlySpan<char> fileNameWithoutExtension = Path.GetFileNameWithoutExtension(moduleFullFileName.AsSpan());
int rva = (int)(ip - moduleBase);
sb.Append(fileNameWithoutExtension);
sb.Append("!<BaseAddress>+0x");
sb.Append(rva.ToString("x"));
@ -122,28 +123,6 @@ namespace Internal.DeveloperExperience
}
}
private static string GetFileNameWithoutExtension(string path)
{
path = GetFileName(path);
int i;
if ((i = path.LastIndexOf('.')) == -1)
return path; // No path extension found
else
return path.Substring(0, i);
}
private static string GetFileName(string path)
{
int length = path.Length;
for (int i = length; --i >= 0;)
{
char ch = path[i];
if (ch == '/' || ch == '\\' || ch == ':')
return path.Substring(i + 1, length - i - 1);
}
return path;
}
private static DeveloperExperience s_developerExperience;
}
}

View file

@ -773,7 +773,9 @@ namespace System
// for debug builds we always want to call AllocateNewArray to detect AllocateNewArray bugs
#if !DEBUG
// small arrays are allocated using `new[]` as that is generally faster.
if (length < 2048 / Unsafe.SizeOf<T>())
#pragma warning disable 8500 // sizeof of managed types
if (length < 2048 / sizeof(T))
#pragma warning restore 8500
{
return new T[length];
}

View file

@ -305,8 +305,10 @@ namespace System.Runtime.InteropServices
// Compat note: CLR wouldn't bother with a range check. If someone does this,
// they're likely taking dependency on some CLR implementation detail quirk.
if (checked(ofs + Unsafe.SizeOf<T>()) > size)
#pragma warning disable 8500 // sizeof of managed types
if (checked(ofs + sizeof(T)) > size)
throw new ArgumentOutOfRangeException(nameof(ofs));
#pragma warning restore 8500
IntPtr nativeBytes = AllocCoTaskMem(size);
NativeMemory.Clear((void*)nativeBytes, (nuint)size);
@ -384,8 +386,10 @@ namespace System.Runtime.InteropServices
// Compat note: CLR wouldn't bother with a range check. If someone does this,
// they're likely taking dependency on some CLR implementation detail quirk.
if (checked(ofs + Unsafe.SizeOf<T>()) > size)
#pragma warning disable 8500 // sizeof of managed types
if (checked(ofs + sizeof(T)) > size)
throw new ArgumentOutOfRangeException(nameof(ofs));
#pragma warning restore 8500
IntPtr nativeBytes = AllocCoTaskMem(size);
NativeMemory.Clear((void*)nativeBytes, (nuint)size);

View file

@ -72,13 +72,13 @@ namespace Internal.Runtime.TypeLoader
var sb = new System.Text.StringBuilder();
sb.AppendLine("Generic virtual method pointer lookup failure.");
sb.AppendLine();
sb.AppendLine("Declaring type handle: " + declaringType.LowLevelToStringRawEETypeAddress());
sb.AppendLine("Target type handle: " + targetHandle.LowLevelToStringRawEETypeAddress());
sb.AppendLine("Declaring type handle: " + RuntimeAugments.GetLastResortString(declaringType));
sb.AppendLine("Target type handle: " + RuntimeAugments.GetLastResortString(targetHandle));
sb.AppendLine("Method name: " + methodNameAndSignature.Name);
sb.AppendLine("Instantiation:");
for (int i = 0; i < genericArguments.Length; i++)
{
sb.AppendLine(" Argument " + i.LowLevelToString() + ": " + genericArguments[i].LowLevelToStringRawEETypeAddress());
sb.AppendLine(" Argument " + i.LowLevelToString() + ": " + RuntimeAugments.GetLastResortString(genericArguments[i]));
}
Environment.FailFast(sb.ToString());
@ -616,13 +616,13 @@ namespace Internal.Runtime.TypeLoader
var sb = new System.Text.StringBuilder();
sb.AppendLine("Generic virtual method pointer lookup failure.");
sb.AppendLine();
sb.AppendLine("Declaring type handle: " + declaringType.LowLevelToStringRawEETypeAddress());
sb.AppendLine("Target type handle: " + targetTypeHandle.LowLevelToStringRawEETypeAddress());
sb.AppendLine("Declaring type handle: " + RuntimeAugments.GetLastResortString(declaringType));
sb.AppendLine("Target type handle: " + RuntimeAugments.GetLastResortString(targetTypeHandle));
sb.AppendLine("Method name: " + targetMethodNameAndSignature.Name);
sb.AppendLine("Instantiation:");
for (int i = 0; i < genericArguments.Length; i++)
{
sb.AppendLine(" Argument " + i.LowLevelToString() + ": " + genericArguments[i].LowLevelToStringRawEETypeAddress());
sb.AppendLine(" Argument " + i.LowLevelToString() + ": " + RuntimeAugments.GetLastResortString(genericArguments[i]));
}
Environment.FailFast(sb.ToString());

View file

@ -16,7 +16,7 @@ under the `<Project>` node of your project file.
The Native AOT compiler supports the [documented options](https://docs.microsoft.com/en-us/dotnet/core/deploying/trim-self-contained) for removing unused code (trimming). By default, the compiler tries to very conservatively remove some of the unused code.
🛈 Native AOT difference: The documented `PublishTrimmed` property is implied to be `true` when Native AOT is active.
:information_source: Native AOT difference: The documented `PublishTrimmed` property is implied to be `true` when Native AOT is active.
By default, the compiler tries to maximize compatibility with existing .NET code at the expense of compilation speed and size of the output executable. This allows people to use their existing code that worked well in a fully dynamic mode without hitting issues caused by trimming. To read more about reflection, see the [Reflection in AOT mode](reflection-in-aot-mode.md) document.

View file

@ -222,7 +222,7 @@ BOOL Internal_ExtractFormatA(CPalThread *pthrCurrent, LPCSTR *Fmt, LPSTR Out, LP
*Prefix = PFF_PREFIX_LONGLONG;
}
#endif
if ((*Fmt)[0] == 'I')
if ((*Fmt)[0] == 'I' || (*Fmt)[0] == 'z')
{
/* grab prefix of 'I64' for __int64 */
if ((*Fmt)[1] == '6' && (*Fmt)[2] == '4')

View file

@ -477,6 +477,14 @@ BOOL Silent_ExtractFormatA(LPCSTR *Fmt, LPSTR Out, LPINT Flags, LPINT Width, LPI
*Fmt += 3;
*Prefix = PFF_PREFIX_LONGLONG;
}
/* grab a prefix of 'z' */
else if (**Fmt == 'z')
{
#ifdef HOST_64BIT
*Prefix = PFF_PREFIX_LONGLONG;
#endif
++(*Fmt);
}
/* grab a prefix of 'h' */
else if (**Fmt == 'h')
{

View file

@ -351,6 +351,7 @@ static int __check_float_string(size_t nFloatStrUsed,
break;
#if _INTEGRAL_MAX_BITS >= 64
case _T('z'):
case _T('I'):
if ( (*(format + 1) == _T('6')) &&
(*(format + 2) == _T('4')) )

View file

@ -394,14 +394,16 @@ static const unsigned char __lookuptable_s[] = {
/* 'u' */ 0x08,
/* 'v' */ 0x00,
/* 'w' */ 0x07,
/* 'x' */ 0x08
/* 'x' */ 0x08,
/* 'y' */ 0x00,
/* 'z' */ 0x57
};
//#endif /* defined (_UNICODE) || defined (CPRFLAG) */
#endif /* FORMAT_VALIDATIONS */
#define FIND_CHAR_CLASS(lookuptbl, c) \
((c) < _T(' ') || (c) > _T('x') ? \
((c) < _T(' ') || (c) > _T('z') ? \
CH_OTHER \
: \
(enum CHARTYPE)(lookuptbl[(c)-_T(' ')] & 0xF))
@ -776,6 +778,7 @@ int __cdecl _output (
}
break;
case _T('z'):
case _T('I'):
/*
* In order to handle the I, I32, and I64 size modifiers, we

View file

@ -311,7 +311,9 @@ const char __lookuptable[] = {
/* 'u' */ 0x08,
/* 'v' */ 0x00,
/* 'w' */ 0x07,
/* 'x' */ 0x08
/* 'x' */ 0x08,
/* 'y' */ 0x00,
/* 'z' */ 0x37
};
#endif /* defined (_UNICODE) || defined (CPRFLAG) */
@ -322,7 +324,7 @@ const char __lookuptable[] = {
#endif /* FORMAT_VALIDATIONS */
#define FIND_CHAR_CLASS(lookuptbl, c) \
((c) < _T(' ') || (c) > _T('x') ? \
((c) < _T(' ') || (c) > _T('z') ? \
CH_OTHER \
: \
(enum CHARTYPE)(lookuptbl[(c)-_T(' ')] & 0xF))
@ -696,6 +698,7 @@ int __cdecl _output (
}
break;
case _T('z'):
case _T('I'):
/*
* In order to handle the I, I32, and I64 size modifiers, we

View file

@ -290,7 +290,7 @@ collect_parser.add_argument("-assemblies", dest="assemblies", nargs="+", default
collect_parser.add_argument("-exclude", dest="exclude", nargs="+", default=[], help="A list of files or directories to exclude from the files and directories specified by `-assemblies`.")
collect_parser.add_argument("-pmi_location", help="Path to pmi.dll to use during PMI run. Optional; pmi.dll will be downloaded from Azure Storage if necessary.")
collect_parser.add_argument("-pmi_path", metavar="PMIPATH_DIR", nargs='*', help="Specify a \"load path\" where assemblies can be found during pmi.dll run. Optional; the argument values are translated to PMIPATH environment variable.")
collect_parser.add_argument("-output_mch_path", help="Location to place the final MCH file.")
collect_parser.add_argument("-output_mch_path", help="Location to place the final MCH file. Default is a constructed file name in the current directory.")
collect_parser.add_argument("--merge_mch_files", action="store_true", help="Merge multiple MCH files. Use the -mch_files flag to pass a list of MCH files to merge.")
collect_parser.add_argument("-mch_files", metavar="MCH_FILE", nargs='+', help="Pass a sequence of MCH files which will be merged. Required by --merge_mch_files.")
collect_parser.add_argument("--use_zapdisable", action="store_true", help="Sets DOTNET_ZapDisable=1 and DOTNET_ReadyToRun=0 when doing collection to cause NGEN/ReadyToRun images to not be used, and thus causes JIT compilation and SuperPMI collection of these methods.")
@ -664,6 +664,8 @@ class SuperPMICollect:
self.coreclr_args = coreclr_args
self.temp_location = None
# Pathname for a temporary .MCL file used for noticing SuperPMI replay failures against base MCH.
self.base_fail_mcl_file = None
@ -671,12 +673,22 @@ class SuperPMICollect:
self.base_mch_file = None
# Final .MCH file path
self.final_mch_file = None
if self.coreclr_args.output_mch_path is not None:
self.final_mch_file = os.path.abspath(self.coreclr_args.output_mch_path)
final_mch_dir = os.path.dirname(self.final_mch_file)
if not os.path.isdir(final_mch_dir):
os.makedirs(final_mch_dir)
else:
# Default directory is the current working directory (before we've changed the directory using "TempDir")
default_mch_location = os.path.abspath(os.getcwd())
if not os.path.isdir(default_mch_location):
os.makedirs(default_mch_location)
default_mch_basename = "{}.{}.{}".format(self.coreclr_args.host_os, self.coreclr_args.arch, self.coreclr_args.build_type)
default_mch_extension = "mch"
self.final_mch_file = create_unique_file_name(default_mch_location, default_mch_basename, default_mch_extension)
# The .TOC file path for the clean thin unique .MCH file
self.toc_file = None
self.temp_location = None
self.toc_file = "{}.mct".format(self.final_mch_file)
############################################################################
# Instance Methods
@ -708,19 +720,6 @@ class SuperPMICollect:
self.temp_location = temp_location
if self.coreclr_args.output_mch_path is not None:
self.final_mch_file = os.path.abspath(self.coreclr_args.output_mch_path)
final_mch_dir = os.path.dirname(self.final_mch_file)
if not os.path.isdir(final_mch_dir):
os.makedirs(final_mch_dir)
else:
default_coreclr_bin_mch_location = os.path.join(self.coreclr_args.spmi_location, "mch", "{}.{}.{}".format(self.coreclr_args.host_os, self.coreclr_args.arch, self.coreclr_args.build_type))
if not os.path.isdir(default_coreclr_bin_mch_location):
os.makedirs(default_coreclr_bin_mch_location)
self.final_mch_file = os.path.abspath(os.path.join(default_coreclr_bin_mch_location, "{}.{}.{}.mch".format(self.coreclr_args.host_os, self.coreclr_args.arch, self.coreclr_args.build_type)))
self.toc_file = "{}.mct".format(self.final_mch_file)
# If we have passed temp_dir, then we have a few flags we need
# to check to see where we are in the collection process. Note that this
# functionality exists to help not lose progress during a SuperPMI collection.
@ -757,6 +756,9 @@ class SuperPMICollect:
except Exception as exception:
logging.critical(exception)
if passed:
logging.info("Generated MCH file: %s", self.final_mch_file)
return passed
############################################################################
@ -4272,9 +4274,6 @@ def main(args):
collection = SuperPMICollect(coreclr_args)
success = collection.collect()
if success and coreclr_args.output_mch_path is not None:
logging.info("Generated MCH file: %s", coreclr_args.output_mch_path)
end_time = datetime.datetime.now()
elapsed_time = end_time - begin_time

View file

@ -4079,5 +4079,25 @@ namespace Internal.JitInterface
return supportEnabled ? _compilation.InstructionSetSupport.IsInstructionSetSupported(instructionSet) : false;
}
#endif
private static bool TryReadRvaFieldData(FieldDesc field, byte* buffer, int bufferSize, int valueOffset)
{
Debug.Assert(buffer != null);
Debug.Assert(bufferSize > 0);
Debug.Assert(valueOffset >= 0);
Debug.Assert(field.IsStatic);
Debug.Assert(field.HasRva);
if (!field.IsThreadStatic && field.IsInitOnly && field is EcmaField ecmaField)
{
ReadOnlySpan<byte> rvaData = ecmaField.GetFieldRvaData();
if (rvaData.Length >= bufferSize && valueOffset <= rvaData.Length - bufferSize)
{
rvaData.Slice(valueOffset, bufferSize).CopyTo(new Span<byte>(buffer, bufferSize));
return true;
}
}
return false;
}
}
}

View file

@ -2244,12 +2244,12 @@ namespace Internal.JitInterface
}
[UnmanagedCallersOnly]
private static byte _getReadonlyStaticFieldValue(IntPtr thisHandle, IntPtr* ppException, CORINFO_FIELD_STRUCT_* field, byte* buffer, int bufferSize, byte ignoreMovableObjects)
private static byte _getReadonlyStaticFieldValue(IntPtr thisHandle, IntPtr* ppException, CORINFO_FIELD_STRUCT_* field, byte* buffer, int bufferSize, int valueOffset, byte ignoreMovableObjects)
{
var _this = GetThis(thisHandle);
try
{
return _this.getReadonlyStaticFieldValue(field, buffer, bufferSize, ignoreMovableObjects != 0) ? (byte)1 : (byte)0;
return _this.getReadonlyStaticFieldValue(field, buffer, bufferSize, valueOffset, ignoreMovableObjects != 0) ? (byte)1 : (byte)0;
}
catch (Exception ex)
{
@ -2838,7 +2838,7 @@ namespace Internal.JitInterface
callbacks[148] = (delegate* unmanaged<IntPtr, IntPtr*, CORINFO_CLASS_STRUCT_*, byte>)&_isRIDClassDomainID;
callbacks[149] = (delegate* unmanaged<IntPtr, IntPtr*, CORINFO_CLASS_STRUCT_*, void**, uint>)&_getClassDomainID;
callbacks[150] = (delegate* unmanaged<IntPtr, IntPtr*, CORINFO_FIELD_STRUCT_*, void**, void*>)&_getFieldAddress;
callbacks[151] = (delegate* unmanaged<IntPtr, IntPtr*, CORINFO_FIELD_STRUCT_*, byte*, int, byte, byte>)&_getReadonlyStaticFieldValue;
callbacks[151] = (delegate* unmanaged<IntPtr, IntPtr*, CORINFO_FIELD_STRUCT_*, byte*, int, int, byte, byte>)&_getReadonlyStaticFieldValue;
callbacks[152] = (delegate* unmanaged<IntPtr, IntPtr*, CORINFO_FIELD_STRUCT_*, byte*, CORINFO_CLASS_STRUCT_*>)&_getStaticFieldCurrentClass;
callbacks[153] = (delegate* unmanaged<IntPtr, IntPtr*, CORINFO_SIG_INFO*, void**, IntPtr>)&_getVarArgsHandle;
callbacks[154] = (delegate* unmanaged<IntPtr, IntPtr*, CORINFO_SIG_INFO*, byte>)&_canGetVarArgsHandle;

View file

@ -308,7 +308,7 @@ FUNCTIONS
bool isRIDClassDomainID(CORINFO_CLASS_HANDLE cls);
unsigned getClassDomainID (CORINFO_CLASS_HANDLE cls, void **ppIndirection);
void* getFieldAddress(CORINFO_FIELD_HANDLE field, VOIDSTARSTAR ppIndirection);
bool getReadonlyStaticFieldValue(CORINFO_FIELD_HANDLE field, uint8_t *buffer, int bufferSize, bool ignoreMovableObjects);
bool getReadonlyStaticFieldValue(CORINFO_FIELD_HANDLE field, uint8_t *buffer, int bufferSize, int valueOffset, bool ignoreMovableObjects);
CORINFO_CLASS_HANDLE getStaticFieldCurrentClass(CORINFO_FIELD_HANDLE field, BoolStar pIsSpeculative);
CORINFO_VARARGS_HANDLE getVarArgsHandle(CORINFO_SIG_INFO *pSig, void **ppIndirection);
bool canGetVarArgsHandle(CORINFO_SIG_INFO *pSig);

View file

@ -22,8 +22,6 @@ namespace Internal.IL.Stubs
{
case "AsPointer":
return new ILStubMethodIL(method, new byte[] { (byte)ILOpcode.ldarg_0, (byte)ILOpcode.conv_u, (byte)ILOpcode.ret }, Array.Empty<LocalVariableDefinition>(), null);
case "SizeOf":
return EmitSizeOf(method);
case "As":
case "AsRef":
return new ILStubMethodIL(method, new byte[] { (byte)ILOpcode.ldarg_0, (byte)ILOpcode.ret }, Array.Empty<LocalVariableDefinition>(), null);
@ -98,19 +96,6 @@ namespace Internal.IL.Stubs
return null;
}
private static MethodIL EmitSizeOf(MethodDesc method)
{
Debug.Assert(method.Signature.IsStatic && method.Signature.Length == 0);
TypeSystemContext context = method.Context;
ILEmitter emit = new ILEmitter();
ILCodeStream codeStream = emit.NewCodeStream();
codeStream.Emit(ILOpcode.sizeof_, emit.NewToken(context.GetSignatureVariable(0, method: true)));
codeStream.Emit(ILOpcode.ret);
return emit.Link(method);
}
private static MethodIL EmitAdd(MethodDesc method)
{
Debug.Assert(method.Signature.IsStatic && method.Signature.Length == 2);

View file

@ -10,7 +10,7 @@ using System.Runtime.InteropServices;
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyProduct("parse-hb-log")]
[assembly: AssemblyCopyright("Copyright © 2019")]
[assembly: AssemblyCopyright("Copyright \u00A9 2019")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]

View file

@ -20,19 +20,19 @@
// pass-zero-pass1-nX-ia1-thread.txt
//
// thread tells you the thread index running on each proc at each timestamp.
// 4240| 63₉ | 65₉ | 62₉ | 56₁₀| 87₁₀|109₁₀| 59₉ | 70₁₀| 78₉ | 64₉ | 71₁₀|107₁₀|
// 4240| 63\u2089 | 65\u2089 | 62\u2089 | 56\u2081\u2080| 87\u2081\u2080|109\u2081\u2080| 59\u2089 | 70\u2081\u2080| 78\u2089 | 64\u2089 | 71\u2081\u2080|107\u2081\u2080|
//
// 4240 is the 4240th ms since we started recording.
// the numbers following are the thread indices and the subscript is the # of samples
// observed during that ms. The tool can do a time unit that's larger than 1ms.
//
// alloc tells you which alloc heap the each proc, for the same timestamp
// 4240| 56 | 57 | 58ⁱ | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 |
// 4240| 56 | 57 | 58\u2071 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 |
// 56 means heap 56. The subscript i means we did a SetIdealProcessor during this
// ms. You may also see
// ᵖ meaning we went through the balancing logic due to the proc for the thread changed
// \u1D56 meaning we went through the balancing logic due to the proc for the thread changed
// from the home heap.
// ᵐ meaning while we were going through balancing logic the proc switched.
// \u1D50 meaning while we were going through balancing logic the proc switched.
using System;
using System.Collections.Generic;
@ -637,15 +637,15 @@ namespace parse_hb_log
string strFormattedFlags = "";
if ((flags & (int)HeapBalanceFlagMask.MutipleProcs) != 0)
{
strFormattedFlags += "ᵐ";
strFormattedFlags += "\u1D50";
}
if ((flags & (int)HeapBalanceFlagMask.EnterDueToProc) != 0)
{
strFormattedFlags += "ᵖ";
strFormattedFlags += "\u1D56";
}
if ((flags & (int)HeapBalanceFlagMask.SetIdeal) != 0)
{
strFormattedFlags += "ⁱ";
strFormattedFlags += "\u2071";
}
return strFormattedFlags;
@ -696,7 +696,7 @@ namespace parse_hb_log
// see https://en.wikipedia.org/wiki/Unicode_subscripts_and_superscripts
// for subscript characters
// ᵐ,ᵖ,ⁱ
// \u1D50,\u1D56,\u2071
if (fPrint)
{
int procsHadSamples = 0;

View file

@ -33,7 +33,7 @@ namespace ILVerify
public Option<bool> Statistics { get; } =
new(new[] { "--statistics" }, "Print verification statistics");
public Option<bool> Verbose { get; } =
new(new[] { "--verbose", "-v" }, "Verbose output");
new(new[] { "--verbose" }, "Verbose output");
public Option<bool> Tokens { get; } =
new(new[] { "--tokens", "-t" }, "Include metadata tokens in error messages");

View file

@ -456,7 +456,7 @@ namespace ILVerify
private static int Main(string[] args) =>
new CommandLineBuilder(new ILVerifyRootCommand())
.UseTokenReplacer(Helpers.TryReadResponseFile)
.UseVersionOption()
.UseVersionOption("--version", "-v")
.UseHelp()
.UseParseErrorReporting()
.Build()

View file

@ -3,7 +3,7 @@
<PropertyGroup>
<TargetFramework>$(NetCoreAppToolCurrent)-windows</TargetFramework>
<UseWindowsForms>true</UseWindowsForms>
<OutputType>Exe</OutputType>
<OutputType>WinExe</OutputType>
<RootNamespace>DependencyLogViewer</RootNamespace>
</PropertyGroup>

View file

@ -29,10 +29,18 @@ namespace ILCompiler.DependencyAnalysis
public override bool StaticDependenciesAreComputed => true;
public override void EncodeData(ref ObjectDataBuilder dataBuilder, NodeFactory factory, bool relocsOnly)
{
if (factory.Target.SupportsRelativePointers)
{
dataBuilder.RequireInitialAlignment(sizeof(int));
dataBuilder.EmitReloc(Target, RelocType.IMAGE_REL_BASED_RELPTR32);
}
else
{
dataBuilder.RequireInitialPointerAlignment();
dataBuilder.EmitPointerReloc(Target);
}
}
// At minimum, Target needs to be reported as a static dependency by inheritors.
public abstract override IEnumerable<DependencyListEntry> GetStaticDependencies(NodeFactory factory);

View file

@ -12,7 +12,7 @@ namespace ILCompiler.DependencyAnalysis
public class GCStaticsNode : ObjectNode, ISymbolDefinitionNode, ISortableSymbolNode
{
private readonly MetadataType _type;
private readonly TypePreinit.PreinitializationInfo _preinitializationInfo;
private readonly GCStaticsPreInitDataNode _preinitializationInfo;
public GCStaticsNode(MetadataType type, PreinitializationManager preinitManager)
{
@ -21,7 +21,10 @@ namespace ILCompiler.DependencyAnalysis
_type = type;
if (preinitManager.IsPreinitialized(type))
_preinitializationInfo = preinitManager.GetPreinitializationInfo(_type);
{
var info = preinitManager.GetPreinitializationInfo(_type);
_preinitializationInfo = new GCStaticsPreInitDataNode(info);
}
}
protected override string GetName(NodeFactory factory) => this.GetMangledName(factory.NameMangler);
@ -45,12 +48,6 @@ namespace ILCompiler.DependencyAnalysis
return factory.GCStaticEEType(map);
}
public GCStaticsPreInitDataNode NewPreInitDataNode()
{
Debug.Assert(_preinitializationInfo != null && _preinitializationInfo.IsPreinitialized);
return new GCStaticsPreInitDataNode(_preinitializationInfo);
}
protected override DependencyList ComputeNonRelocationBasedDependencies(NodeFactory factory)
{
DependencyList dependencyList = new DependencyList();
@ -79,19 +76,37 @@ namespace ILCompiler.DependencyAnalysis
{
ObjectDataBuilder builder = new ObjectDataBuilder(factory, relocsOnly);
// Even though we're only generating 32-bit relocs here (if SupportsRelativePointers),
// align the blob at pointer boundary since at runtime we're going to write a pointer in here.
builder.RequireInitialPointerAlignment();
int delta = GCStaticRegionConstants.Uninitialized;
// Set the flag that indicates next pointer following MethodTable is the preinit data
bool isPreinitialized = _preinitializationInfo != null && _preinitializationInfo.IsPreinitialized;
bool isPreinitialized = _preinitializationInfo != null;
if (isPreinitialized)
delta |= GCStaticRegionConstants.HasPreInitializedData;
if (factory.Target.SupportsRelativePointers)
builder.EmitReloc(GetGCStaticEETypeNode(factory), RelocType.IMAGE_REL_BASED_RELPTR32, delta);
else
builder.EmitPointerReloc(GetGCStaticEETypeNode(factory), delta);
if (isPreinitialized)
builder.EmitPointerReloc(factory.GCStaticsPreInitDataNode(_type));
{
if (factory.Target.SupportsRelativePointers)
builder.EmitReloc(_preinitializationInfo, RelocType.IMAGE_REL_BASED_RELPTR32);
else
builder.EmitPointerReloc(_preinitializationInfo);
}
else if (factory.Target.SupportsRelativePointers && factory.Target.PointerSize == 8)
{
// At runtime, we replace the EEType pointer with a full pointer to the data on the GC
// heap. If the EEType pointer was 32-bit relative, and we don't have a 32-bit relative
// pointer to the preinit data following it, and the pointer size on the target
// machine is 8, we need to emit additional 4 bytes to make room for the full pointer.
builder.EmitZeros(4);
}
builder.AddSymbol(this);

View file

@ -141,7 +141,11 @@ namespace ILCompiler.DependencyAnalysis
foreach (var module in sortedModules)
{
builder.EmitPointerReloc(factory.MethodEntrypoint(module.GetGlobalModuleType().GetStaticConstructor()));
IMethodNode entrypoint = factory.MethodEntrypoint(module.GetGlobalModuleType().GetStaticConstructor());
if (factory.Target.SupportsRelativePointers)
builder.EmitReloc(entrypoint, RelocType.IMAGE_REL_BASED_RELPTR32);
else
builder.EmitPointerReloc(entrypoint);
}
var result = builder.ToObjectData();

View file

@ -206,13 +206,6 @@ namespace ILCompiler.DependencyAnalysis
}
});
_GCStaticsPreInitDataNodes = new NodeCache<MetadataType, GCStaticsPreInitDataNode>((MetadataType type) =>
{
ISymbolNode gcStaticsNode = TypeGCStaticsSymbol(type);
Debug.Assert(gcStaticsNode is GCStaticsNode);
return ((GCStaticsNode)gcStaticsNode).NewPreInitDataNode();
});
_GCStaticIndirectionNodes = new NodeCache<MetadataType, EmbeddedObjectNode>((MetadataType type) =>
{
ISymbolNode gcStaticsNode = TypeGCStaticsSymbol(type);
@ -627,13 +620,6 @@ namespace ILCompiler.DependencyAnalysis
return _GCStatics.GetOrAdd(type);
}
private NodeCache<MetadataType, GCStaticsPreInitDataNode> _GCStaticsPreInitDataNodes;
public GCStaticsPreInitDataNode GCStaticsPreInitDataNode(MetadataType type)
{
return _GCStaticsPreInitDataNodes.GetOrAdd(type);
}
private NodeCache<MetadataType, EmbeddedObjectNode> _GCStaticIndirectionNodes;
public EmbeddedObjectNode GCStaticIndirection(MetadataType type)

View file

@ -4,6 +4,7 @@
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Runtime.InteropServices;
using ILCompiler.DependencyAnalysis;
@ -402,6 +403,7 @@ namespace ILCompiler
break;
case ILOpcode.call:
case ILOpcode.callvirt:
{
MethodDesc method = (MethodDesc)methodIL.GetObject(reader.ReadILToken());
MethodSignature methodSig = method.Signature;
@ -427,6 +429,13 @@ namespace ILCompiler
methodParams[i] = stack.PopIntoLocation(GetArgType(method, i));
}
if (opcode == ILOpcode.callvirt)
{
// Only support non-virtual methods for now + we don't emulate NRE on null this
if (method.IsVirtual || methodParams[0] == null)
return Status.Fail(methodIL.OwningMethod, opcode);
}
Value retVal;
if (!method.IsIntrinsic || !TryHandleIntrinsicCall(method, methodParams, out retVal))
{
@ -598,6 +607,11 @@ namespace ILCompiler
return Status.Fail(methodIL.OwningMethod, opcode, "Reference field");
}
if (field.FieldType.IsByRef)
{
return Status.Fail(methodIL.OwningMethod, opcode, "Byref field");
}
var settableInstance = instance.Value as IHasInstanceFields;
if (settableInstance == null)
{
@ -2259,28 +2273,65 @@ namespace ILCompiler
public override ReferenceTypeValue ToForeignInstance(int baseInstructionCounter) => this;
}
private sealed class StringInstance : ReferenceTypeValue
private sealed class StringInstance : ReferenceTypeValue, IHasInstanceFields
{
private readonly string _value;
private readonly byte[] _value;
private string ValueAsString
{
get
{
FieldDesc firstCharField = Type.GetField("_firstChar");
int startOffset = firstCharField.Offset.AsInt;
int length = _value.Length - startOffset - sizeof(char) /* terminating null */;
return new string(MemoryMarshal.Cast<byte, char>(
((ReadOnlySpan<byte>)_value).Slice(startOffset, length)));
}
}
public StringInstance(TypeDesc stringType, string value)
: base(stringType)
{
_value = value;
_value = ConstructStringInstance(stringType, value);
}
private static byte[] ConstructStringInstance(TypeDesc stringType, ReadOnlySpan<char> value)
{
int pointerSize = stringType.Context.Target.PointerSize;
var bytes = new byte[
pointerSize /* MethodTable */
+ sizeof(int) /* length */
+ (value.Length * sizeof(char)) /* bytes */
+ sizeof(char) /* null terminator */];
FieldDesc lengthField = stringType.GetField("_stringLength");
Debug.Assert(lengthField.FieldType.IsWellKnownType(WellKnownType.Int32)
&& lengthField.Offset.AsInt == pointerSize);
new FieldAccessor(bytes).SetField(lengthField, ValueTypeValue.FromInt32(value.Length));
FieldDesc firstCharField = stringType.GetField("_firstChar");
Debug.Assert(firstCharField.FieldType.IsWellKnownType(WellKnownType.Char)
&& firstCharField.Offset.AsInt == pointerSize + sizeof(int) /* length */);
value.CopyTo(MemoryMarshal.Cast<byte, char>(((Span<byte>)bytes).Slice(firstCharField.Offset.AsInt)));
return bytes;
}
public override void WriteFieldData(ref ObjectDataBuilder builder, NodeFactory factory)
{
builder.EmitPointerReloc(factory.SerializedStringObject(_value));
builder.EmitPointerReloc(factory.SerializedStringObject(ValueAsString));
}
public override bool GetRawData(NodeFactory factory, out object data)
{
data = factory.SerializedStringObject(_value);
data = factory.SerializedStringObject(ValueAsString);
return true;
}
public override ReferenceTypeValue ToForeignInstance(int baseInstructionCounter) => this;
Value IHasInstanceFields.GetField(FieldDesc field) => new FieldAccessor(_value).GetField(field);
void IHasInstanceFields.SetField(FieldDesc field, Value value) => ThrowHelper.ThrowInvalidProgramException();
ByRefValue IHasInstanceFields.GetFieldAddress(FieldDesc field) => new FieldAccessor(_value).GetFieldAddress(field);
}
#pragma warning disable CA1852

View file

@ -2998,8 +2998,16 @@ namespace Internal.JitInterface
return 0;
}
private bool getReadonlyStaticFieldValue(CORINFO_FIELD_STRUCT_* fieldHandle, byte* buffer, int bufferSize, bool ignoreMovableObjects)
private bool getReadonlyStaticFieldValue(CORINFO_FIELD_STRUCT_* fieldHandle, byte* buffer, int bufferSize, int valueOffset, bool ignoreMovableObjects)
{
Debug.Assert(fieldHandle != null);
FieldDesc field = HandleToObject(fieldHandle);
// For crossgen2 we only support RVA fields
if (_compilation.NodeFactory.CompilationModuleGroup.VersionsWithType(field.OwningType) && field.HasRva)
{
return TryReadRvaFieldData(field, buffer, bufferSize, valueOffset);
}
return false;
}

View file

@ -13,6 +13,7 @@ using Internal.ReadyToRunConstants;
using ILCompiler;
using ILCompiler.DependencyAnalysis;
using Internal.TypeSystem.Ecma;
#if SUPPORT_JIT
using MethodCodeNode = Internal.Runtime.JitSupport.JitMethodCodeNode;
@ -2213,17 +2214,24 @@ namespace Internal.JitInterface
return index;
}
private bool getReadonlyStaticFieldValue(CORINFO_FIELD_STRUCT_* fieldHandle, byte* buffer, int bufferSize, bool ignoreMovableObjects)
private bool getReadonlyStaticFieldValue(CORINFO_FIELD_STRUCT_* fieldHandle, byte* buffer, int bufferSize, int valueOffset, bool ignoreMovableObjects)
{
Debug.Assert(fieldHandle != null);
Debug.Assert(buffer != null);
Debug.Assert(bufferSize > 0);
Debug.Assert(valueOffset >= 0);
FieldDesc field = HandleToObject(fieldHandle);
Debug.Assert(field.IsStatic);
if (!field.IsThreadStatic && field.IsInitOnly && field.OwningType is MetadataType owningType)
{
if (field.HasRva)
{
return TryReadRvaFieldData(field, buffer, bufferSize, valueOffset);
}
PreinitializationManager preinitManager = _compilation.NodeFactory.PreinitializationManager;
if (preinitManager.IsPreinitialized(owningType))
{
@ -2234,6 +2242,7 @@ namespace Internal.JitInterface
if (value == null)
{
Debug.Assert(valueOffset == 0);
Debug.Assert(bufferSize == targetPtrSize);
// Write "null" to buffer
@ -2246,13 +2255,15 @@ namespace Internal.JitInterface
switch (data)
{
case byte[] bytes:
Debug.Assert(bufferSize == bytes.Length);
// Ensure we have enough room in the buffer, it can be a large struct
bytes.AsSpan().CopyTo(new Span<byte>(buffer, bufferSize));
if (bytes.Length >= bufferSize && valueOffset <= bytes.Length - bufferSize)
{
bytes.AsSpan(valueOffset, bufferSize).CopyTo(new Span<byte>(buffer, bufferSize));
return true;
}
return false;
case FrozenObjectNode or FrozenStringNode:
Debug.Assert(valueOffset == 0);
Debug.Assert(bufferSize == targetPtrSize);
// save handle's value to buffer

View file

@ -24,9 +24,9 @@ namespace ILCompiler
public Option<bool> Optimize { get; } =
new(new[] { "--optimize", "-O" }, "Enable optimizations");
public Option<bool> OptimizeSpace { get; } =
new(new[] { "--optimize-space", "-Os" }, "Enable optimizations, favor code space");
new(new[] { "--optimize-space", "--Os" }, "Enable optimizations, favor code space");
public Option<bool> OptimizeTime { get; } =
new(new[] { "--optimize-time", "-Ot" }, "Enable optimizations, favor code speed");
new(new[] { "--optimize-time", "--Ot" }, "Enable optimizations, favor code speed");
public Option<string[]> MibcFilePaths { get; } =
new(new[] { "--mibc", "-m" }, Array.Empty<string>, "Mibc file(s) for profile guided optimization");
public Option<bool> EnableDebugInfo { get; } =

View file

@ -657,7 +657,7 @@ namespace ILCompiler
private static int Main(string[] args) =>
new CommandLineBuilder(new ILCompilerRootCommand(args))
.UseTokenReplacer(Helpers.TryReadResponseFile)
.UseVersionOption("-v")
.UseVersionOption("--version", "-v")
.UseHelp(context => context.HelpBuilder.CustomizeLayout(ILCompilerRootCommand.GetExtendedHelp))
.UseParseErrorReporting()
.Build()
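The repeated switch to UseVersionOption("--version", "-v") reserves the bare -v alias for the version option, which is why the --verbose options elsewhere in this diff drop their -v alias. A minimal sketch of the resulting alias layout, assuming only the System.CommandLine builder calls already visible above (the --verbose option here is illustrative):

using System.CommandLine;
using System.CommandLine.Builder;
using System.CommandLine.Parsing;

class VersionAliasSketch
{
    static int Main(string[] args)
    {
        // Verbosity deliberately has no "-v" alias; "-v" now belongs to the version option.
        var verbose = new Option<bool>(new[] { "--verbose" }, "Adjust verbosity level");
        var root = new RootCommand("sample tool") { verbose };

        return new CommandLineBuilder(root)
            .UseVersionOption("--version", "-v") // both aliases print the tool version
            .UseHelp()
            .UseParseErrorReporting()
            .Build()
            .Invoke(args);
    }
}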

View file

@ -8,6 +8,7 @@
<DefineConstants>$(DefineConstants);INCLUDE_EXPECTATIONS</DefineConstants>
<WarningLevel>0</WarningLevel>
<AnalysisLevel>0</AnalysisLevel>
<RunAnalyzers>true</RunAnalyzers>
</PropertyGroup>
<ItemGroup>

View file

@ -188,12 +188,9 @@ namespace Mono.Linker.Tests.Cases.RequiresCapability
public static void GenericTypeWithStaticMethodWhichRequires () { }
}
// NativeAOT doesn't produce Requires warnings in Generics https://github.com/dotnet/runtime/issues/68688
// [ExpectedWarning("IL2026", "--GenericTypeWithStaticMethodWhichRequires--"]
[ExpectedWarning ("IL2026", "--GenericTypeWithStaticMethodWhichRequires--", ProducedBy = ProducedBy.Analyzer | ProducedBy.Trimmer)]
// [ExpectedWarning("IL3002", "--GenericTypeWithStaticMethodWhichRequires--", ProducedBy = ProducedBy.Analyzer | ProducedBy.NativeAot)]
// NativeAot is missing ldftn detection: https://github.com/dotnet/runtime/issues/68786
[ExpectedWarning ("IL2026", "--GenericTypeWithStaticMethodWhichRequires--", ProducedBy = ProducedBy.Trimmer | ProducedBy.Analyzer)]
[ExpectedWarning ("IL3002", "--GenericTypeWithStaticMethodWhichRequires--", ProducedBy = ProducedBy.Analyzer)]
// [ExpectedWarning("IL3050", "--GenericTypeWithStaticMethodWhichRequires--", ProducedBy = ProducedBy.Analyzer | ProducedBy.NativeAot)]
[ExpectedWarning ("IL3050", "--GenericTypeWithStaticMethodWhichRequires--", ProducedBy = ProducedBy.Analyzer)]
public static void GenericTypeWithStaticMethodViaLdftn ()
{

View file

@ -0,0 +1,30 @@
// Copyright (c) .NET Foundation and contributors. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System;
using System.Collections.Generic;
using System.Diagnostics.CodeAnalysis;
using System.Reflection;
using System.Threading.Tasks;
using Mono.Linker.Tests.Cases.Expectations.Assertions;
using Mono.Linker.Tests.Cases.Expectations.Metadata;
namespace Mono.Linker.Tests.Cases.RequiresCapability
{
[ExpectedNoWarnings]
[SkipKeptItemsValidation]
[SetupCompileArgument ("/optimize+")]
[Define ("RELEASE")]
[SetupCompileArgument ("/main:Mono.Linker.Tests.Cases.RequiresCapability.RequiresInCompilerGeneratedCodeRelease")]
[SandboxDependency ("RequiresInCompilerGeneratedCode.cs")]
class RequiresInCompilerGeneratedCodeRelease
{
// This test just links the RequiresInCompilerGeneratedCode test in the Release configuration, to test
// with optimizations enabled for closures and state machine types.
// Sometimes the compiler optimizes away unused references to lambdas.
public static void Main ()
{
RequiresInCompilerGeneratedCode.Main ();
}
}
}

View file

@ -4,11 +4,11 @@
// The MIT License(MIT)
// =====================
//
// Copyright © `2015-2017` `Lucas Meijer`
// Copyright \u00A9 `2015-2017` `Lucas Meijer`
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the “Software”), to deal in the Software without
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
@ -18,7 +18,7 @@
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT

View file

@ -102,8 +102,6 @@ namespace Mono.Linker.Tests.TestCasesRunner
List<MessageContainer> loggedMessages = logger.GetLoggedMessages ();
List<(IMemberDefinition, CustomAttribute)> expectedNoWarningsAttributes = new List<(IMemberDefinition, CustomAttribute)> ();
foreach (var attrProvider in GetAttributeProviders (original)) {
if (attrProvider.ToString () is string mystring && mystring.Contains ("RequiresInCompilerGeneratedCode/SuppressInLambda"))
Debug.WriteLine ("Print");
foreach (var attr in attrProvider.CustomAttributes) {
if (!IsProducedByNativeAOT (attr))
continue;
@ -159,10 +157,6 @@ namespace Mono.Linker.Tests.TestCasesRunner
bool expectedWarningFound = false;
foreach (var loggedMessage in loggedMessages) {
if (loggedMessage.ToString ().Contains ("RequiresInCompilerGeneratedCode.SuppressInLambda")) {
Debug.WriteLine ("Print 2");
}
if (loggedMessage.Category != MessageCategory.Warning || loggedMessage.Code != expectedWarningCodeNumber)
continue;
@ -213,15 +207,17 @@ namespace Mono.Linker.Tests.TestCasesRunner
if (attrProvider is not IMemberDefinition expectedMember)
continue;
string actualName = methodDesc.OwningType.ToString ().Replace ("+", ".") + "." + methodDesc.Name;
if (actualName.Contains (expectedMember.DeclaringType.FullName.Replace ("/", ".")) &&
actualName.Contains ("<" + expectedMember.Name + ">")) {
string? actualName = GetActualOriginDisplayName (methodDesc);
string expectedTypeName = ConvertSignatureToIlcFormat (GetExpectedOriginDisplayName (expectedMember.DeclaringType));
if (actualName?.Contains (expectedTypeName) == true &&
actualName?.Contains ("<" + expectedMember.Name + ">") == true) {
expectedWarningFound = true;
loggedMessages.Remove (loggedMessage);
break;
}
if (actualName.StartsWith (expectedMember.DeclaringType.FullName) &&
actualName.Contains (".cctor") && (expectedMember is FieldDefinition || expectedMember is PropertyDefinition)) {
if (actualName?.StartsWith (expectedTypeName) == true &&
actualName?.Contains (".cctor") == true &&
(expectedMember is FieldDefinition || expectedMember is PropertyDefinition)) {
expectedWarningFound = true;
loggedMessages.Remove (loggedMessage);
break;
@ -359,6 +355,7 @@ namespace Mono.Linker.Tests.TestCasesRunner
provider switch {
MethodDefinition method => method.GetDisplayName (),
FieldDefinition field => field.GetDisplayName (),
TypeDefinition type => type.GetDisplayName (),
IMemberDefinition member => member.FullName,
AssemblyDefinition asm => asm.Name.Name,
_ => throw new NotImplementedException ()

View file

@ -1,4 +1,4 @@
// Copyright (c) .NET Foundation and contributors. All rights reserved.
// Copyright (c) .NET Foundation and contributors. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System;

View file

@ -32,11 +32,11 @@ namespace ILCompiler
public Option<bool> Optimize { get; } =
new(new[] { "--optimize", "-O" }, SR.EnableOptimizationsOption);
public Option<bool> OptimizeDisabled { get; } =
new(new[] { "--optimize-disabled", "-Od" }, SR.DisableOptimizationsOption);
new(new[] { "--optimize-disabled", "--Od" }, SR.DisableOptimizationsOption);
public Option<bool> OptimizeSpace { get; } =
new(new[] { "--optimize-space", "-Os" }, SR.OptimizeSpaceOption);
new(new[] { "--optimize-space", "--Os" }, SR.OptimizeSpaceOption);
public Option<bool> OptimizeTime { get; } =
new(new[] { "--optimize-time", "-Ot" }, SR.OptimizeSpeedOption);
new(new[] { "--optimize-time", "--Ot" }, SR.OptimizeSpeedOption);
public Option<bool> InputBubble { get; } =
new(new[] { "--inputbubble" }, SR.InputBubbleOption);
public Option<Dictionary<string, string>> InputBubbleReferenceFilePaths { get; } =

View file

@ -891,7 +891,7 @@ namespace ILCompiler
private static int Main(string[] args) =>
new CommandLineBuilder(new Crossgen2RootCommand(args))
.UseTokenReplacer(Helpers.TryReadResponseFile)
.UseVersionOption("-v")
.UseVersionOption("--version", "-v")
.UseHelp(context => context.HelpBuilder.CustomizeLayout(Crossgen2RootCommand.GetExtendedHelp))
.UseParseErrorReporting()
.Build()

View file

@ -7,6 +7,7 @@
<NoWarn>8002,NU1701</NoWarn>
<Platforms>x64;x86;arm64;arm;loongarch64</Platforms>
<PlatformTarget>AnyCPU</PlatformTarget>
<AppendRuntimeIdentifierToOutputPath>false</AppendRuntimeIdentifierToOutputPath>
<AppendTargetFrameworkToOutputPath>false</AppendTargetFrameworkToOutputPath>
<AppendTargetFrameworkToOutputPath Condition="'$(BuildingInsideVisualStudio)' == 'true'">true</AppendTargetFrameworkToOutputPath>
<GenerateRuntimeConfigurationFiles>true</GenerateRuntimeConfigurationFiles>

View file

@ -162,7 +162,7 @@ struct JitInterfaceCallbacks
bool (* isRIDClassDomainID)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_CLASS_HANDLE cls);
unsigned (* getClassDomainID)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_CLASS_HANDLE cls, void** ppIndirection);
void* (* getFieldAddress)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_FIELD_HANDLE field, void** ppIndirection);
bool (* getReadonlyStaticFieldValue)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_FIELD_HANDLE field, uint8_t* buffer, int bufferSize, bool ignoreMovableObjects);
bool (* getReadonlyStaticFieldValue)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_FIELD_HANDLE field, uint8_t* buffer, int bufferSize, int valueOffset, bool ignoreMovableObjects);
CORINFO_CLASS_HANDLE (* getStaticFieldCurrentClass)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_FIELD_HANDLE field, bool* pIsSpeculative);
CORINFO_VARARGS_HANDLE (* getVarArgsHandle)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_SIG_INFO* pSig, void** ppIndirection);
bool (* canGetVarArgsHandle)(void * thisHandle, CorInfoExceptionClass** ppException, CORINFO_SIG_INFO* pSig);
@ -1665,10 +1665,11 @@ public:
CORINFO_FIELD_HANDLE field,
uint8_t* buffer,
int bufferSize,
int valueOffset,
bool ignoreMovableObjects)
{
CorInfoExceptionClass* pException = nullptr;
bool temp = _callbacks->getReadonlyStaticFieldValue(_thisHandle, &pException, field, buffer, bufferSize, ignoreMovableObjects);
bool temp = _callbacks->getReadonlyStaticFieldValue(_thisHandle, &pException, field, buffer, bufferSize, valueOffset, ignoreMovableObjects);
if (pException != nullptr) throw pException;
return temp;
}

View file

@ -80,7 +80,7 @@ namespace Microsoft.Diagnostics.Tools.Pgo
private Option<bool> _includeReadyToRun { get; } =
new("--includeReadyToRun", "Include ReadyToRun methods in the trace file");
private Option<Verbosity> _verbosity { get; } =
new(new[] { "--verbose", "-v" }, () => Verbosity.normal, "Adjust verbosity level. Supported levels are minimal, normal, detailed, and diagnostic");
new(new[] { "--verbose" }, () => Verbosity.normal, "Adjust verbosity level. Supported levels are minimal, normal, detailed, and diagnostic");
private Option<bool> _isSorted { get; } =
new("--sorted", "Generate sorted output.");
private Option<bool> _showTimestamp { get; } =

View file

@ -162,7 +162,7 @@ namespace Microsoft.Diagnostics.Tools.Pgo
private static int Main(string[] args) =>
new CommandLineBuilder(new PgoRootCommand(args))
.UseTokenReplacer(Helpers.TryReadResponseFile)
.UseVersionOption("-v")
.UseVersionOption("--version", "-v")
.UseHelp(context => context.HelpBuilder.CustomizeLayout(PgoRootCommand.GetExtendedHelp))
.UseParseErrorReporting()
.Build()
@ -776,7 +776,7 @@ namespace Microsoft.Diagnostics.Tools.Pgo
int shareWidth = (int)(Math.Round(share * tableWidth));
bool lastRow = (i == rows.Length - 1);
Console.Write($" {(lastRow ? "≥" : " ")}{i,2}: [");
Console.Write($" {(lastRow ? "\u2265" : " ")}{i,2}: [");
Console.Write(new string('#', shareWidth));
Console.Write(new string('.', tableWidth - shareWidth));
Console.Write("] ");
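For reference, the bar above is just the bucket's share rounded onto a fixed-width row; a small worked example of the same computation (the values are made up):

using System;

double share = 0.42;                                    // fraction of samples in this bucket
int tableWidth = 40;                                    // total cells in the bar
int shareWidth = (int)Math.Round(share * tableWidth);   // 17 filled cells
Console.Write("[");
Console.Write(new string('#', shareWidth));             // filled portion
Console.Write(new string('.', tableWidth - shareWidth));
Console.WriteLine("]");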

View file

@ -499,6 +499,7 @@ namespace R2RDump
public static int Main(string[] args) =>
new CommandLineBuilder(new R2RDumpRootCommand())
.UseTokenReplacer(Helpers.TryReadResponseFile)
.UseVersionOption("--version", "-v")
.UseHelp()
.UseParseErrorReporting()
.Build()

View file

@ -49,7 +49,7 @@ namespace R2RDump
public Option<bool> HideTransitions { get; } =
new(new[] { "--hide-transitions", "--ht" }, "Don't include GC transitions in disassembly output");
public Option<bool> Verbose { get; } =
new(new[] { "--verbose", "-v" }, "Dump disassembly, unwindInfo, gcInfo and sectionContents");
new(new[] { "--verbose" }, "Dump disassembly, unwindInfo, gcInfo and sectionContents");
public Option<bool> Diff { get; } =
new(new[] { "--diff" }, "Compare two R2R images");
public Option<bool> DiffHideSameDisasm { get; } =

View file

@ -301,15 +301,14 @@ namespace R2RTest
public Option<DirectoryInfo> AspNetPath { get; } =
new Option<DirectoryInfo>(new[] { "--asp-net-path", "-asp" }, "Path to SERP's ASP.NET Core folder").AcceptExistingOnly();
static int Main(string[] args)
{
return new CommandLineBuilder(new R2RTestRootCommand())
private static int Main(string[] args) =>
new CommandLineBuilder(new R2RTestRootCommand())
.UseVersionOption("--version", "-v")
.UseHelp()
.UseParseErrorReporting()
.Build()
.Invoke(args);
}
}
public partial class BuildOptions
{

View file

@ -78,7 +78,7 @@ LWM(GetDelegateCtor, Agnostic_GetDelegateCtorIn, Agnostic_GetDelegateCtorOut)
LWM(GetEEInfo, DWORD, Agnostic_CORINFO_EE_INFO)
LWM(GetEHinfo, DLD, Agnostic_CORINFO_EH_CLAUSE)
LWM(GetFieldAddress, DWORDLONG, Agnostic_GetFieldAddress)
LWM(GetReadonlyStaticFieldValue, DLDD, DD)
LWM(GetReadonlyStaticFieldValue, DLDDD, DD)
LWM(GetStaticFieldCurrentClass, DWORDLONG, Agnostic_GetStaticFieldCurrentClass)
LWM(GetFieldClass, DWORDLONG, DWORDLONG)
LWM(GetFieldInClass, DLD, DWORDLONG)

View file

@ -3569,16 +3569,17 @@ void* MethodContext::repGetFieldAddress(CORINFO_FIELD_HANDLE field, void** ppInd
return (void*)value.fieldAddress;
}
void MethodContext::recGetReadonlyStaticFieldValue(CORINFO_FIELD_HANDLE field, uint8_t* buffer, int bufferSize, bool ignoreMovableObjects, bool result)
void MethodContext::recGetReadonlyStaticFieldValue(CORINFO_FIELD_HANDLE field, uint8_t* buffer, int bufferSize, int valueOffset, bool ignoreMovableObjects, bool result)
{
if (GetReadonlyStaticFieldValue == nullptr)
GetReadonlyStaticFieldValue = new LightWeightMap<DLDD, DD>();
GetReadonlyStaticFieldValue = new LightWeightMap<DLDDD, DD>();
DLDD key;
DLDDD key;
ZeroMemory(&key, sizeof(key));
key.A = CastHandle(field);
key.B = (DWORD)bufferSize;
key.C = (DWORD)ignoreMovableObjects;
key.D = (DWORD)valueOffset;
DWORD tmpBuf = (DWORD)-1;
if (buffer != nullptr && result)
@ -3591,18 +3592,19 @@ void MethodContext::recGetReadonlyStaticFieldValue(CORINFO_FIELD_HANDLE field, u
GetReadonlyStaticFieldValue->Add(key, value);
DEBUG_REC(dmpGetReadonlyStaticFieldValue(key, value));
}
void MethodContext::dmpGetReadonlyStaticFieldValue(DLDD key, DD value)
void MethodContext::dmpGetReadonlyStaticFieldValue(DLDDD key, DD value)
{
printf("GetReadonlyStaticFieldValue key fld-%016llX bufSize-%u, ignoremovable-%u, result-%u", key.A, key.B, key.C, value.A);
printf("GetReadonlyStaticFieldValue key fld-%016llX bufSize-%u, ignoremovable-%u, valOffset-%u result-%u", key.A, key.B, key.C, key.D, value.A);
GetReadonlyStaticFieldValue->Unlock();
}
bool MethodContext::repGetReadonlyStaticFieldValue(CORINFO_FIELD_HANDLE field, uint8_t* buffer, int bufferSize, bool ignoreMovableObjects)
bool MethodContext::repGetReadonlyStaticFieldValue(CORINFO_FIELD_HANDLE field, uint8_t* buffer, int bufferSize, int valueOffset, bool ignoreMovableObjects)
{
DLDD key;
DLDDD key;
ZeroMemory(&key, sizeof(key));
key.A = CastHandle(field);
key.B = (DWORD)bufferSize;
key.C = (DWORD)ignoreMovableObjects;
key.D = (DWORD)valueOffset;
DD value = LookupByKeyOrMiss(GetReadonlyStaticFieldValue, key, ": key %016llX", key.A);

View file

@ -492,9 +492,9 @@ public:
void dmpGetFieldAddress(DWORDLONG key, const Agnostic_GetFieldAddress& value);
void* repGetFieldAddress(CORINFO_FIELD_HANDLE field, void** ppIndirection);
void recGetReadonlyStaticFieldValue(CORINFO_FIELD_HANDLE field, uint8_t* buffer, int bufferSize, bool ignoreMovableObjects, bool result);
void dmpGetReadonlyStaticFieldValue(DLDD key, DD value);
bool repGetReadonlyStaticFieldValue(CORINFO_FIELD_HANDLE field, uint8_t* buffer, int bufferSize, bool ignoreMovableObjects);
void recGetReadonlyStaticFieldValue(CORINFO_FIELD_HANDLE field, uint8_t* buffer, int bufferSize, int valueOffset, bool ignoreMovableObjects, bool result);
void dmpGetReadonlyStaticFieldValue(DLDDD key, DD value);
bool repGetReadonlyStaticFieldValue(CORINFO_FIELD_HANDLE field, uint8_t* buffer, int bufferSize, int valueOffset, bool ignoreMovableObjects);
void recGetStaticFieldCurrentClass(CORINFO_FIELD_HANDLE field, bool isSpeculative, CORINFO_CLASS_HANDLE result);
void dmpGetStaticFieldCurrentClass(DWORDLONG key, const Agnostic_GetStaticFieldCurrentClass& value);

View file

@ -1713,11 +1713,11 @@ void* interceptor_ICJI::getFieldAddress(CORINFO_FIELD_HANDLE field, void** ppInd
return temp;
}
bool interceptor_ICJI::getReadonlyStaticFieldValue(CORINFO_FIELD_HANDLE field, uint8_t* buffer, int bufferSize, bool ignoreMovableObjects)
bool interceptor_ICJI::getReadonlyStaticFieldValue(CORINFO_FIELD_HANDLE field, uint8_t* buffer, int bufferSize, int valueOffset, bool ignoreMovableObjects)
{
mc->cr->AddCall("getReadonlyStaticFieldValue");
bool result = original_ICorJitInfo->getReadonlyStaticFieldValue(field, buffer, bufferSize, ignoreMovableObjects);
mc->recGetReadonlyStaticFieldValue(field, buffer, bufferSize, ignoreMovableObjects, result);
bool result = original_ICorJitInfo->getReadonlyStaticFieldValue(field, buffer, bufferSize, valueOffset, ignoreMovableObjects);
mc->recGetReadonlyStaticFieldValue(field, buffer, bufferSize, valueOffset, ignoreMovableObjects, result);
return result;
}

View file

@ -1220,10 +1220,11 @@ bool interceptor_ICJI::getReadonlyStaticFieldValue(
CORINFO_FIELD_HANDLE field,
uint8_t* buffer,
int bufferSize,
int valueOffset,
bool ignoreMovableObjects)
{
mcs->AddCall("getReadonlyStaticFieldValue");
return original_ICorJitInfo->getReadonlyStaticFieldValue(field, buffer, bufferSize, ignoreMovableObjects);
return original_ICorJitInfo->getReadonlyStaticFieldValue(field, buffer, bufferSize, valueOffset, ignoreMovableObjects);
}
CORINFO_CLASS_HANDLE interceptor_ICJI::getStaticFieldCurrentClass(

View file

@ -1069,9 +1069,10 @@ bool interceptor_ICJI::getReadonlyStaticFieldValue(
CORINFO_FIELD_HANDLE field,
uint8_t* buffer,
int bufferSize,
int valueOffset,
bool ignoreMovableObjects)
{
return original_ICorJitInfo->getReadonlyStaticFieldValue(field, buffer, bufferSize, ignoreMovableObjects);
return original_ICorJitInfo->getReadonlyStaticFieldValue(field, buffer, bufferSize, valueOffset, ignoreMovableObjects);
}
CORINFO_CLASS_HANDLE interceptor_ICJI::getStaticFieldCurrentClass(

Some files were not shown because too many files have changed in this diff.