diff --git a/src/coreclr/src/jit/compiler.h b/src/coreclr/src/jit/compiler.h
index b8b6a1aea2f44..3e9f60f87c73b 100644
--- a/src/coreclr/src/jit/compiler.h
+++ b/src/coreclr/src/jit/compiler.h
@@ -2770,9 +2770,10 @@ class Compiler
     void impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo);
 
-    bool impCanPInvokeInline(var_types callRetTyp);
-    bool impCanPInvokeInlineCallSite(var_types callRetTyp);
-    void impCheckForPInvokeCall(GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags);
+    bool impCanPInvokeInline(var_types callRetTyp, BasicBlock* block);
+    bool impCanPInvokeInlineCallSite(var_types callRetTyp, BasicBlock* block);
+    void impCheckForPInvokeCall(
+        GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block);
 
     GenTreePtr impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset = BAD_IL_OFFSET);
     void impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig);
diff --git a/src/coreclr/src/jit/flowgraph.cpp b/src/coreclr/src/jit/flowgraph.cpp
index da71438315fb2..0b991bd290fdb 100644
--- a/src/coreclr/src/jit/flowgraph.cpp
+++ b/src/coreclr/src/jit/flowgraph.cpp
@@ -21981,6 +21981,9 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo)
     compNeedsGSSecurityCookie |= InlineeCompiler->compNeedsGSSecurityCookie;
     compGSReorderStackLayout |= InlineeCompiler->compGSReorderStackLayout;
 
+    // Update unmanaged call count
+    info.compCallUnmanaged += InlineeCompiler->info.compCallUnmanaged;
+
     // Update optMethodFlags
 
 #ifdef DEBUG
diff --git a/src/coreclr/src/jit/importer.cpp b/src/coreclr/src/jit/importer.cpp
index 094bfc4e687c0..59bf63386716d 100644
--- a/src/coreclr/src/jit/importer.cpp
+++ b/src/coreclr/src/jit/importer.cpp
@@ -5367,59 +5367,111 @@ GenTreePtr Compiler::impTransformThis(GenTreePtr thisPtr,
         }
     }
 }
 
-bool Compiler::impCanPInvokeInline(var_types callRetTyp)
+//------------------------------------------------------------------------
+// impCanPInvokeInline: examine information from a call to see if the call
+// qualifies as an inline pinvoke.
+//
+// Arguments:
+//    callRetTyp - return type of the call
+//    block      - block containing the call, or for inlinees, block
+//                 containing the call being inlined
+//
+// Return Value:
+//    true if this call qualifies as an inline pinvoke, false otherwise
+//
+// Notes:
+//    Checks basic legality and then a number of ambient conditions
+//    where we could pinvoke but choose not to.
+
+bool Compiler::impCanPInvokeInline(var_types callRetTyp, BasicBlock* block)
 {
-    return impCanPInvokeInlineCallSite(callRetTyp) && getInlinePInvokeEnabled() && (!opts.compDbgCode) &&
+    return impCanPInvokeInlineCallSite(callRetTyp, block) && getInlinePInvokeEnabled() && (!opts.compDbgCode) &&
            (compCodeOpt() != SMALL_CODE) && (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
            ;
 }
 
-// Returns false only if the callsite really cannot be inlined. Ignores global variables
-// like debugger, profiler etc.
-bool Compiler::impCanPInvokeInlineCallSite(var_types callRetTyp)
+//------------------------------------------------------------------------
+// impCanPInvokeInlineCallSite: basic legality checks using information
+// from a call to see if the call qualifies as an inline pinvoke.
+//
+// Arguments:
+//    callRetTyp - return type of the call
+//    block      - block containing the call, or for inlinees, block
+//                 containing the call being inlined
+//
+// Return Value:
+//    true if this call can legally qualify as an inline pinvoke, false otherwise
+//
+// Notes:
+//    Inline PInvoke is not legal in these cases:
+//      * Within handler regions (finally/catch/filter)
+//      * Within trees with localloc
+//      * If the call returns a struct
+//
+//    We have to disable pinvoke inlining inside of filters
+//    because in case the main execution (i.e. in the try block) is inside
+//    unmanaged code, we cannot reuse the inlined stub (we still need the
+//    original state until we are in the catch handler).
+//
+//    We disable pinvoke inlining inside handlers since the GSCookie is
+//    in the inlined Frame (see CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie),
+//    but this would not protect framelets/return-address of handlers.
+//
+//    On x64, we disable pinvoke inlining inside of try regions.
+//    Here is the comment from JIT64 explaining why:
+//
+//      [VSWhidbey: 611015] - because the jitted code links in the
+//      Frame (instead of the stub) we rely on the Frame not being
+//      'active' until inside the stub. This normally happens by the
+//      stub setting the return address pointer in the Frame object
+//      inside the stub. On a normal return, the return address
+//      pointer is zeroed out so the Frame can be safely re-used, but
+//      if an exception occurs, nobody zeros out the return address
+//      pointer. Thus if we re-used the Frame object, it would go
+//      'active' as soon as we link it into the Frame chain.
+//
+//      Technically we only need to disable PInvoke inlining if we're
+//      in a handler or if we're in a try body with a catch or
+//      filter/except where other non-handler code in this method
+//      might run and try to re-use the dirty Frame object.
+//
+//      A desktop test case where this seems to matter is
+//      jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
+
+bool Compiler::impCanPInvokeInlineCallSite(var_types callRetTyp, BasicBlock* block)
 {
-    return
-        // We have to disable pinvoke inlining inside of filters
-        // because in case the main execution (i.e. in the try block) is inside
-        // unmanaged code, we cannot reuse the inlined stub (we still need the
-        // original state until we are in the catch handler)
-        (!bbInFilterILRange(compCurBB)) &&
-        // We disable pinvoke inlining inside handlers since the GSCookie is
-        // in the inlined Frame (see CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie),
-        // but this would not protect framelets/return-address of handlers.
-        !compCurBB->hasHndIndex() &&
+
 #ifdef _TARGET_AMD64_
-        // Turns out JIT64 doesn't perform PInvoke inlining inside try regions, here's an excerpt of
-        // the comment from JIT64 explaining why:
-        //
-        //// [VSWhidbey: 611015] - because the jitted code links in the Frame (instead
-        //// of the stub) we rely on the Frame not being 'active' until inside the
-        //// stub. This normally happens by the stub setting the return address
-        //// pointer in the Frame object inside the stub. On a normal return, the
-        //// return address pointer is zeroed out so the Frame can be safely re-used,
-        //// but if an exception occurs, nobody zeros out the return address pointer.
-        //// Thus if we re-used the Frame object, it would go 'active' as soon as we
-        //// link it into the Frame chain.
-        ////
-        //// Technically we only need to disable PInvoke inlining if we're in a
-        //// handler or if we're
-        //// in a try body with a catch or filter/except where other non-handler code
-        //// in this method might run and try to re-use the dirty Frame object.
-        //
-        // Now, because of this, the VM actually assumes that in 64 bit we never PInvoke
-        // inline calls on any EH construct, you can verify that on VM\ExceptionHandling.cpp:203
-        // The method responsible for resuming execution is UpdateObjectRefInResumeContextCallback
-        // you can see how it aligns with JIT64 policy of not inlining PInvoke calls almost right
-        // at the beginning of the body of the method.
-        !compCurBB->hasTryIndex() &&
-#endif
-        (!impLocAllocOnStack()) && (callRetTyp != TYP_STRUCT);
+    const bool inX64Try = block->hasTryIndex();
+#else
+    const bool inX64Try = false;
+#endif // _TARGET_AMD64_
+
+    return !inX64Try && !block->hasHndIndex() && !impLocAllocOnStack() && (callRetTyp != TYP_STRUCT);
 }
 
-void Compiler::impCheckForPInvokeCall(GenTreePtr call,
-                                      CORINFO_METHOD_HANDLE methHnd,
-                                      CORINFO_SIG_INFO*     sig,
-                                      unsigned              mflags)
+//------------------------------------------------------------------------
+// impCheckForPInvokeCall: examine a call to see if it is a pinvoke and,
+// if so, whether it can be expressed as an inline pinvoke.
+//
+// Arguments:
+//    call - tree for the call
+//    methHnd - handle for the method being called (may be null)
+//    sig - signature of the method being called
+//    mflags - method flags for the method being called
+//    block - block containing the call, or for inlinees, block
+//            containing the call being inlined
+//
+// Notes:
+//   Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
+//
+//   Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
+//   call passes a combination of legality and profitability checks.
+//
+//   If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
+
+void Compiler::impCheckForPInvokeCall(
+    GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
 {
     var_types callRetTyp = JITtype2varType(sig->retType);
     CorInfoUnmanagedCallConv unmanagedCallConv;
@@ -5466,13 +5518,13 @@ void Compiler::impCheckForPInvokeCall(GenTreePtr call,
     {
 #ifdef _TARGET_X86_
         // CALLI in IL stubs must be inlined
-        assert(impCanPInvokeInlineCallSite(callRetTyp));
+        assert(impCanPInvokeInlineCallSite(callRetTyp, block));
         assert(!info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig));
 #endif // _TARGET_X86_
     }
     else
     {
-        if (!impCanPInvokeInline(callRetTyp))
+        if (!impCanPInvokeInline(callRetTyp, block))
         {
             return;
         }
@@ -5481,6 +5533,14 @@ void Compiler::impCheckForPInvokeCall(GenTreePtr call,
         {
             return;
         }
+
+        // Size-speed tradeoff: don't use inline pinvoke at rarely
+        // executed call sites. The non-inline version is more
+        // compact.
+        if (block->isRunRarely())
+        {
+            return;
+        }
     }
 
     JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
@@ -5488,8 +5548,6 @@ void Compiler::impCheckForPInvokeCall(GenTreePtr call,
     call->gtFlags |= GTF_CALL_UNMANAGED;
     info.compCallUnmanaged++;
 
-    assert(!compIsForInlining());
-
     // AMD64 convention is same for native and managed
     if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
     {
@@ -6146,7 +6204,7 @@ bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
              ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
                                                                                          // one pop seen so far.
 #else
-    nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
+    nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
 #endif
 
     if (isCallPopAndRet)
@@ -6920,9 +6978,15 @@ var_types Compiler::impImportCall(OPCODE opcode,
 
     //--------------------------- Inline NDirect ------------------------------
 
-    if (!compIsForInlining())
+    // For inline cases we technically should look at both the current
+    // block and the call site block (or just the latter if we've
+    // fused the EH trees). However the block-related checks pertain to
+    // EH and we currently won't inline a method with EH. So for
+    // inlinees, just checking the call site block is sufficient.
     {
-        impCheckForPInvokeCall(call, methHnd, sig, mflags);
+        // New lexical block here to avoid compilation errors because of GOTOs.
+        BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
+        impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
     }
 
     if (call->gtFlags & GTF_CALL_UNMANAGED)
diff --git a/src/coreclr/src/jit/inlinepolicy.cpp b/src/coreclr/src/jit/inlinepolicy.cpp
index d108b987e020d..c7b0e91cc6911 100644
--- a/src/coreclr/src/jit/inlinepolicy.cpp
+++ b/src/coreclr/src/jit/inlinepolicy.cpp
@@ -449,16 +449,16 @@ void LegacyPolicy::NoteInt(InlineObservation obs, int value)
             // Now that we know size and forceinline state,
             // update candidacy.
-            if (m_CodeSize <= InlineStrategy::ALWAYS_INLINE_SIZE)
-            {
-                // Candidate based on small size
-                SetCandidate(InlineObservation::CALLEE_BELOW_ALWAYS_INLINE_SIZE);
-            }
-            else if (m_IsForceInline)
+            if (m_IsForceInline)
             {
                 // Candidate based on force inline
                 SetCandidate(InlineObservation::CALLEE_IS_FORCE_INLINE);
             }
+            else if (m_CodeSize <= InlineStrategy::ALWAYS_INLINE_SIZE)
+            {
+                // Candidate based on small size
+                SetCandidate(InlineObservation::CALLEE_BELOW_ALWAYS_INLINE_SIZE);
+            }
             else if (m_CodeSize <= m_RootCompiler->m_inlineStrategy->GetMaxInlineILSize())
             {
                 // Candidate, pending profitability evaluation
diff --git a/src/coreclr/src/jit/morph.cpp b/src/coreclr/src/jit/morph.cpp
index d4dd7fa974509..95fe017533b0c 100644
--- a/src/coreclr/src/jit/morph.cpp
+++ b/src/coreclr/src/jit/morph.cpp
@@ -16832,14 +16832,6 @@ void Compiler::fgMorph()
 
     fgRemoveEmptyBlocks();
 
-    /* Add any internal blocks/trees we may need */
-
-    fgAddInternal();
-
-#if OPT_BOOL_OPS
-    fgMultipleNots = false;
-#endif
-
 #ifdef DEBUG
     /* Inliner could add basic blocks. Check that the flowgraph data is up-to-date */
     fgDebugCheckBBlist(false, false);
@@ -16858,6 +16850,14 @@ void Compiler::fgMorph()
 
     EndPhase(PHASE_MORPH_INLINE);
 
+    /* Add any internal blocks/trees we may need */
+
+    fgAddInternal();
+
+#if OPT_BOOL_OPS
+    fgMultipleNots = false;
+#endif
+
 #ifdef DEBUG
     /* Inliner could add basic blocks. Check that the flowgraph data is up-to-date */
     fgDebugCheckBBlist(false, false);
diff --git a/src/coreclr/src/jit/stackfp.cpp b/src/coreclr/src/jit/stackfp.cpp
index 2342b22c36495..43c463039ee16 100644
--- a/src/coreclr/src/jit/stackfp.cpp
+++ b/src/coreclr/src/jit/stackfp.cpp
@@ -4140,8 +4140,26 @@ void Compiler::raEnregisterVarsPostPassStackFP()
             {
                 raSetRegLclBirthDeath(tree, lastlife, false);
             }
+
+            // Model implicit use (& hence last use) of frame list root at pinvokes.
+            if (tree->gtOper == GT_CALL)
+            {
+                GenTreeCall* call = tree->AsCall();
+
+                if (call->IsUnmanaged() && !opts.ShouldUsePInvokeHelpers())
+                {
+                    LclVarDsc* frameVarDsc = &lvaTable[info.compLvFrameListRoot];
+
+                    if (frameVarDsc->lvTracked && ((call->gtCallMoreFlags & GTF_CALL_M_FRAME_VAR_DEATH) != 0))
+                    {
+                        // Frame var dies here
+                        unsigned varIndex = frameVarDsc->lvVarIndex;
+                        VarSetOps::RemoveElemD(this, lastlife, varIndex);
+                    }
+                }
+            }
         }
     }
+
+    assert(VarSetOps::Equal(this, lastlife, block->bbLiveOut));
 
     compCurBB = NULL;
diff --git a/src/coreclr/tests/src/JIT/Directed/pinvoke/pinvoke-examples.cs b/src/coreclr/tests/src/JIT/Directed/pinvoke/pinvoke-examples.cs
new file mode 100644
index 0000000000000..26080d8d71737
--- /dev/null
+++ b/src/coreclr/tests/src/JIT/Directed/pinvoke/pinvoke-examples.cs
@@ -0,0 +1,223 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// Test cases showing interaction of inlining and inline pinvoke,
+// along with the impact of EH.
+
+using System;
+using System.Runtime.CompilerServices;
+
+
+namespace PInvokeTest
+{
+    internal class Test
+    {
+        [MethodImpl(MethodImplOptions.AggressiveInlining)]
+        static int AsForceInline()
+        {
+            return Environment.ProcessorCount;
+        }
+
+        static int AsNormalInline()
+        {
+            return Environment.ProcessorCount;
+        }
+
+        [MethodImpl(MethodImplOptions.NoInlining)]
+        static int AsNoInline()
+        {
+            return Environment.ProcessorCount;
+        }
+
+        static bool FromTryCatch()
+        {
+            bool result = false;
+            try
+            {
+                // All pinvokes should be inline, except on x64
+                result = (Environment.ProcessorCount == AsNormalInline());
+            }
+            catch (Exception)
+            {
+                result = false;
+            }
+            return result;
+        }
+
+        static bool FromTryFinally()
+        {
+            bool result = false;
+            bool result1 = false;
+            bool result2 = false;
+            try
+            {
+                // All pinvokes should be inline, except on x64
+                result1 = (Environment.ProcessorCount == AsNormalInline());
+                result2 = (Environment.ProcessorCount == AsNormalInline());
+            }
+            finally
+            {
+                result = result1 && result2;
+            }
+            return result;
+        }
+
+        static bool FromTryFinally2()
+        {
+            bool result = false;
+            bool result1 = false;
+            bool result2 = false;
+
+            try
+            {
+                // These two pinvokes should be inline, except on x64
+                result1 = (Environment.ProcessorCount == AsNormalInline());
+            }
+            finally
+            {
+                // These two pinvokes should *not* be inline (finally)
+                result2 = (Environment.ProcessorCount == AsNormalInline());
+                result = result1 && result2;
+            }
+
+            return result;
+        }
+
+        static bool FromTryFinally3()
+        {
+            bool result = false;
+            bool result1 = false;
+            bool result2 = false;
+
+            try
+            {
+                // These two pinvokes should be inline, except on x64
+                result1 = (Environment.ProcessorCount == AsNormalInline());
+            }
+            finally
+            {
+                try
+                {
+                    // These two pinvokes should *not* be inline (finally)
+                    result2 = (Environment.ProcessorCount == AsNormalInline());
+                }
+                catch (Exception)
+                {
+                    result2 = false;
+                }
+
+                result = result1 && result2;
+            }
+
+            return result;
+        }
+
+        [MethodImpl(MethodImplOptions.NoInlining)]
+        static bool FromInline()
+        {
+            // These two pinvokes should be inline
+            bool result = (Environment.ProcessorCount == AsForceInline());
+            return result;
+        }
+
+        [MethodImpl(MethodImplOptions.NoInlining)]
+        static bool FromInline2()
+        {
+            // These four pinvokes should be inline
+            bool result1 = (Environment.ProcessorCount == AsNormalInline());
+            bool result2 = (Environment.ProcessorCount == AsForceInline());
+            return result1 && result2;
+        }
+
+        [MethodImpl(MethodImplOptions.NoInlining)]
+        static bool FromNoInline()
+        {
+            // The only pinvoke should be inline
+            bool result = (Environment.ProcessorCount == AsNoInline());
+            return result;
+        }
+
+        [MethodImpl(MethodImplOptions.NoInlining)]
+        static bool FromNoInline2()
+        {
+            // Three pinvokes should be inline
+            bool result1 = (Environment.ProcessorCount == AsNormalInline());
+            bool result2 = (Environment.ProcessorCount == AsNoInline());
+            return result1 && result2;
+        }
+
+        static bool FromFilter()
+        {
+            bool result = false;
+
+            try
+            {
+                throw new Exception("expected");
+            }
+            // These two pinvokes should *not* be inline (filter)
+            //
+            // For the first call the jit won't inline the wrapper, so
+            // it just calls get_ProcessorCount.
+            //
+            // For the second call, the force inline works, and the
+            // subsequent inline of get_ProcessorCount exposes a call
+            // to the pinvoke GetProcessorCount. This pinvoke will
+            // not be inline.
+            catch (Exception) when (Environment.ProcessorCount == AsForceInline())
+            {
+                result = true;
+            }
+
+            return result;
+        }
+
+        static bool FromColdCode()
+        {
+            int pc = 0;
+            bool result1 = false;
+            bool result2 = false;
+
+            try
+            {
+                // This pinvoke should not be inline (cold)
+                pc = Environment.ProcessorCount;
+                throw new Exception("expected");
+            }
+            catch (Exception)
+            {
+                // These two pinvokes should not be inline (catch)
+                //
+                // For the first call the jit won't inline the
+                // wrapper, so it just calls get_ProcessorCount.
+                //
+                // For the second call, the force inline works, and
+                // the subsequent inline of get_ProcessorCount exposes
+                // a call to the pinvoke GetProcessorCount. This
+                // pinvoke will not be inline.
+                result1 = (pc == Environment.ProcessorCount);
+                result2 = (pc == AsForceInline());
+            }
+
+            return result1 && result2;
+        }
+
+        private static int Main()
+        {
+            bool result = true;
+
+            result &= FromTryCatch();
+            result &= FromTryFinally();
+            result &= FromTryFinally2();
+            result &= FromTryFinally3();
+            result &= FromInline();
+            result &= FromInline2();
+            result &= FromNoInline();
+            result &= FromNoInline2();
+            result &= FromFilter();
+            result &= FromColdCode();
+
+            return (result ? 100 : -1);
+        }
+    }
+}
diff --git a/src/coreclr/tests/src/JIT/Directed/pinvoke/pinvoke-examples.csproj b/src/coreclr/tests/src/JIT/Directed/pinvoke/pinvoke-examples.csproj
new file mode 100644
index 0000000000000..78cf4471fe7e0
--- /dev/null
+++ b/src/coreclr/tests/src/JIT/Directed/pinvoke/pinvoke-examples.csproj
@@ -0,0 +1,44 @@
+
+
+
+    Debug
+    AnyCPU
+    $(MSBuildProjectName)
+    2.0
+    {95DFC527-4DC1-495E-97D7-E94EE1F7140D}
+    Exe
+    Properties
+    512
+    {786C830F-07A1-408B-BD7F-6EE04809D6DB};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}
+    $(ProgramFiles)\Common Files\microsoft shared\VSTT\11.0\UITestExtensionPackages
+    ..\..\
+    7a9bfb7d
+
+
+
+
+
+
+
+
+    False
+
+
+
+    PdbOnly
+
+
+
+
+
+
+
+
+    $(JitPackagesConfigFileDirectory)minimal\project.json
+    $(JitPackagesConfigFileDirectory)minimal\project.lock.json
+
+
+
+
\ No newline at end of file
diff --git a/src/coreclr/tests/src/jit/Directed/pinvoke/pinvoke-bug.cs b/src/coreclr/tests/src/jit/Directed/pinvoke/pinvoke-bug.cs
new file mode 100644
index 0000000000000..2d4b5f6aeac0d
--- /dev/null
+++ b/src/coreclr/tests/src/jit/Directed/pinvoke/pinvoke-bug.cs
@@ -0,0 +1,60 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using System;
+using System.Runtime.InteropServices;
+
+// Test includes an intentional unreachable return
+#pragma warning disable 162
+
+namespace PInvokeTest
+{
+    internal class Test
+    {
+        [DllImport("msvcrt", EntryPoint = "sin")]
+        private static extern double sin(double x);
+
+        private static double g;
+        private static bool b;
+
+        public static int Main(string[] args)
+        {
+            bool result = false;
+            g = 0.0;
+            double val = 1.0;
+            b = false;
+            try
+            {
+                Func(val);
+            }
+            catch (Exception)
+            {
+                result = (Math.Abs(g - sin(val)) < 0.0001);
+            }
+
+            return (result ? 100 : -1);
+        }
+
+        // An inline pinvoke in a method with float math followed by a
+        // throw may cause trouble for liveness models for the inline
+        // frame var.
+        static double Func(double x)
+        {
+            g = sin(x);
+
+            // A bit of control flow to throw off rareness detection
+            // Also we need float in here
+            if (b)
+            {
+                g = 0.0;
+            }
+
+            throw new Exception();
+
+            // Deliberately unreachable return
+            return g;
+        }
+    }
+}
+
diff --git a/src/coreclr/tests/src/jit/Directed/pinvoke/pinvoke-bug.csproj b/src/coreclr/tests/src/jit/Directed/pinvoke/pinvoke-bug.csproj
new file mode 100644
index 0000000000000..2f8a24638bf4e
--- /dev/null
+++ b/src/coreclr/tests/src/jit/Directed/pinvoke/pinvoke-bug.csproj
@@ -0,0 +1,44 @@
+
+
+
+    Debug
+    AnyCPU
+    $(MSBuildProjectName)
+    2.0
+    {95DFC527-4DC1-495E-97D7-E94EE1F7140D}
+    Exe
+    Properties
+    512
+    {786C830F-07A1-408B-BD7F-6EE04809D6DB};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}
+    $(ProgramFiles)\Common Files\microsoft shared\VSTT\11.0\UITestExtensionPackages
+    ..\..\
+    7a9bfb7d
+
+
+
+
+
+
+
+
+    False
+
+
+
+    PdbOnly
+
+
+
+
+
+
+
+
+    $(JitPackagesConfigFileDirectory)minimal\project.json
+    $(JitPackagesConfigFileDirectory)minimal\project.lock.json
+
+
+
+
\ No newline at end of file
diff --git a/src/coreclr/tests/testsUnsupportedOutsideWindows.txt b/src/coreclr/tests/testsUnsupportedOutsideWindows.txt
index 4f68d7136ff19..0f8f194b41886 100644
--- a/src/coreclr/tests/testsUnsupportedOutsideWindows.txt
+++ b/src/coreclr/tests/testsUnsupportedOutsideWindows.txt
@@ -147,6 +147,7 @@ JIT/Directed/intrinsic/interlocked/rva_rvastatic3/rva_rvastatic3.sh
 JIT/Directed/intrinsic/interlocked/rva_rvastatic4/rva_rvastatic4.sh
 JIT/Directed/pinvoke/calli_excep/calli_excep.sh
 JIT/Directed/pinvoke/jump/jump.sh
+JIT/Directed/pinvoke/pinvoke-bug/pinvoke-bug.sh
 JIT/Directed/pinvoke/sin/sin.sh
 JIT/Directed/pinvoke/sysinfo_cs/sysinfo_cs.sh
 JIT/Directed/pinvoke/sysinfo_il/sysinfo_il.sh
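Illustrative sketch (not part of the patch, assuming the same msvcrt "sin" import the pinvoke-bug test uses): a direct pinvoke called once from straight-line code, where it is a candidate for inline-pinvoke expansion under the new checks, and once from a catch handler, where impCanPInvokeInlineCallSite keeps the non-inline stub form because the call site has a handler index.

// C# sketch only; names and layout here are illustrative, not from the patch.
using System;
using System.Runtime.InteropServices;

internal static class InlinePInvokeSketch
{
    // Same native entry point as pinvoke-bug.cs; Windows-only via msvcrt.
    [DllImport("msvcrt", EntryPoint = "sin")]
    private static extern double sin(double x);

    public static int Main()
    {
        // Hot, straight-line call site: eligible for the inline pinvoke path
        // (GTF_CALL_UNMANAGED), subject to the profitability checks above.
        double direct = sin(1.0);

        double fromHandler = 0.0;
        try
        {
            throw new Exception("expected");
        }
        catch (Exception)
        {
            // Call site inside a handler: block->hasHndIndex() is true, so the
            // JIT emits the ordinary (non-inline) pinvoke stub call instead.
            fromHandler = sin(1.0);
        }

        return (Math.Abs(direct - fromHandler) < 0.0001) ? 100 : -1;
    }
}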