// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
// File: eventtrace.cpp
// Abstract: This module implements Event Tracing support
//
//
//
// ============================================================================
#include "common.h"
#ifdef FEATURE_REDHAWK
#include "commontypes.h"
#include "daccess.h"
#include "debugmacrosext.h"
#include "palredhawkcommon.h"
#include "gcrhenv.h"
#define Win32EventWrite PalEtwEventWrite
#define InterlockedExchange64 PalInterlockedExchange64
#else // !FEATURE_REDHAWK
#include "eventtrace.h"
#include "winbase.h"
#include "contract.h"
#include "ex.h"
#include "dbginterface.h"
#include "finalizerthread.h"
#define Win32EventWrite EventWrite
#ifdef FEATURE_COMINTEROP
#include "comcallablewrapper.h"
#include "runtimecallablewrapper.h"
#endif
// Flags used to store some runtime information for Event Tracing
BOOL g_fEEOtherStartup=FALSE;
BOOL g_fEEComActivatedStartup=FALSE;
GUID g_EEComObjectGuid=GUID_NULL;
BOOL g_fEEHostedStartup = FALSE;
#endif // FEATURE_REDHAWK
#include "eventtracepriv.h"
#ifdef FEATURE_REDHAWK
volatile LONGLONG ETW::GCLog::s_l64LastClientSequenceNumber = 0;
#else // FEATURE_REDHAWK
Volatile<LONGLONG> ETW::GCLog::s_l64LastClientSequenceNumber = 0;
#endif // FEATURE_REDHAWK
#ifndef FEATURE_REDHAWK
//---------------------------------------------------------------------------------------
// Helper macros to determine which version of the Method events to use
//
// The V2 versions of these events include the ReJITID, the V1 versions do not.
// Historically, when we version events, we'd just stop sending the old version and only
// send the new one. However, now that we have xperf in heavy use internally and soon to be
// used externally, we need to be a bit careful. In particular, we'd like to allow
// current xperf to continue working without knowledge of ReJITIDs, and allow future
// xperf to decode symbols in ReJITted functions. Thus,
// * During a first-JIT, only issue the existing V1 MethodLoad, etc. events (NOT v0,
// NOT v2). This event does not include a ReJITID, and can thus continue to be
// parsed by older decoders.
// * During a rejit, only issue the new V2 events (NOT v0 or v1), which will include a
// nonzero ReJITID. Thus, your unique key for a method extent would be MethodID +
// ReJITID + extent (hot/cold). These events will be ignored by older decoders
// (including current xperf) because of the version number, but xperf will be
// updated to decode these in the future.
#define FireEtwMethodLoadVerbose_V1_or_V2(ullMethodIdentifier, ullModuleID, ullMethodStartAddress, ulMethodSize, ulMethodToken, ulMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID) \
{ \
if (rejitID == 0) \
{ FireEtwMethodLoadVerbose_V1(ullMethodIdentifier, ullModuleID, ullMethodStartAddress, ulMethodSize, ulMethodToken, ulMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID); } \
else \
{ FireEtwMethodLoadVerbose_V2(ullMethodIdentifier, ullModuleID, ullMethodStartAddress, ulMethodSize, ulMethodToken, ulMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID); } \
}
#define FireEtwMethodLoad_V1_or_V2(ullMethodIdentifier, ullModuleID, ullMethodStartAddress, ulMethodSize, ulMethodToken, ulMethodFlags, clrInstanceID, rejitID) \
{ \
if (rejitID == 0) \
{ FireEtwMethodLoad_V1(ullMethodIdentifier, ullModuleID, ullMethodStartAddress, ulMethodSize, ulMethodToken, ulMethodFlags, clrInstanceID); } \
else \
{ FireEtwMethodLoad_V2(ullMethodIdentifier, ullModuleID, ullMethodStartAddress, ulMethodSize, ulMethodToken, ulMethodFlags, clrInstanceID, rejitID); } \
}
#define FireEtwMethodUnloadVerbose_V1_or_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID) \
{ \
if (rejitID == 0) \
{ FireEtwMethodUnloadVerbose_V1(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID); } \
else \
{ FireEtwMethodUnloadVerbose_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID); } \
}
#define FireEtwMethodUnload_V1_or_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID, rejitID) \
{ \
if (rejitID == 0) \
{ FireEtwMethodUnload_V1(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID); } \
else \
{ FireEtwMethodUnload_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID, rejitID); } \
}
#define FireEtwMethodDCStartVerbose_V1_or_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID) \
{ \
if (rejitID == 0) \
{ FireEtwMethodDCStartVerbose_V1(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID); } \
else \
{ FireEtwMethodDCStartVerbose_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID); } \
}
#define FireEtwMethodDCStart_V1_or_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID, rejitID) \
{ \
if (rejitID == 0) \
{ FireEtwMethodDCStart_V1(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID); } \
else \
{ FireEtwMethodDCStart_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID, rejitID); } \
}
#define FireEtwMethodDCEndVerbose_V1_or_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID) \
{ \
if (rejitID == 0) \
{ FireEtwMethodDCEndVerbose_V1(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID); } \
else \
{ FireEtwMethodDCEndVerbose_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, szDtraceOutput1, szDtraceOutput2, szDtraceOutput3, clrInstanceID, rejitID); } \
}
#define FireEtwMethodDCEnd_V1_or_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID, rejitID) \
{ \
if (rejitID == 0) \
{ FireEtwMethodDCEnd_V1(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID); } \
else \
{ FireEtwMethodDCEnd_V2(ullMethodIdentifier, ullModuleID, ullColdMethodStartAddress, ulColdMethodSize, ulMethodToken, ulColdMethodFlags, clrInstanceID, rejitID); } \
}
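// Illustrative sketch (not part of the runtime, argument values are hypothetical
// placeholders): a caller passes rejitID == 0 for a first-time JIT, which expands to the
// V1 event, and the nonzero rejit ID for a rejitted method, which expands to the V2 event
// carrying the ReJITID field:
//
//   FireEtwMethodLoad_V1_or_V2(methodID, moduleID, startAddr, size, token, flags,
//                              GetClrInstanceId(), 0 /* first JIT -> V1 event */);
//   FireEtwMethodLoad_V1_or_V2(methodID, moduleID, startAddr, size, token, flags,
//                              GetClrInstanceId(), rejitID /* rejit -> V2 event */);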
// Module load / unload events:
#define FireEtwModuleLoad_V1_or_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath) \
FireEtwModuleLoad_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath)
#define FireEtwModuleUnload_V1_or_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath) \
FireEtwModuleUnload_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath)
#define FireEtwModuleDCStart_V1_or_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath) \
FireEtwModuleDCStart_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath)
#define FireEtwModuleDCEnd_V1_or_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath) \
FireEtwModuleDCEnd_V2(ullModuleId, ullAssemblyId, ulFlags, ulReservedFlags, szDtraceOutput1, szDtraceOutput2, clrInstanceId, ManagedPdbSignature, ManagedPdbAge, ManagedPdbPath, NativePdbSignature, NativePdbAge, NativePdbPath)
//---------------------------------------------------------------------------------------
//
// Rather than checking the NGEN keyword on the runtime provider directly, use this
// helper that checks that the NGEN runtime provider keyword is enabled AND the
// OverrideAndSuppressNGenEvents keyword on the runtime provider is NOT enabled.
//
// OverrideAndSuppressNGenEvents allows controllers to set the expensive NGEN keyword for
// older runtimes (< 4.0) where NGEN PDB info is NOT available, while suppressing those
// expensive events on newer runtimes (>= 4.5) where NGEN PDB info IS available. Note
// that 4.0 has NGEN PDBs but unfortunately not the OverrideAndSuppressNGenEvents
// keyword, b/c NGEN PDBs were made public only after 4.0 shipped. So tools that need
// to consume both <4.0 and 4.0 events would need to enable the expensive NGEN events to
// deal properly with 3.5, even though those events aren't necessary on 4.0.
//
// On CoreCLR, this keyword is a no-op, because coregen PDBs don't exist (and thus we'll
// need the NGEN rundown to still work on Silverlight).
//
// Return Value:
// nonzero iff NGenKeyword is enabled on the runtime provider and
// OverrideAndSuppressNGenEventsKeyword is not enabled on the runtime provider.
//
BOOL IsRuntimeNgenKeywordEnabledAndNotSuppressed()
{
LIMITED_METHOD_CONTRACT;
return
(
ETW_TRACING_CATEGORY_ENABLED(
MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_NGEN_KEYWORD)
&& ! ( ETW_TRACING_CATEGORY_ENABLED(
MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_OVERRIDEANDSUPPRESSNGENEVENTS_KEYWORD) )
);
}
// Same as above, but for the rundown provider
BOOL IsRundownNgenKeywordEnabledAndNotSuppressed()
{
LIMITED_METHOD_CONTRACT;
return
#ifdef FEATURE_PERFTRACING
EventPipeHelper::Enabled() ||
#endif // FEATURE_PERFTRACING
(
ETW_TRACING_CATEGORY_ENABLED(
MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_RUNDOWNNGEN_KEYWORD)
&& ! ( ETW_TRACING_CATEGORY_ENABLED(
MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_RUNDOWNOVERRIDEANDSUPPRESSNGENEVENTS_KEYWORD) )
);
}
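// Usage sketch (hypothetical call site, not taken from this file): code that fires NGEN
// method events would gate them on these helpers rather than on the raw keyword bits:
//
//   if (IsRundownNgenKeywordEnabledAndNotSuppressed())
//   {
//       // enumerate NGEN'ed methods and fire the MethodDCStart / MethodDCEnd events
//   }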
/*******************************************************/
/* Fast assembly function to get the topmost EBP frame */
/*******************************************************/
#if defined(_TARGET_X86_)
extern "C"
{
CallStackFrame* GetEbp()
{
CallStackFrame *frame=NULL;
__asm
{
mov frame, ebp
}
return frame;
}
}
#endif //_TARGET_X86_
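// The EBP-chain walk in SaveCurrentStack below assumes the standard x86 frame layout,
// where each frame starts with the saved caller EBP followed by the return address.
// CallStackFrame (declared in the ETW headers) models that layout; a minimal sketch of
// the assumed shape is:
//
//   struct CallStackFrame
//   {
//       CallStackFrame * m_Next;          // saved EBP of the caller's frame
//       SIZE_T           m_ReturnAddress; // return address pushed by the call
//   };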
/*************************************/
/* Function to append a frame to an existing stack */
/*************************************/
#if !defined(FEATURE_PAL)
void ETW::SamplingLog::Append(SIZE_T currentFrame)
{
LIMITED_METHOD_CONTRACT;
if(m_FrameCount < (ETW::SamplingLog::s_MaxStackSize-1) &&
currentFrame != 0)
{
m_EBPStack[m_FrameCount] = currentFrame;
m_FrameCount++;
}
};
/********************************************************/
/* Function to get the callstack on the current thread */
/********************************************************/
ETW::SamplingLog::EtwStackWalkStatus ETW::SamplingLog::GetCurrentThreadsCallStack(UINT32 *frameCount, PVOID **Stack)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SO_TOLERANT;
}
CONTRACTL_END;
// The stack walk performed below can cause allocations (thus entering the host). But
// this is acceptable, since we're not supporting the use of SQL/F1 profiling and
// full-blown ETW CLR stacks (which would be redundant).
PERMANENT_CONTRACT_VIOLATION(HostViolation, ReasonUnsupportedForSQLF1Profiling);
m_FrameCount = 0;
ETW::SamplingLog::EtwStackWalkStatus stackwalkStatus = SaveCurrentStack();
_ASSERTE(m_FrameCount < ETW::SamplingLog::s_MaxStackSize);
// This is not really needed, but let's do it anyway,
// because we use the frame count while dumping the stack event
for(int i=m_FrameCount; i<ETW::SamplingLog::s_MaxStackSize; i++)
{
m_EBPStack[i] = 0;
}
// Report at least 2 frames so consumers work correctly, because the number of
// frames in the manifest file is specified to be 2
if(m_FrameCount < 2)
m_FrameCount = 2;
*frameCount = m_FrameCount;
*Stack = (PVOID *)m_EBPStack;
return stackwalkStatus;
};
/*************************************/
/* Function to save the stack on the current thread */
/*************************************/
ETW::SamplingLog::EtwStackWalkStatus ETW::SamplingLog::SaveCurrentStack(int skipTopNFrames)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
SO_TOLERANT;
}
CONTRACTL_END;
if (!IsGarbageCollectorFullyInitialized())
{
// If the GC isn't ready yet, then there won't be any interesting
// managed code on the stack to walk. Plus, the stack walk itself may
// hit problems (e.g., when calling into the code manager) if it's run
// too early during startup.
return ETW::SamplingLog::UnInitialized;
}
#ifndef DACCESS_COMPILE
#ifdef _TARGET_AMD64_
if (RtlVirtualUnwind_Unsafe == NULL)
{
// We haven't even set up the RtlVirtualUnwind function pointer yet,
// so it's too early to try stack walking.
return ETW::SamplingLog::UnInitialized;
}
#endif // _TARGET_AMD64_
Thread *pThread = GetThread();
if (pThread == NULL)
{
return ETW::SamplingLog::UnInitialized;
}
// The thread should not have a hijack set up or we can't walk the stack.
if (pThread->m_State & Thread::TS_Hijacked) {
return ETW::SamplingLog::UnInitialized;
}
if (pThread->IsEtwStackWalkInProgress())
{
return ETW::SamplingLog::InProgress;
}
pThread->MarkEtwStackWalkInProgress();
EX_TRY
{
#ifdef _TARGET_X86_
CallStackFrame *currentEBP = GetEbp();
CallStackFrame *lastEBP = NULL;
// The EBP stack walk below is meant to be extremely fast. It does not attempt to protect
// against cases of stack corruption. *BUT* it does need to validate a "sane" EBP chain.
// Ensure the EBP in the starting frame is "reasonable" (i.e. above the address of a local)
if ((SIZE_T) currentEBP > (SIZE_T)&currentEBP)
{
while(currentEBP)
{
lastEBP = currentEBP;
currentEBP = currentEBP->m_Next;
// Check for stack upper limit; we don't check the lower limit on each iteration
// (we did it at the top) and each subsequent value in the loop is larger than
// the previous (see the check "currentEBP < lastEBP" below)
if((SIZE_T)currentEBP > (SIZE_T)Thread::GetStackUpperBound())
{
break;
}
// If the address is too small, the chain is probably bad
if((SIZE_T)currentEBP < (SIZE_T)0x10000)
break;
if((SIZE_T)currentEBP < (SIZE_T)lastEBP)
{
break;
}
// Skip the top N frames
if(skipTopNFrames) {
skipTopNFrames--;
continue;
}
// Save the Return Address for symbol decoding
Append(lastEBP->m_ReturnAddress);
}
}
#else
CONTEXT ctx;
ClrCaptureContext(&ctx);
UINT_PTR ControlPc = 0;
UINT_PTR CurrentSP = 0, PrevSP = 0;
while(1)
{
// Unwind to the caller
ControlPc = Thread::VirtualUnwindCallFrame(&ctx);
// This is to take care of recursion
CurrentSP = (UINT_PTR)GetSP(&ctx);
// when to break from this loop
if ( ControlPc == 0 || ( PrevSP == CurrentSP ) )
{
break;
}
// Skip the top N frames
if ( skipTopNFrames ) {
skipTopNFrames--;
continue;
}
// Add the stack frame to the list
Append(ControlPc);
PrevSP = CurrentSP;
}
#endif //_TARGET_X86_
} EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
pThread->MarkEtwStackWalkCompleted();
#endif //!DACCESS_COMPILE
return ETW::SamplingLog::Completed;
}
#endif // !defined(FEATURE_PAL)
#endif // !FEATURE_REDHAWK
/****************************************************************************/
/* Methods that are called from the runtime */
/****************************************************************************/
/****************************************************************************/
/* Methods for rundown events */
/****************************************************************************/
/***************************************************************************/
/* This function should be called from the event tracing callback routine
when the private CLR provider is enabled */
/***************************************************************************/
#ifndef FEATURE_REDHAWK
VOID ETW::GCLog::GCSettingsEvent()
{
if (GCHeapUtilities::IsGCHeapInitialized())
{
if (ETW_TRACING_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context,
GCSettings))
{
ETW::GCLog::ETW_GC_INFO Info;
Info.GCSettings.ServerGC = GCHeapUtilities::IsServerHeap ();
Info.GCSettings.SegmentSize = GCHeapUtilities::GetGCHeap()->GetValidSegmentSize (false);
Info.GCSettings.LargeObjectSegmentSize = GCHeapUtilities::GetGCHeap()->GetValidSegmentSize (true);
FireEtwGCSettings_V1(Info.GCSettings.SegmentSize, Info.GCSettings.LargeObjectSegmentSize, Info.GCSettings.ServerGC, GetClrInstanceId());
}
GCHeapUtilities::GetGCHeap()->DiagTraceGCSegments();
}
};
#endif // !FEATURE_REDHAWK
//---------------------------------------------------------------------------------------
// Code for sending GC heap object events is generally the same for both FEATURE_REDHAWK
// and !FEATURE_REDHAWK builds
//---------------------------------------------------------------------------------------
bool s_forcedGCInProgress = false;
class ForcedGCHolder
{
public:
ForcedGCHolder() { LIMITED_METHOD_CONTRACT; s_forcedGCInProgress = true; }
~ForcedGCHolder() { LIMITED_METHOD_CONTRACT; s_forcedGCInProgress = false; }
};
BOOL ETW::GCLog::ShouldWalkStaticsAndCOMForEtw()
{
LIMITED_METHOD_CONTRACT;
return s_forcedGCInProgress &&
ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_GCHEAPDUMP_KEYWORD);
}
// Simple helpers called by the GC to decide whether it needs to do a walk of heap
// objects and / or roots.
BOOL ETW::GCLog::ShouldWalkHeapObjectsForEtw()
{
LIMITED_METHOD_CONTRACT;
return s_forcedGCInProgress &&
ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_GCHEAPDUMP_KEYWORD);
}
BOOL ETW::GCLog::ShouldWalkHeapRootsForEtw()
{
LIMITED_METHOD_CONTRACT;
return s_forcedGCInProgress &&
ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_GCHEAPDUMP_KEYWORD);
}
BOOL ETW::GCLog::ShouldTrackMovementForEtw()
{
LIMITED_METHOD_CONTRACT;
return ETW_TRACING_CATEGORY_ENABLED(
MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_GCHEAPSURVIVALANDMOVEMENT_KEYWORD);
}
// Batches the list of moved/surviving references for the GCBulkMovedObjectRanges /
// GCBulkSurvivingObjectRanges events
struct EtwGcMovementContext
{
public:
// An instance of EtwGcMovementContext is dynamically allocated and stored
// inside of MovedReferenceContextForEtwAndProfapi, which in turn is dynamically
// allocated and pointed to by a profiling_context pointer created by the GC on the stack.
// This is used to batch and send GCBulkSurvivingObjectRanges events and
// GCBulkMovedObjectRanges events. This method is passed a pointer to
// MovedReferenceContextForEtwAndProfapi::pctxEtw; if non-NULL it gets returned;
// else, a new EtwGcMovementContext is allocated, stored in that pointer, and
// then returned. Callers should test for NULL, which can be returned if out of
// memory
static EtwGcMovementContext * GetOrCreateInGCContext(EtwGcMovementContext ** ppContext)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(ppContext != NULL);
EtwGcMovementContext * pContext = *ppContext;
if (pContext == NULL)
{
pContext = new (nothrow) EtwGcMovementContext;
*ppContext = pContext;
}
return pContext;
}
EtwGcMovementContext() :
iCurBulkSurvivingObjectRanges(0),
iCurBulkMovedObjectRanges(0)
{
LIMITED_METHOD_CONTRACT;
Clear();
}
// Resets structure for reuse on construction, and after each flush.
// (Intentionally leave iCurBulk* as is, since they persist across flushes within a GC.)
void Clear()
{
LIMITED_METHOD_CONTRACT;
cBulkSurvivingObjectRanges = 0;
cBulkMovedObjectRanges = 0;
ZeroMemory(rgGCBulkSurvivingObjectRanges, sizeof(rgGCBulkSurvivingObjectRanges));
ZeroMemory(rgGCBulkMovedObjectRanges, sizeof(rgGCBulkMovedObjectRanges));
}
//---------------------------------------------------------------------------------------
// GCBulkSurvivingObjectRanges
//---------------------------------------------------------------------------------------
// Sequence number for each GCBulkSurvivingObjectRanges event
UINT iCurBulkSurvivingObjectRanges;
// Number of surviving object ranges currently filled out in rgGCBulkSurvivingObjectRanges array
UINT cBulkSurvivingObjectRanges;
// Struct array containing the primary data for each GCBulkSurvivingObjectRanges
// event. Fix the size so the total event stays well below the 64K limit (leaving
// lots of room for non-struct fields that come before the values data)
EventStructGCBulkSurvivingObjectRangesValue rgGCBulkSurvivingObjectRanges[
(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkSurvivingObjectRangesValue)];
//---------------------------------------------------------------------------------------
// GCBulkMovedObjectRanges
//---------------------------------------------------------------------------------------
// Sequence number for each GCBulkMovedObjectRanges event
UINT iCurBulkMovedObjectRanges;
// Number of Moved object ranges currently filled out in rgGCBulkMovedObjectRanges array
UINT cBulkMovedObjectRanges;
// Struct array containing the primary data for each GCBulkMovedObjectRanges
// event. Fix the size so the total event stays well below the 64K limit (leaving
// lots of room for non-struct fields that come before the values data)
EventStructGCBulkMovedObjectRangesValue rgGCBulkMovedObjectRanges[
(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkMovedObjectRangesValue)];
};
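// Sizing note (illustrative arithmetic, not authoritative): assuming cbMaxEtwEvent is the
// 64K event-size ceiling referenced above and 0x100 bytes are reserved for the fixed
// fields, each range array holds roughly (0x10000 - 0x100) / sizeof(range struct) entries.
// For a hypothetical 24-byte range struct that is ~2700 ranges per bulk event; the exact
// count depends on the real struct sizes in eventtracepriv.h.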
// Contains above struct for ETW, plus extra info (opaque to us) used by the profiling
// API to track its own information.
struct MovedReferenceContextForEtwAndProfapi
{
// An instance of MovedReferenceContextForEtwAndProfapi is dynamically allocated and
// pointed to by a profiling_context pointer created by the GC on the stack. This is used to
// batch and send GCBulkSurvivingObjectRanges events and GCBulkMovedObjectRanges
// events and the corresponding callbacks for profapi profilers. This method is
// passed a pointer to a MovedReferenceContextForEtwAndProfapi; if non-NULL it gets
// returned; else, a new MovedReferenceContextForEtwAndProfapi is allocated, stored
// in that pointer, and then returned. Callers should test for NULL, which can be
// returned if out of memory
static MovedReferenceContextForEtwAndProfapi * CreateInGCContext(LPVOID pvContext)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(pvContext != NULL);
MovedReferenceContextForEtwAndProfapi * pContext = *(MovedReferenceContextForEtwAndProfapi **) pvContext;
// Shouldn't be called if the context was already created. Perhaps someone made
// one too many BeginMovedReferences calls, or didn't have an EndMovedReferences
// in between?
_ASSERTE(pContext == NULL);
pContext = new (nothrow) MovedReferenceContextForEtwAndProfapi;
*(MovedReferenceContextForEtwAndProfapi **) pvContext = pContext;
return pContext;
}
MovedReferenceContextForEtwAndProfapi() :
pctxProfAPI(NULL),
pctxEtw(NULL)
{
LIMITED_METHOD_CONTRACT;
}
LPVOID pctxProfAPI;
EtwGcMovementContext * pctxEtw;
};
//---------------------------------------------------------------------------------------
//
// Called by the GC for each moved or surviving reference that it encounters. This
// batches the info into our context's buffer, and flushes that buffer to ETW as it fills
// up.
//
// Arguments:
// * pbMemBlockStart - Start of moved/surviving block
// * pbMemBlockEnd - Next pointer after end of moved/surviving block
// * cbRelocDistance - How far did the block move? (0 for non-compacted / surviving
// references; negative if moved to earlier addresses)
// * profilingContext - Where our context is stored
// * fCompacting - Is this a compacting GC? Used to decide whether to send the moved
// or surviving event
//
// static
void ETW::GCLog::MovedReference(
BYTE * pbMemBlockStart,
BYTE * pbMemBlockEnd,
ptrdiff_t cbRelocDistance,
size_t profilingContext,
BOOL fCompacting,
BOOL fAllowProfApiNotification /* = TRUE */)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
CAN_TAKE_LOCK; // EEToProfInterfaceImpl::AllocateMovedReferencesData takes lock
}
CONTRACTL_END;
MovedReferenceContextForEtwAndProfapi * pCtxForEtwAndProfapi =
(MovedReferenceContextForEtwAndProfapi *) profilingContext;
if (pCtxForEtwAndProfapi == NULL)
{
_ASSERTE(!"MovedReference() encountered a NULL profilingContext");
return;
}
#ifdef PROFILING_SUPPORTED
// ProfAPI
if (fAllowProfApiNotification)
{
BEGIN_PIN_PROFILER(CORProfilerTrackGC());
g_profControlBlock.pProfInterface->MovedReference(pbMemBlockStart,
pbMemBlockEnd,
cbRelocDistance,
&(pCtxForEtwAndProfapi->pctxProfAPI),
fCompacting);
END_PIN_PROFILER();
}
#endif // PROFILING_SUPPORTED
// ETW
if (!ShouldTrackMovementForEtw())
return;
EtwGcMovementContext * pContext =
EtwGcMovementContext::GetOrCreateInGCContext(&pCtxForEtwAndProfapi->pctxEtw);
if (pContext == NULL)
return;
if (fCompacting)
{
// Moved references
_ASSERTE(pContext->cBulkMovedObjectRanges < _countof(pContext->rgGCBulkMovedObjectRanges));
EventStructGCBulkMovedObjectRangesValue * pValue =
&pContext->rgGCBulkMovedObjectRanges[pContext->cBulkMovedObjectRanges];
pValue->OldRangeBase = pbMemBlockStart;
pValue->NewRangeBase = pbMemBlockStart + cbRelocDistance;
pValue->RangeLength = pbMemBlockEnd - pbMemBlockStart;
pContext->cBulkMovedObjectRanges++;
// If buffer is now full, empty it into ETW
if (pContext->cBulkMovedObjectRanges == _countof(pContext->rgGCBulkMovedObjectRanges))
{
FireEtwGCBulkMovedObjectRanges(
pContext->iCurBulkMovedObjectRanges,
pContext->cBulkMovedObjectRanges,
GetClrInstanceId(),
sizeof(pContext->rgGCBulkMovedObjectRanges[0]),
&pContext->rgGCBulkMovedObjectRanges[0]);
pContext->iCurBulkMovedObjectRanges++;
pContext->Clear();
}
}
else
{
// Surviving references
_ASSERTE(pContext->cBulkSurvivingObjectRanges < _countof(pContext->rgGCBulkSurvivingObjectRanges));
EventStructGCBulkSurvivingObjectRangesValue * pValue =
&pContext->rgGCBulkSurvivingObjectRanges[pContext->cBulkSurvivingObjectRanges];
pValue->RangeBase = pbMemBlockStart;
pValue->RangeLength = pbMemBlockEnd - pbMemBlockStart;
pContext->cBulkSurvivingObjectRanges++;
// If buffer is now full, empty it into ETW
if (pContext->cBulkSurvivingObjectRanges == _countof(pContext->rgGCBulkSurvivingObjectRanges))
{
FireEtwGCBulkSurvivingObjectRanges(
pContext->iCurBulkSurvivingObjectRanges,
pContext->cBulkSurvivingObjectRanges,
GetClrInstanceId(),
sizeof(pContext->rgGCBulkSurvivingObjectRanges[0]),
&pContext->rgGCBulkSurvivingObjectRanges[0]);
pContext->iCurBulkSurvivingObjectRanges++;
pContext->Clear();
}
}
}
//---------------------------------------------------------------------------------------
//
// Called by the GC just before it begins enumerating plugs. Gives us a chance to
// allocate our context structure, to allow us to batch plugs before firing events
// for them
//
// Arguments:
// * pProfilingContext - Points to location on stack (in GC function) where we can
// store a pointer to the context we allocate
//
// static
VOID ETW::GCLog::BeginMovedReferences(size_t * pProfilingContext)
{
LIMITED_METHOD_CONTRACT;
MovedReferenceContextForEtwAndProfapi::CreateInGCContext(LPVOID(pProfilingContext));
}
//---------------------------------------------------------------------------------------
//
// Called by the GC at the end of a heap walk to give us a place to flush any remaining
// buffers of data to ETW or the profapi profiler
//
// Arguments:
// profilingContext - Our context we built up during the heap walk
//
// static
VOID ETW::GCLog::EndMovedReferences(size_t profilingContext, BOOL fAllowProfApiNotification /* = TRUE */)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
CAN_TAKE_LOCK;
}
CONTRACTL_END;
MovedReferenceContextForEtwAndProfapi * pCtxForEtwAndProfapi = (MovedReferenceContextForEtwAndProfapi *) profilingContext;
if (pCtxForEtwAndProfapi == NULL)
{
_ASSERTE(!"EndMovedReferences() encountered a NULL profilingContext");
return;
}
#ifdef PROFILING_SUPPORTED
// ProfAPI
if (fAllowProfApiNotification)
{
BEGIN_PIN_PROFILER(CORProfilerTrackGC());
g_profControlBlock.pProfInterface->EndMovedReferences(&(pCtxForEtwAndProfapi->pctxProfAPI));
END_PIN_PROFILER();
}
#endif //PROFILING_SUPPORTED
// ETW
if (!ShouldTrackMovementForEtw())
return;
// If context isn't already set up for us, then we haven't been collecting any data
// for ETW events.
EtwGcMovementContext * pContext = pCtxForEtwAndProfapi->pctxEtw;
if (pContext == NULL)
return;
// Flush any remaining moved or surviving range data
if (pContext->cBulkMovedObjectRanges > 0)
{
FireEtwGCBulkMovedObjectRanges(
pContext->iCurBulkMovedObjectRanges,
pContext->cBulkMovedObjectRanges,
GetClrInstanceId(),
sizeof(pContext->rgGCBulkMovedObjectRanges[0]),
&pContext->rgGCBulkMovedObjectRanges[0]);
}
if (pContext->cBulkSurvivingObjectRanges > 0)
{
FireEtwGCBulkSurvivingObjectRanges(
pContext->iCurBulkSurvivingObjectRanges,
pContext->cBulkSurvivingObjectRanges,
GetClrInstanceId(),
sizeof(pContext->rgGCBulkSurvivingObjectRanges[0]),
&pContext->rgGCBulkSurvivingObjectRanges[0]);
}
pCtxForEtwAndProfapi->pctxEtw = NULL;
delete pContext;
}
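// Expected call sequence from the GC during a heap walk (sketch inferred from the
// signatures above; the actual call sites live in the GC):
//
//   size_t profilingContext = 0;
//   ETW::GCLog::BeginMovedReferences(&profilingContext);   // allocate the context
//   // ... for each moved or surviving plug encountered:
//   ETW::GCLog::MovedReference(start, end, relocDistance, profilingContext, fCompacting);
//   ETW::GCLog::EndMovedReferences(profilingContext);      // flush any remaining ranges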
/***************************************************************************/
/* This implements the public runtime provider's GCHeapCollectKeyword. It
performs a full, gen-2, blocking GC. */
/***************************************************************************/
VOID ETW::GCLog::ForceGC(LONGLONG l64ClientSequenceNumber)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
#ifndef FEATURE_REDHAWK
if (!IsGarbageCollectorFullyInitialized())
return;
#endif // FEATURE_REDHAWK
InterlockedExchange64(&s_l64LastClientSequenceNumber, l64ClientSequenceNumber);
ForceGCForDiagnostics();
}
//---------------------------------------------------------------------------------------
//
// Helper to fire the GCStart event. Figures out which version of GCStart to fire, and
// includes the client sequence number, if available.
//
// Arguments:
// pGcInfo - ETW_GC_INFO containing details from GC about this collection
//
// static
VOID ETW::GCLog::FireGcStart(ETW_GC_INFO * pGcInfo)
{
LIMITED_METHOD_CONTRACT;
if (ETW_TRACING_CATEGORY_ENABLED(
MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_GC_KEYWORD))
{
// If the controller specified a client sequence number for us to log with this
// GCStart, then retrieve it
LONGLONG l64ClientSequenceNumberToLog = 0;
if ((s_l64LastClientSequenceNumber != 0) &&
(pGcInfo->GCStart.Depth == GCHeapUtilities::GetGCHeap()->GetMaxGeneration()) &&
(pGcInfo->GCStart.Reason == ETW_GC_INFO::GC_INDUCED))
{
l64ClientSequenceNumberToLog = InterlockedExchange64(&s_l64LastClientSequenceNumber, 0);
}
FireEtwGCStart_V2(pGcInfo->GCStart.Count, pGcInfo->GCStart.Depth, pGcInfo->GCStart.Reason, pGcInfo->GCStart.Type, GetClrInstanceId(), l64ClientSequenceNumberToLog);
}
}
//---------------------------------------------------------------------------------------
//
// Contains code common to profapi and ETW scenarios where the profiler wants to force
// the CLR to perform a GC. The important work here is to create a managed thread for
// the current thread BEFORE the GC begins. On both ETW and profapi threads, there may
// not yet be a managed thread object. But some scenarios require a managed thread
// object be present (notably if we need to call into Jupiter during the GC).
//
// Return Value:
// HRESULT indicating success or failure
//
// Assumptions:
// Caller should ensure that the EE has fully started up and that the GC heap is
// initialized enough to actually perform a GC
//
// static
HRESULT ETW::GCLog::ForceGCForDiagnostics()
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
HRESULT hr = E_FAIL;
#ifndef FEATURE_REDHAWK
// Caller should ensure we're past startup.
_ASSERTE(IsGarbageCollectorFullyInitialized());
// In immersive apps the GarbageCollect() call below will call into Jupiter,
// which will call back into the runtime to track references. This call
// chain would cause a Thread object to be created for this thread while code
// higher on the stack owns the ThreadStoreLock. This will lead to asserts
// since the ThreadStoreLock is non-reentrant. To avoid this we'll create
// the Thread object here instead.
if (GetThreadNULLOk() == NULL)
{
HRESULT hr = E_FAIL;
SetupThreadNoThrow(&hr);
if (FAILED(hr))
return hr;
}
ASSERT_NO_EE_LOCKS_HELD();
EX_TRY
{
// Need to switch to cooperative mode as the thread will access managed
// references (through Jupiter callbacks).
GCX_COOP();
#endif // FEATURE_REDHAWK
ForcedGCHolder forcedGCHolder;
hr = GCHeapUtilities::GetGCHeap()->GarbageCollect(
-1, // all generations should be collected
false, // low_memory_p
collection_blocking);
#ifndef FEATURE_REDHAWK
}
EX_CATCH { }
EX_END_CATCH(RethrowCorruptingExceptions);
#endif // FEATURE_REDHAWK
return hr;
}
//---------------------------------------------------------------------------------------
// WalkStaticsAndCOMForETW walks both CCW/RCW objects and static variables.
//---------------------------------------------------------------------------------------
VOID ETW::GCLog::WalkStaticsAndCOMForETW()
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
}
CONTRACTL_END;
EX_TRY
{
BulkTypeEventLogger typeLogger;
// Walk RCWs/CCWs
BulkComLogger comLogger(&typeLogger);
comLogger.LogAllComObjects();
// Walk static variables
BulkStaticsLogger staticLogger(&typeLogger);
staticLogger.LogAllStatics();
// Ensure all loggers have written all events; fire the type logger last to batch events
// (FireBulkComEvent or FireBulkStaticsEvent may queue up additional types).
comLogger.FireBulkComEvent();
staticLogger.FireBulkStaticsEvent();
typeLogger.FireBulkTypeEvent();
}
EX_CATCH
{
}
EX_END_CATCH(SwallowAllExceptions);
}
//---------------------------------------------------------------------------------------
// BulkComLogger: Batches up and logs COM object (RCW / CCW) roots
//---------------------------------------------------------------------------------------
BulkComLogger::BulkComLogger(BulkTypeEventLogger *typeLogger)
: m_currRcw(0), m_currCcw(0), m_typeLogger(typeLogger), m_etwRcwData(0), m_etwCcwData(0), m_enumResult(0)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
m_etwRcwData = new EventRCWEntry[kMaxRcwCount];
m_etwCcwData = new EventCCWEntry[kMaxCcwCount];
}
BulkComLogger::~BulkComLogger()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
FireBulkComEvent();
if (m_etwRcwData)
delete [] m_etwRcwData;
if (m_etwCcwData)
delete [] m_etwCcwData;
if (m_enumResult)
{
CCWEnumerationEntry *curr = m_enumResult;
while (curr)
{
CCWEnumerationEntry *next = curr->Next;
delete curr;
curr = next;
}
}
}
void BulkComLogger::FireBulkComEvent()
{
WRAPPER_NO_CONTRACT;
FlushRcw();
FlushCcw();
}
void BulkComLogger::WriteRcw(RCW *pRcw, Object *obj)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(pRcw != NULL);
PRECONDITION(obj != NULL);
}
CONTRACTL_END;
_ASSERTE(m_currRcw < kMaxRcwCount);
#ifdef FEATURE_COMINTEROP
EventRCWEntry &rcw = m_etwRcwData[m_currRcw];
rcw.ObjectID = (ULONGLONG)obj;
rcw.TypeID = (ULONGLONG)obj->GetTypeHandle().AsTAddr();
rcw.IUnk = (ULONGLONG)pRcw->GetIUnknown_NoAddRef();
rcw.VTable = (ULONGLONG)pRcw->GetVTablePtr();
rcw.RefCount = pRcw->GetRefCount();
rcw.Flags = 0;
if (++m_currRcw >= kMaxRcwCount)
FlushRcw();
#endif
}
void BulkComLogger::FlushRcw()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(m_currRcw <= kMaxRcwCount);
if (m_currRcw == 0)
return;
if (m_typeLogger)
{
for (int i = 0; i < m_currRcw; ++i)
ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(m_typeLogger, m_etwRcwData[i].TypeID, ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime);
}
unsigned short instance = GetClrInstanceId();
#if !defined(FEATURE_PAL)
EVENT_DATA_DESCRIPTOR eventData[3];
EventDataDescCreate(&eventData[0], &m_currRcw, sizeof(const unsigned int));
EventDataDescCreate(&eventData[1], &instance, sizeof(const unsigned short));
EventDataDescCreate(&eventData[2], m_etwRcwData, sizeof(EventRCWEntry) * m_currRcw);
ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRCW, _countof(eventData), eventData);
#else
ULONG result = FireEtXplatGCBulkRCW(m_currRcw, instance, sizeof(EventRCWEntry) * m_currRcw, m_etwRcwData);
#endif // !defined(FEATURE_PAL)
_ASSERTE(result == ERROR_SUCCESS);
m_currRcw = 0;
}
void BulkComLogger::WriteCcw(ComCallWrapper *pCcw, Object **handle, Object *obj)
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(handle != NULL);
PRECONDITION(obj != NULL);
}
CONTRACTL_END;
_ASSERTE(m_currCcw < kMaxCcwCount);
#ifdef FEATURE_COMINTEROP
IUnknown *iUnk = NULL;
int refCount = 0;
ULONG jupiterRefCount = 0;
ULONG flags = 0;
if (pCcw)
{
iUnk = pCcw->GetOuter();
if (iUnk == NULL)
iUnk = pCcw->GetBasicIP(true);
refCount = pCcw->GetRefCount();
jupiterRefCount = pCcw->GetJupiterRefCount();
if (pCcw->IsWrapperActive())
flags |= EventCCWEntry::Strong;
if (pCcw->IsPegged())
flags |= EventCCWEntry::Pegged;
}
EventCCWEntry &ccw = m_etwCcwData[m_currCcw++];
ccw.RootID = (ULONGLONG)handle;
ccw.ObjectID = (ULONGLONG)obj;
ccw.TypeID = (ULONGLONG)obj->GetTypeHandle().AsTAddr();
ccw.IUnk = (ULONGLONG)iUnk;
ccw.RefCount = refCount;
ccw.JupiterRefCount = jupiterRefCount;
ccw.Flags = flags;
if (m_currCcw >= kMaxCcwCount)
FlushCcw();
#endif
}
void BulkComLogger::FlushCcw()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(m_currCcw <= kMaxCcwCount);
if (m_currCcw == 0)
return;
if (m_typeLogger)
{
for (int i = 0; i < m_currCcw; ++i)
ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(m_typeLogger, m_etwCcwData[i].TypeID, ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime);
}
unsigned short instance = GetClrInstanceId();
#if !defined(FEATURE_PAL)
EVENT_DATA_DESCRIPTOR eventData[3];
EventDataDescCreate(&eventData[0], &m_currCcw, sizeof(const unsigned int));
EventDataDescCreate(&eventData[1], &instance, sizeof(const unsigned short));
EventDataDescCreate(&eventData[2], m_etwCcwData, sizeof(EventCCWEntry) * m_currCcw);
ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootCCW, _countof(eventData), eventData);
#else
ULONG result = FireEtXplatGCBulkRootCCW(m_currCcw, instance, sizeof(EventCCWEntry) * m_currCcw, m_etwCcwData);
#endif //!defined(FEATURE_PAL)
_ASSERTE(result == ERROR_SUCCESS);
m_currCcw = 0;
}
void BulkComLogger::LogAllComObjects()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
#ifdef FEATURE_COMINTEROP
SyncBlockCache *cache = SyncBlockCache::GetSyncBlockCache();
if (cache == NULL)
return;
int count = cache->GetTableEntryCount();
SyncTableEntry *table = SyncTableEntry::GetSyncTableEntry();
for (int i = 0; i < count; ++i)
{
SyncTableEntry &entry = table[i];
Object *obj = entry.m_Object.Load();
if (obj && entry.m_SyncBlock)
{
InteropSyncBlockInfo *interop = entry.m_SyncBlock->GetInteropInfoNoCreate();
if (interop)
{
RCW *rcw = interop->GetRawRCW();
if (rcw)
WriteRcw(rcw, obj);
}
}
}
// We need to do work in HandleWalkCallback which may trigger a GC. We cannot do this while
// enumerating the handle table. Instead, we will build a list of RefCount handles we found
// during the handle table enumeration first (m_enumResult) during this enumeration:
GCHandleUtilities::GetGCHandleManager()->TraceRefCountedHandles(BulkComLogger::HandleWalkCallback, uintptr_t(this), 0);
// Now that we have all of the object handles, we will walk all of the handles and write the
// etw events.
for (CCWEnumerationEntry *curr = m_enumResult; curr; curr = curr->Next)
{
for (int i = 0; i < curr->Count; ++i)
{
Object **handle = curr->Handles[i];
Object *obj = NULL;
if (handle == NULL || (obj = *handle) == 0)
return;
ObjHeader *header = obj->GetHeader();
_ASSERTE(header != NULL);
// We can catch the refcount handle too early where we don't have a CCW, WriteCCW
// handles this case. We still report the refcount handle without the CCW data.
ComCallWrapper *ccw = NULL;
// Checking the index ensures that the syncblock is already created. The
// PassiveGetSyncBlock function does not check bounds, so we have to be sure
// the SyncBlock was already created.
int index = header->GetHeaderSyncBlockIndex();
if (index > 0)
{
SyncBlock *syncBlk = header->PassiveGetSyncBlock();
InteropSyncBlockInfo *interop = syncBlk->GetInteropInfoNoCreate();
if (interop)
ccw = interop->GetCCW();
}
WriteCcw(ccw, handle, obj);
}
}
#endif
}
void BulkComLogger::HandleWalkCallback(Object **handle, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
PRECONDITION(param1 != NULL); // Should be the "this" pointer for BulkComLogger.
PRECONDITION(param2 == 0); // This is set by Ref_TraceRefCountHandles.
}
CONTRACTL_END;
// Simple sanity check to ensure the parameters are what we expect them to be.
_ASSERTE(param2 == 0);
if (handle != NULL)
((BulkComLogger*)param1)->AddCcwHandle(handle);
}
// Used during CCW enumeration to keep track of all object handles which point to a CCW.
void BulkComLogger::AddCcwHandle(Object **handle)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
PRECONDITION(handle != NULL);
}
CONTRACTL_END;
if (m_enumResult == NULL)
m_enumResult = new CCWEnumerationEntry;
CCWEnumerationEntry *curr = m_enumResult;
while (curr->Next)
curr = curr->Next;
if (curr->Count == _countof(curr->Handles))
{
curr->Next = new CCWEnumerationEntry;
curr = curr->Next;
}
curr->Handles[curr->Count++] = handle;
}
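// AddCcwHandle stores handles in a chunked singly-linked list: each CCWEnumerationEntry
// holds a fixed Handles[] array plus a Next pointer, and a new chunk is appended once the
// current tail fills up. This defers the potentially GC-triggering work in WriteCcw until
// after the handle-table enumeration completes (see LogAllComObjects above).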
//---------------------------------------------------------------------------------------
// BulkStaticsLogger: Batches up and logs static variable roots
//---------------------------------------------------------------------------------------
#include "domainfile.h"
BulkStaticsLogger::BulkStaticsLogger(BulkTypeEventLogger *typeLogger)
: m_buffer(0), m_used(0), m_count(0), m_domain(0), m_typeLogger(typeLogger)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
m_buffer = new BYTE[kMaxBytesValues];
}
BulkStaticsLogger::~BulkStaticsLogger()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
if (m_used > 0)
FireBulkStaticsEvent();
if (m_buffer)
delete[] m_buffer;
}
void BulkStaticsLogger::FireBulkStaticsEvent()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
if (m_used <= 0 || m_count <= 0)
return;
_ASSERTE(m_domain != NULL);
unsigned short instance = GetClrInstanceId();
unsigned __int64 appDomain = (unsigned __int64)m_domain;
#if !defined(FEATURE_PAL)
EVENT_DATA_DESCRIPTOR eventData[4];
EventDataDescCreate(&eventData[0], &m_count, sizeof(const unsigned int) );
EventDataDescCreate(&eventData[1], &appDomain, sizeof(unsigned __int64) );
EventDataDescCreate(&eventData[2], &instance, sizeof(const unsigned short) );
EventDataDescCreate(&eventData[3], m_buffer, m_used);
ULONG result = EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootStaticVar, _countof(eventData), eventData);
#else
ULONG result = FireEtXplatGCBulkRootStaticVar(m_count, appDomain, instance, m_used, m_buffer);
#endif //!defined(FEATURE_PAL)
_ASSERTE(result == ERROR_SUCCESS);
m_used = 0;
m_count = 0;
}
void BulkStaticsLogger::WriteEntry(AppDomain *domain, Object **address, Object *obj, FieldDesc *fieldDesc)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
PRECONDITION(domain != NULL);
PRECONDITION(address != NULL);
PRECONDITION(obj != NULL);
PRECONDITION(fieldDesc != NULL);
}
CONTRACTL_END;
// Each bulk statics event is for one AppDomain. If we are now inspecting a new domain,
// we need to flush the built up events now.
if (m_domain != domain)
{
if (m_domain != NULL)
FireBulkStaticsEvent();
m_domain = domain;
}
ULONGLONG th = (ULONGLONG)obj->GetTypeHandle().AsTAddr();
ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(m_typeLogger, th, ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime);
// We should have at least 512 bytes remaining in the buffer here.
int remaining = kMaxBytesValues - m_used;
_ASSERTE(kMaxBytesValues - m_used > 512);
int len = EventStaticEntry::WriteEntry(m_buffer + m_used, remaining, (ULONGLONG)address,
(ULONGLONG)obj, th, 0, fieldDesc);
// 512 bytes was not enough buffer? This shouldn't happen, so we'll skip emitting the
// event on error.
if (len > 0)
{
m_used += len;
m_count++;
}
// When we are close to running out of buffer, emit the event.
if (kMaxBytesValues - m_used < 512)
FireBulkStaticsEvent();
}
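// LogAllStatics below walks the hierarchy AppDomain -> assembly -> module -> typedef ->
// static field, calling WriteEntry for every reference-typed (or boxed value-type) static
// that is not thread- or context-local; WriteEntry batches entries per AppDomain and
// flushes via FireBulkStaticsEvent as the buffer approaches capacity.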
void BulkStaticsLogger::LogAllStatics()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
// Enumerate only active app domains (first parameter). We use the unsafe
// iterator here because this method is called under the threadstore lock
// and it's safe to use while the runtime is suspended.
UnsafeAppDomainIterator appIter(TRUE);
appIter.Init();
while (appIter.Next())
{
AppDomain *domain = appIter.GetDomain();
AppDomain::AssemblyIterator assemblyIter = domain->IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded|kIncludeExecution));
CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
while (assemblyIter.Next(pDomainAssembly.This()))
{
// Make sure the assembly is loaded.
if (!pDomainAssembly->IsLoaded())
continue;
CollectibleAssemblyHolder<Assembly *> pAssembly = pDomainAssembly->GetAssembly();
DomainModuleIterator modIter = pDomainAssembly->IterateModules(kModIterIncludeLoaded);
while (modIter.Next())
{
// Get the domain module from the module/appdomain pair.
Module *module = modIter.GetModule();
if (module == NULL)
continue;
DomainFile *domainFile = module->FindDomainFile(domain);
if (domainFile == NULL)
continue;
// Ensure the module has fully loaded.
if (!domainFile->IsActive())
continue;
DomainLocalModule *domainModule = module->GetDomainLocalModule(domain);
if (domainModule == NULL)
continue;
// Now iterate all types defined in this module
LookupMap<PTR_MethodTable>::Iterator mtIter = module->EnumerateTypeDefs();
while (mtIter.Next())
{
// I don't think mt can be null here, but the dac does a null check...
// IsFullyLoaded should be equivalent to 'GetLoadLevel() == CLASS_LOADED'
MethodTable *mt = mtIter.GetElement();
if (mt == NULL || !mt->IsFullyLoaded())
continue;
EEClass *cls = mt->GetClass();
_ASSERTE(cls != NULL);
if (cls->GetNumStaticFields() <= 0)
continue;
ApproxFieldDescIterator fieldIter(mt, ApproxFieldDescIterator::STATIC_FIELDS);
for (FieldDesc *field = fieldIter.Next(); field != NULL; field = fieldIter.Next())
{
// Don't want thread local or context local
_ASSERTE(field->IsStatic());
if (field->IsSpecialStatic() || field->IsEnCNew())
continue;
// Static valuetype values are boxed.
CorElementType fieldType = field->GetFieldType();
if (fieldType != ELEMENT_TYPE_CLASS && fieldType != ELEMENT_TYPE_VALUETYPE)
continue;
BYTE *base = field->GetBaseInDomainLocalModule(domainModule);
if (base == NULL)
continue;
Object **address = (Object**)field->GetStaticAddressHandle(base);
Object *obj = NULL;
if (address == NULL || ((obj = *address) == NULL))
continue;
WriteEntry(domain, address, *address, field);
} // foreach static field
}
} // foreach domain module
} // foreach domain assembly
} // foreach AppDomain
} // BulkStaticsLogger::LogAllStatics
//---------------------------------------------------------------------------------------
// BulkTypeValue / BulkTypeEventLogger: These take care of batching up types so they can
// be logged via ETW in bulk
//---------------------------------------------------------------------------------------
BulkTypeValue::BulkTypeValue() : cTypeParameters(0)
#ifdef FEATURE_REDHAWK
, ullSingleTypeParameter(0)
#else // FEATURE_REDHAWK
, sName()
#endif // FEATURE_REDHAWK
, rgTypeParameters()
{
LIMITED_METHOD_CONTRACT;
ZeroMemory(&fixedSizedData, sizeof(fixedSizedData));
}
//---------------------------------------------------------------------------------------
//
// Clears a BulkTypeValue so it can be reused after the buffer is flushed to ETW
//
void BulkTypeValue::Clear()
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
ZeroMemory(&fixedSizedData, sizeof(fixedSizedData));
cTypeParameters = 0;
#ifdef FEATURE_REDHAWK
ullSingleTypeParameter = 0;
rgTypeParameters.Release();
#else // FEATURE_REDHAWK
sName.Clear();
rgTypeParameters.Clear();
#endif // FEATURE_REDHAWK
}
//---------------------------------------------------------------------------------------
//
// Fire an ETW event for all the types we batched so far, and then reset our state
// so we can start batching new types at the beginning of the array.
//
//
void BulkTypeEventLogger::FireBulkTypeEvent()
{
LIMITED_METHOD_CONTRACT;
if (m_nBulkTypeValueCount == 0)
{
// No types were batched up, so nothing to send
return;
}
UINT16 nClrInstanceID = GetClrInstanceId();
#if !defined(FEATURE_PAL)
// Normally, we'd use the MC-generated FireEtwBulkType for all this gunk, but
// it's insufficient as the bulk type event is too complex (arrays of structs of
// varying size). So we directly log the event via EventDataDescCreate and
// EventWrite
// We use one descriptor for the count + one for the ClrInstanceID + 4
// per batched type (to include fixed-size data + name + param count + param
// array). But the system limit of 128 descriptors per event kicks in way
// before the 64K event size limit, and we already limit our batch size
// (m_nBulkTypeValueCount) to stay within the 128 descriptor limit.
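// Worked example of that budget (illustrative): 2 fixed descriptors + 4 per type means a
// batch of N types needs 2 + 4*N descriptors, so the 128-descriptor cap allows at most
// (128 - 2) / 4 = 31 types per event, independent of the 64K size limit.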
EVENT_DATA_DESCRIPTOR EventData[128];
UINT iDesc = 0;
_ASSERTE(iDesc < _countof(EventData));
EventDataDescCreate(&EventData[iDesc++], &m_nBulkTypeValueCount, sizeof(m_nBulkTypeValueCount));
_ASSERTE(iDesc < _countof(EventData));
EventDataDescCreate(&EventData[iDesc++], &nClrInstanceID, sizeof(nClrInstanceID));
for (int iTypeData = 0; iTypeData < m_nBulkTypeValueCount; iTypeData++)
{
// Do fixed-size data as one bulk copy
_ASSERTE(iDesc < _countof(EventData));
EventDataDescCreate(
&EventData[iDesc++],
&(m_rgBulkTypeValues[iTypeData].fixedSizedData),
sizeof(m_rgBulkTypeValues[iTypeData].fixedSizedData));
// Do var-sized data individually per field
// Type name (nonexistent and thus empty on FEATURE_REDHAWK)
_ASSERTE(iDesc < _countof(EventData));
#ifdef FEATURE_REDHAWK
EventDataDescCreate(&EventData[iDesc++], W(""), sizeof(WCHAR));
#else // FEATURE_REDHAWK
LPCWSTR wszName = m_rgBulkTypeValues[iTypeData].sName.GetUnicode();
EventDataDescCreate(
&EventData[iDesc++],
(wszName == NULL) ? W("") : wszName,
(wszName == NULL) ? sizeof(WCHAR) : (m_rgBulkTypeValues[iTypeData].sName.GetCount() + 1) * sizeof(WCHAR));
#endif // FEATURE_REDHAWK
// Type parameter count
#ifndef FEATURE_REDHAWK
m_rgBulkTypeValues[iTypeData].cTypeParameters = m_rgBulkTypeValues[iTypeData].rgTypeParameters.GetCount();
#endif // FEATURE_REDHAWK
_ASSERTE(iDesc < _countof(EventData));
EventDataDescCreate(
&EventData[iDesc++],
&(m_rgBulkTypeValues[iTypeData].cTypeParameters),
sizeof(m_rgBulkTypeValues[iTypeData].cTypeParameters));
// Type parameter array
if (m_rgBulkTypeValues[iTypeData].cTypeParameters > 0)
{
_ASSERTE(iDesc < _countof(EventData));
EventDataDescCreate(
&EventData[iDesc++],
#ifdef FEATURE_REDHAWK
((m_rgBulkTypeValues[iTypeData].cTypeParameters == 1) ?
&(m_rgBulkTypeValues[iTypeData].ullSingleTypeParameter) :
(ULONGLONG *) (m_rgBulkTypeValues[iTypeData].rgTypeParameters)),
#else
m_rgBulkTypeValues[iTypeData].rgTypeParameters.GetElements(),
#endif
sizeof(ULONGLONG) * m_rgBulkTypeValues[iTypeData].cTypeParameters);
}
}
Win32EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &BulkType, iDesc, EventData);
#else // FEATURE_PAL
if(m_pBulkTypeEventBuffer == NULL)
{
// The buffer could not be allocated when this object was created, so bail.
return;
}
UINT iSize = 0;
for (int iTypeData = 0; iTypeData < m_nBulkTypeValueCount; iTypeData++)
{
BulkTypeValue& target = m_rgBulkTypeValues[iTypeData];
// Do fixed-size data as one bulk copy
memcpy(
m_pBulkTypeEventBuffer + iSize,
&(target.fixedSizedData),
sizeof(target.fixedSizedData));
iSize += sizeof(target.fixedSizedData);
// Do var-sized data individually per field
LPCWSTR wszName = target.sName.GetUnicode();
if (wszName == NULL)
{
m_pBulkTypeEventBuffer[iSize++] = 0;
m_pBulkTypeEventBuffer[iSize++] = 0;
}
else
{
UINT nameSize = (target.sName.GetCount() + 1) * sizeof(WCHAR);
memcpy(m_pBulkTypeEventBuffer + iSize, wszName, nameSize);
iSize += nameSize;
}
// Type parameter count
ULONG params = target.rgTypeParameters.GetCount();
ULONG *ptrInt = (ULONG*)(m_pBulkTypeEventBuffer + iSize);
*ptrInt = params;
iSize += 4;
target.cTypeParameters = params;
// Type parameter array
if (target.cTypeParameters > 0)
{
memcpy(m_pBulkTypeEventBuffer + iSize, target.rgTypeParameters.GetElements(), sizeof(ULONGLONG) * target.cTypeParameters);
iSize += sizeof(ULONGLONG) * target.cTypeParameters;
}
}
FireEtwBulkType(m_nBulkTypeValueCount, GetClrInstanceId(), iSize, m_pBulkTypeEventBuffer);
#endif // FEATURE_PAL
// Reset state
m_nBulkTypeValueCount = 0;
m_nBulkTypeValueByteCount = 0;
}
#ifndef FEATURE_REDHAWK
//---------------------------------------------------------------------------------------
//
// Batches a single type into the array, flushing the array to ETW if it fills up. Most
// interaction with the type system (to analyze the type) is done here. This does not
// recursively batch up any parameter types (for arrays or generics), but does add their
// TypeHandles to the rgTypeParameters array. LogTypeAndParameters is responsible for
// initiating any recursive calls to deal with type parameters.
//
// Arguments:
// th - TypeHandle to batch
//
// Return Value:
// Index into array of where this type got batched. -1 if there was a failure.
//
int BulkTypeEventLogger::LogSingleType(TypeHandle th)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
CAN_TAKE_LOCK; // some of the type system stuff can take locks
}
CONTRACTL_END;
// If there's no room for another type, flush what we've got
if (m_nBulkTypeValueCount == _countof(m_rgBulkTypeValues))
{
FireBulkTypeEvent();
}
_ASSERTE(m_nBulkTypeValueCount < _countof(m_rgBulkTypeValues));
if (!th.IsTypeDesc() && th.GetMethodTable()->IsArray())
{
_ASSERTE(!"BulkTypeEventLogger::LogSingleType called with MethodTable array");
return -1;
}
BulkTypeValue * pVal = &m_rgBulkTypeValues[m_nBulkTypeValueCount];
// Clear out pVal before filling it out (array elements can get reused if there
// are enough types that we need to flush to multiple events). Clearing the
// contained SBuffer can throw, so deal with exceptions
BOOL fSucceeded = FALSE;
EX_TRY
{
pVal->Clear();
fSucceeded = TRUE;
}
EX_CATCH
{
fSucceeded = FALSE;
}
EX_END_CATCH(RethrowCorruptingExceptions);
if (!fSucceeded)
return -1;
pVal->fixedSizedData.TypeID = (ULONGLONG) th.AsTAddr();
pVal->fixedSizedData.ModuleID = (ULONGLONG) (TADDR) th.GetModule();
pVal->fixedSizedData.TypeNameID = (th.GetMethodTable() == NULL) ? 0 : th.GetCl();
pVal->fixedSizedData.Flags = 0;
pVal->fixedSizedData.CorElementType = (BYTE) th.GetInternalCorElementType();
if (th.IsArray())
{
// Normal typedesc array
pVal->fixedSizedData.Flags |= kEtwTypeFlagsArray;
// Fetch TypeHandle of array elements
fSucceeded = FALSE;
EX_TRY
{
pVal->rgTypeParameters.Append((ULONGLONG) th.AsArray()->GetArrayElementTypeHandle().AsTAddr());
fSucceeded = TRUE;
}
EX_CATCH
{
fSucceeded = FALSE;
}
EX_END_CATCH(RethrowCorruptingExceptions);
if (!fSucceeded)
return -1;
}
else if (th.IsTypeDesc())
{
// Non-array Typedescs
PTR_TypeDesc pTypeDesc = th.AsTypeDesc();
if (pTypeDesc->HasTypeParam())
{
fSucceeded = FALSE;
EX_TRY
{
pVal->rgTypeParameters.Append((ULONGLONG) pTypeDesc->GetTypeParam().AsTAddr());
fSucceeded = TRUE;
}
EX_CATCH
{
fSucceeded = FALSE;
}
EX_END_CATCH(RethrowCorruptingExceptions);
if (!fSucceeded)
return -1;
}
}
else
{
// Non-array MethodTable
PTR_MethodTable pMT = th.AsMethodTable();
// Make CorElementType more specific if this is a string MT
if (pMT->IsString())
{
pVal->fixedSizedData.CorElementType = ELEMENT_TYPE_STRING;
}
else if (pMT->IsObjectClass())
{
pVal->fixedSizedData.CorElementType = ELEMENT_TYPE_OBJECT;
}
// Generic arguments
DWORD cTypeParameters = pMT->GetNumGenericArgs();
if (cTypeParameters > 0)
{
Instantiation inst = pMT->GetInstantiation();
fSucceeded = FALSE;
EX_TRY
{
for (DWORD i=0; i < cTypeParameters; i++)
{
pVal->rgTypeParameters.Append((ULONGLONG) inst[i].AsTAddr());
}
fSucceeded = TRUE;
}
EX_CATCH
{
fSucceeded = FALSE;
}
EX_END_CATCH(RethrowCorruptingExceptions);
if (!fSucceeded)
return -1;
}
if (pMT->HasFinalizer())
{
pVal->fixedSizedData.Flags |= kEtwTypeFlagsFinalizable;
}
if (pMT->IsDelegate())
{
pVal->fixedSizedData.Flags |= kEtwTypeFlagsDelegate;
}
if (pMT->IsComObjectType())
{
pVal->fixedSizedData.Flags |= kEtwTypeFlagsExternallyImplementedCOMObject;
}
}
// If the ETW session requests type names, construct a name. Always normalize the string (even if
// type names are not requested) so that calls to sName.GetCount() can't throw
EX_TRY
{
if (ETW_TRACING_CATEGORY_ENABLED(
MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_GCHEAPANDTYPENAMES_KEYWORD))
{
th.GetName(pVal->sName);
}
pVal->sName.Normalize();
}
EX_CATCH
{
// If this failed, the name remains empty, which is ok; the event just
// won't have a name in it.
pVal->sName.Clear();
}
EX_END_CATCH(RethrowCorruptingExceptions);
// Now that we know the full size of this type's data, see if it fits in our
// batch or whether we need to flush
int cbVal = pVal->GetByteCountInEvent();
if (cbVal > kMaxBytesTypeValues)
{
// This type is apparently so huge, it's too big to squeeze into an event, even
// if it were the only type batched in the whole event. Bail
_ASSERTE(!"Type too big to log via ETW");
return -1;
}
if (m_nBulkTypeValueByteCount + cbVal > kMaxBytesTypeValues)
{
// Although this type fits into the array, its size is so big that the entire
// array can't be logged via ETW. So flush the array, and start over by
// calling ourselves--this refetches the type info and puts it at the
// beginning of the array. Since we know this type is small enough to be
// batched into an event on its own, this recursive call will not try to
// call itself again.
FireBulkTypeEvent();
return LogSingleType(th);
}
// The type fits into the batch, so update our state
m_nBulkTypeValueCount++;
m_nBulkTypeValueByteCount += cbVal;
return m_nBulkTypeValueCount - 1; // Index of type we just added
}
//---------------------------------------------------------------------------------------
//
// High-level method to batch a type and (recursively) its type parameters, flushing to
// ETW as needed. This is called by (static)
// ETW::TypeSystemLog::LogTypeAndParametersIfNecessary, which is what clients use to log
// type events
//
// Arguments:
// * thAsAddr - Type to batch
// * typeLogBehavior - Reminder of whether the type system log lock is held
// (useful if we need to recursively call back into TypeSystemLog), and whether
// we even care to check if the type was already logged
//
void BulkTypeEventLogger::LogTypeAndParameters(ULONGLONG thAsAddr, ETW::TypeSystemLog::TypeLogBehavior typeLogBehavior)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
CAN_TAKE_LOCK; // LogSingleType can take locks
}
CONTRACTL_END;
TypeHandle th = TypeHandle::FromTAddr((TADDR) thAsAddr);
// Batch up this type. This grabs useful info about the type, including any
// type parameters it may have, and sticks it in m_rgBulkTypeValues
int iBulkTypeEventData = LogSingleType(th);
if (iBulkTypeEventData == -1)
{
// There was a failure trying to log the type, so don't bother with its type
// parameters
return;
}
// Look at the type info we just batched, so we can get the type parameters
BulkTypeValue * pVal = &m_rgBulkTypeValues[iBulkTypeEventData];
// We're about to recursively call ourselves for the type parameters, so make a
// local copy of their type handles first (else, as we log them we could flush
// and clear out m_rgBulkTypeValues, thus trashing pVal)
StackSArray<ULONGLONG> rgTypeParameters;
DWORD cParams = pVal->rgTypeParameters.GetCount();
BOOL fSucceeded = FALSE;
EX_TRY
{
for (COUNT_T i = 0; i < cParams; i++)
{
rgTypeParameters.Append(pVal->rgTypeParameters[i]);
}
fSucceeded = TRUE;
}
EX_CATCH
{
fSucceeded = FALSE;
}
EX_END_CATCH(RethrowCorruptingExceptions);
if (!fSucceeded)
return;
// Before we recurse, adjust the special-cased type-log behavior that allows a
// top-level type to be logged without lookup, but still requires lookups to avoid
// dupes of type parameters
if (typeLogBehavior == ETW::TypeSystemLog::kTypeLogBehaviorAlwaysLogTopLevelType)
typeLogBehavior = ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime;
// Recursively log any referenced parameter types
for (COUNT_T i=0; i < cParams; i++)
{
ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(this, rgTypeParameters[i], typeLogBehavior);
}
}
#endif // FEATURE_REDHAWK
// Holds state that batches of roots, nodes, edges, and types as the GC walks the heap
// at the end of a collection.
class EtwGcHeapDumpContext
{
public:
// An instance of EtwGcHeapDumpContext is dynamically allocated and stored inside of
// ProfilingScanContext and ProfilerWalkHeapContext, which are context structures
// that the GC heap walker sends back to the callbacks. This method is passed a
// pointer to ProfilingScanContext::pvEtwContext or
// ProfilerWalkHeapContext::pvEtwContext; if non-NULL it gets returned; else, a new
// EtwGcHeapDumpContext is allocated, stored in that pointer, and then returned.
// Callers should test for NULL, which can be returned if out of memory
static EtwGcHeapDumpContext * GetOrCreateInGCContext(LPVOID * ppvEtwContext)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(ppvEtwContext != NULL);
EtwGcHeapDumpContext * pContext = (EtwGcHeapDumpContext *) *ppvEtwContext;
if (pContext == NULL)
{
pContext = new (nothrow) EtwGcHeapDumpContext;
*ppvEtwContext = pContext;
}
return pContext;
}
EtwGcHeapDumpContext() :
iCurBulkRootEdge(0),
iCurBulkRootConditionalWeakTableElementEdge(0),
iCurBulkNodeEvent(0),
iCurBulkEdgeEvent(0),
bulkTypeEventLogger()
{
LIMITED_METHOD_CONTRACT;
ClearRootEdges();
ClearRootConditionalWeakTableElementEdges();
ClearNodes();
ClearEdges();
}
// These helpers clear the individual buffers, for use after a flush and on
// construction. They intentionally leave the indices (iCur*) alone, since they
// persist across flushes within a GC
void ClearRootEdges()
{
LIMITED_METHOD_CONTRACT;
cGcBulkRootEdges = 0;
ZeroMemory(rgGcBulkRootEdges, sizeof(rgGcBulkRootEdges));
}
void ClearRootConditionalWeakTableElementEdges()
{
LIMITED_METHOD_CONTRACT;
cGCBulkRootConditionalWeakTableElementEdges = 0;
ZeroMemory(rgGCBulkRootConditionalWeakTableElementEdges, sizeof(rgGCBulkRootConditionalWeakTableElementEdges));
}
void ClearNodes()
{
LIMITED_METHOD_CONTRACT;
cGcBulkNodeValues = 0;
ZeroMemory(rgGcBulkNodeValues, sizeof(rgGcBulkNodeValues));
}
void ClearEdges()
{
LIMITED_METHOD_CONTRACT;
cGcBulkEdgeValues = 0;
ZeroMemory(rgGcBulkEdgeValues, sizeof(rgGcBulkEdgeValues));
}
//---------------------------------------------------------------------------------------
// GCBulkRootEdge
//
// A "root edge" is the relationship between a source "GCRootID" (i.e., stack
// variable, handle, static, etc.) and the target "RootedNodeAddress" (the managed
// object that gets rooted).
//
//---------------------------------------------------------------------------------------
// Sequence number for each GCBulkRootEdge event
UINT iCurBulkRootEdge;
// Number of root edges currently filled out in rgGcBulkRootEdges array
UINT cGcBulkRootEdges;
// Struct array containing the primary data for each GCBulkRootEdge event. Fix the size so
// the total event stays well below the 64K
// limit (leaving lots of room for non-struct fields that come before the root edge data)
EventStructGCBulkRootEdgeValue rgGcBulkRootEdges[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkRootEdgeValue)];
//---------------------------------------------------------------------------------------
// GCBulkRootConditionalWeakTableElementEdge
//
// These describe dependent handles, which simulate an edge connecting a key NodeID
// to a value NodeID.
//
//---------------------------------------------------------------------------------------
// Sequence number for each GCBulkRootConditionalWeakTableElementEdge event
UINT iCurBulkRootConditionalWeakTableElementEdge;
// Number of root edges currently filled out in rgGCBulkRootConditionalWeakTableElementEdges array
UINT cGCBulkRootConditionalWeakTableElementEdges;
// Struct array containing the primary data for each GCBulkRootConditionalWeakTableElementEdge event. Fix the size so
// the total event stays well below the 64K
// limit (leaving lots of room for non-struct fields that come before the root edge data)
EventStructGCBulkRootConditionalWeakTableElementEdgeValue rgGCBulkRootConditionalWeakTableElementEdges
[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkRootConditionalWeakTableElementEdgeValue)];
//---------------------------------------------------------------------------------------
// GCBulkNode
//
// A "node" is ANY managed object sitting on the heap, including RootedNodeAddresses
// as well as leaf nodes.
//
//---------------------------------------------------------------------------------------
// Sequence number for each GCBulkNode event
UINT iCurBulkNodeEvent;
// Number of nodes currently filled out in rgGcBulkNodeValues array
UINT cGcBulkNodeValues;
// Struct array containing the primary data for each GCBulkNode event. Fix the size so
// the total event stays well below the 64K
// limit (leaving lots of room for non-struct fields that come before the node data)
EventStructGCBulkNodeValue rgGcBulkNodeValues[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkNodeValue)];
//---------------------------------------------------------------------------------------
// GCBulkEdge
//
// An "edge" is the relationship between a source node and its referenced target
// node. Edges are reported in bulk, separately from Nodes, but it is expected that
// the consumer read the Node and Edge streams together. One takes the first node
// from the Node stream, and then reads EdgeCount entries in the Edge stream, telling
// you all of that Node's targets. Then, one takes the next node in the Node stream,
// and reads the next entries in the Edge stream (using this Node's EdgeCount to
// determine how many) to find all of its targets. This continues on until the Node
// and Edge streams have been fully read.
//
// GCBulkRootEdges are not duplicated in the GCBulkEdge events. GCBulkEdge events
// begin at the GCBulkRootEdge.RootedNodeAddress and move forward.
//
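// A decoder-side sketch of that pairing (illustrative only; the names below are
// hypothetical, and this is what an event consumer would do, not this file):
//
//     UINT iEdge = 0;
//     for (UINT iNode = 0; iNode < cNodes; iNode++)
//     {
//         for (UINT i = 0; i < rgNodes[iNode].EdgeCount; i++, iEdge++)
//         {
//             // rgEdges[iEdge].Value is a target referenced by rgNodes[iNode].Address
//         }
//     }
//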
//---------------------------------------------------------------------------------------
// Sequence number for each GCBulkEdge event
UINT iCurBulkEdgeEvent;
// Number of nodes currently filled out in rgGcBulkEdgeValues array
UINT cGcBulkEdgeValues;
// Struct array containing the primary data for each GCBulkEdge event. Fix the size so
// the total event stays well below the 64K
// limit (leaving lots of room for non-struct fields that come before the edge data)
EventStructGCBulkEdgeValue rgGcBulkEdgeValues[(cbMaxEtwEvent - 0x100) / sizeof(EventStructGCBulkEdgeValue)];
//---------------------------------------------------------------------------------------
// BulkType
//
// Types are a bit more complicated to batch up, since their data is of varying
// size. BulkTypeEventLogger takes care of the pesky details for us
//---------------------------------------------------------------------------------------
BulkTypeEventLogger bulkTypeEventLogger;
};
//---------------------------------------------------------------------------------------
//
// Called during a heap walk for each root reference encountered. Batches up the root in
// the ETW context
//
// Arguments:
// * pvHandle - If the root is a handle, this points to the handle
// * pRootedNode - Points to object that is rooted
// * pSecondaryNodeForDependentHandle - For dependent handles, this is the
// secondary object
// * fDependentHandle - nonzero iff this is for a dependent handle
// * profilingScanContext - The shared profapi/etw context built up during the heap walk.
// * dwGCFlags - Bitmask of "GC_"-style flags set by GC
// * rootFlags - Bitmask of EtwGCRootFlags describing the root
//
// static
VOID ETW::GCLog::RootReference(
LPVOID pvHandle,
Object * pRootedNode,
Object * pSecondaryNodeForDependentHandle,
BOOL fDependentHandle,
ProfilingScanContext * profilingScanContext,
DWORD dwGCFlags,
DWORD rootFlags)
{
LIMITED_METHOD_CONTRACT;
EtwGcHeapDumpContext * pContext =
EtwGcHeapDumpContext::GetOrCreateInGCContext(&profilingScanContext->pvEtwContext);
if (pContext == NULL)
return;
// Determine root kind, root ID, and handle-specific flags
LPVOID pvRootID = NULL;
BYTE nRootKind = (BYTE) profilingScanContext->dwEtwRootKind;
switch (nRootKind)
{
case kEtwGCRootKindStack:
#if !defined (FEATURE_REDHAWK) && (defined(GC_PROFILING) || defined (DACCESS_COMPILE))
pvRootID = profilingScanContext->pMD;
#endif // !defined (FEATURE_REDHAWK) && (defined(GC_PROFILING) || defined (DACCESS_COMPILE))
break;
case kEtwGCRootKindHandle:
pvRootID = pvHandle;
break;
case kEtwGCRootKindFinalizer:
_ASSERTE(pvRootID == NULL);
break;
case kEtwGCRootKindOther:
default:
_ASSERTE(nRootKind == kEtwGCRootKindOther);
_ASSERTE(pvRootID == NULL);
break;
}
// Convert GC root flags to ETW root flags
if (dwGCFlags & GC_CALL_INTERIOR)
rootFlags |= kEtwGCRootFlagsInterior;
if (dwGCFlags & GC_CALL_PINNED)
rootFlags |= kEtwGCRootFlagsPinning;
// Add root edge to appropriate buffer
if (fDependentHandle)
{
_ASSERTE(pContext->cGCBulkRootConditionalWeakTableElementEdges <
_countof(pContext->rgGCBulkRootConditionalWeakTableElementEdges));
EventStructGCBulkRootConditionalWeakTableElementEdgeValue * pRCWTEEdgeValue =
&pContext->rgGCBulkRootConditionalWeakTableElementEdges[pContext->cGCBulkRootConditionalWeakTableElementEdges];
pRCWTEEdgeValue->GCKeyNodeID = pRootedNode;
pRCWTEEdgeValue->GCValueNodeID = pSecondaryNodeForDependentHandle;
pRCWTEEdgeValue->GCRootID = pvRootID;
pContext->cGCBulkRootConditionalWeakTableElementEdges++;
// If RCWTE edge buffer is now full, empty it into ETW
if (pContext->cGCBulkRootConditionalWeakTableElementEdges ==
_countof(pContext->rgGCBulkRootConditionalWeakTableElementEdges))
{
FireEtwGCBulkRootConditionalWeakTableElementEdge(
pContext->iCurBulkRootConditionalWeakTableElementEdge,
pContext->cGCBulkRootConditionalWeakTableElementEdges,
GetClrInstanceId(),
sizeof(pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]),
&pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]);
pContext->iCurBulkRootConditionalWeakTableElementEdge++;
pContext->ClearRootConditionalWeakTableElementEdges();
}
}
else
{
_ASSERTE(pContext->cGcBulkRootEdges < _countof(pContext->rgGcBulkRootEdges));
EventStructGCBulkRootEdgeValue * pBulkRootEdgeValue = &pContext->rgGcBulkRootEdges[pContext->cGcBulkRootEdges];
pBulkRootEdgeValue->RootedNodeAddress = pRootedNode;
pBulkRootEdgeValue->GCRootKind = nRootKind;
pBulkRootEdgeValue->GCRootFlag = rootFlags;
pBulkRootEdgeValue->GCRootID = pvRootID;
pContext->cGcBulkRootEdges++;
// If root edge buffer is now full, empty it into ETW
if (pContext->cGcBulkRootEdges == _countof(pContext->rgGcBulkRootEdges))
{
FireEtwGCBulkRootEdge(
pContext->iCurBulkRootEdge,
pContext->cGcBulkRootEdges,
GetClrInstanceId(),
sizeof(pContext->rgGcBulkRootEdges[0]),
&pContext->rgGcBulkRootEdges[0]);
pContext->iCurBulkRootEdge++;
pContext->ClearRootEdges();
}
}
}
//---------------------------------------------------------------------------------------
//
// Called during a heap walk for each object reference encountered. Batches up the
// corresponding node, edges, and type data for the ETW events.
//
// Arguments:
// * profilerWalkHeapContext - The shared profapi/etw context built up during the heap walk.
// * pObjReferenceSource - Object doing the pointing
// * typeID - Type of pObjReferenceSource
// * fDependentHandle - nonzero iff this is for a dependent handle
// * cRefs - Count of objects being pointed to
// * rgObjReferenceTargets - Array of objects being pointed to
//
// static
VOID ETW::GCLog::ObjectReference(
ProfilerWalkHeapContext * profilerWalkHeapContext,
Object * pObjReferenceSource,
ULONGLONG typeID,
ULONGLONG cRefs,
Object ** rgObjReferenceTargets)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
// LogTypeAndParametersIfNecessary can take a lock
CAN_TAKE_LOCK;
}
CONTRACTL_END;
EtwGcHeapDumpContext * pContext =
EtwGcHeapDumpContext::GetOrCreateInGCContext(&profilerWalkHeapContext->pvEtwContext);
if (pContext == NULL)
return;
//---------------------------------------------------------------------------------------
// GCBulkNode events
//---------------------------------------------------------------------------------------
// Add Node (pObjReferenceSource) to buffer
_ASSERTE(pContext->cGcBulkNodeValues < _countof(pContext->rgGcBulkNodeValues));
EventStructGCBulkNodeValue * pBulkNodeValue = &pContext->rgGcBulkNodeValues[pContext->cGcBulkNodeValues];
pBulkNodeValue->Address = pObjReferenceSource;
pBulkNodeValue->Size = pObjReferenceSource->GetSize();
pBulkNodeValue->TypeID = typeID;
pBulkNodeValue->EdgeCount = cRefs;
pContext->cGcBulkNodeValues++;
// If Node buffer is now full, empty it into ETW
if (pContext->cGcBulkNodeValues == _countof(pContext->rgGcBulkNodeValues))
{
FireEtwGCBulkNode(
pContext->iCurBulkNodeEvent,
pContext->cGcBulkNodeValues,
GetClrInstanceId(),
sizeof(pContext->rgGcBulkNodeValues[0]),
&pContext->rgGcBulkNodeValues[0]);
pContext->iCurBulkNodeEvent++;
pContext->ClearNodes();
}
//---------------------------------------------------------------------------------------
// BulkType events
//---------------------------------------------------------------------------------------
// We send type information as necessary--only for nodes, and only for nodes that we
// haven't already sent type info for
if (typeID != 0)
{
ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(
&pContext->bulkTypeEventLogger, // Batch up this type with others to minimize events
typeID,
// During heap walk, GC holds the lock for us, so we can directly enter the
// hash to see if the type has already been logged
ETW::TypeSystemLog::kTypeLogBehaviorTakeLockAndLogIfFirstTime
);
}
//---------------------------------------------------------------------------------------
// GCBulkEdge events
//---------------------------------------------------------------------------------------
// Add Edges (rgObjReferenceTargets) to buffer. Buffer could fill up before all edges
// are added (it could even fill up multiple times during this one call if there are
// a lot of edges), so empty Edge buffer into ETW as we go along, as many times as we
// need.
for (ULONGLONG i=0; i < cRefs; i++)
{
_ASSERTE(pContext->cGcBulkEdgeValues < _countof(pContext->rgGcBulkEdgeValues));
EventStructGCBulkEdgeValue * pBulkEdgeValue = &pContext->rgGcBulkEdgeValues[pContext->cGcBulkEdgeValues];
pBulkEdgeValue->Value = rgObjReferenceTargets[i];
// FUTURE: ReferencingFieldID
pBulkEdgeValue->ReferencingFieldID = 0;
pContext->cGcBulkEdgeValues++;
// If Edge buffer is now full, empty it into ETW
if (pContext->cGcBulkEdgeValues == _countof(pContext->rgGcBulkEdgeValues))
{
FireEtwGCBulkEdge(
pContext->iCurBulkEdgeEvent,
pContext->cGcBulkEdgeValues,
GetClrInstanceId(),
sizeof(pContext->rgGcBulkEdgeValues[0]),
&pContext->rgGcBulkEdgeValues[0]);
pContext->iCurBulkEdgeEvent++;
pContext->ClearEdges();
}
}
}
//---------------------------------------------------------------------------------------
//
// Called by GC at end of heap dump to give us a convenient time to flush any remaining
// buffers of data to ETW
//
// Arguments:
// profilerWalkHeapContext - Context containing data we've batched up
//
// static
VOID ETW::GCLog::EndHeapDump(ProfilerWalkHeapContext * profilerWalkHeapContext)
{
LIMITED_METHOD_CONTRACT;
// If context isn't already set up for us, then we haven't been collecting any data
// for ETW events.
EtwGcHeapDumpContext * pContext = (EtwGcHeapDumpContext *) profilerWalkHeapContext->pvEtwContext;
if (pContext == NULL)
return;
// If the GC events are enabled, flush any remaining root, node, and / or edge data
if (s_forcedGCInProgress &&
ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_GCHEAPDUMP_KEYWORD))
{
if (pContext->cGcBulkRootEdges > 0)
{
FireEtwGCBulkRootEdge(
pContext->iCurBulkRootEdge,
pContext->cGcBulkRootEdges,
GetClrInstanceId(),
sizeof(pContext->rgGcBulkRootEdges[0]),
&pContext->rgGcBulkRootEdges[0]);
}
if (pContext->cGCBulkRootConditionalWeakTableElementEdges > 0)
{
FireEtwGCBulkRootConditionalWeakTableElementEdge(
pContext->iCurBulkRootConditionalWeakTableElementEdge,
pContext->cGCBulkRootConditionalWeakTableElementEdges,
GetClrInstanceId(),
sizeof(pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]),
&pContext->rgGCBulkRootConditionalWeakTableElementEdges[0]);
}
if (pContext->cGcBulkNodeValues > 0)
{
FireEtwGCBulkNode(
pContext->iCurBulkNodeEvent,
pContext->cGcBulkNodeValues,
GetClrInstanceId(),
sizeof(pContext->rgGcBulkNodeValues[0]),
&pContext->rgGcBulkNodeValues[0]);
}
if (pContext->cGcBulkEdgeValues > 0)
{
FireEtwGCBulkEdge(
pContext->iCurBulkEdgeEvent,
pContext->cGcBulkEdgeValues,
GetClrInstanceId(),
sizeof(pContext->rgGcBulkEdgeValues[0]),
&pContext->rgGcBulkEdgeValues[0]);
}
}
// Ditto for type events
if (ETW_TRACING_CATEGORY_ENABLED(
MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_TYPE_KEYWORD))
{
pContext->bulkTypeEventLogger.FireBulkTypeEvent();
}
// Delete any GC state built up in the context
profilerWalkHeapContext->pvEtwContext = NULL;
delete pContext;
}
//---------------------------------------------------------------------------------------
//
// Helper to send public finalize object & type events, and private finalize object
// event. If Type events are enabled, this will send the Type event for the finalized
// objects. It will not be batched with other types (except type parameters, if any),
// and will not check if the Type has already been logged (may thus result in dupe
// logging of the Type).
//
// Arguments:
// pMT - MT of object getting finalized
// pObj - object getting finalized
//
// static
VOID ETW::GCLog::SendFinalizeObjectEvent(MethodTable * pMT, Object * pObj)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
// LogTypeAndParameters locks, and we take our own lock if typeLogBehavior says to
CAN_TAKE_LOCK;
}
CONTRACTL_END;
// Send public finalize object event, if it's enabled
if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, FinalizeObject))
{
FireEtwFinalizeObject(pMT, pObj, GetClrInstanceId());
// This function checks if type events are enabled; if so, it sends event for
// finalized object's type (and parameter types, if any)
ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(
NULL, // Not batching this type with others
(TADDR) pMT,
// Don't spend the time entering the lock and checking the hash table to see
// if we've already logged the type; just log it (if type events are enabled).
ETW::TypeSystemLog::kTypeLogBehaviorAlwaysLog
);
}
// Send private finalize object event, if it's enabled
if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, PrvFinalizeObject))
{
EX_TRY
{
DefineFullyQualifiedNameForClassWOnStack();
FireEtwPrvFinalizeObject(pMT, pObj, GetClrInstanceId(), GetFullyQualifiedNameForClassNestedAwareW(pMT));
}
EX_CATCH
{
}
EX_END_CATCH(RethrowCorruptingExceptions);
}
}
DWORD ETW::ThreadLog::GetEtwThreadFlags(Thread * pThread)
{
LIMITED_METHOD_CONTRACT;
DWORD dwEtwThreadFlags = 0;
if (pThread->IsThreadPoolThread())
{
dwEtwThreadFlags |= kEtwThreadFlagThreadPoolWorker;
}
if (pThread->IsGCSpecial())
{
dwEtwThreadFlags |= kEtwThreadFlagGCSpecial;
}
if (IsGarbageCollectorFullyInitialized() &&
(pThread == FinalizerThread::GetFinalizerThread()))
{
dwEtwThreadFlags |= kEtwThreadFlagFinalizer;
}
return dwEtwThreadFlags;
}
VOID ETW::ThreadLog::FireThreadCreated(Thread * pThread)
{
LIMITED_METHOD_CONTRACT;
FireEtwThreadCreated(
(ULONGLONG)pThread,
(ULONGLONG)pThread->GetDomain(),
GetEtwThreadFlags(pThread),
pThread->GetThreadId(),
pThread->GetOSThreadId(),
GetClrInstanceId());
}
VOID ETW::ThreadLog::FireThreadDC(Thread * pThread)
{
LIMITED_METHOD_CONTRACT;
FireEtwThreadDC(
(ULONGLONG)pThread,
(ULONGLONG)pThread->GetDomain(),
GetEtwThreadFlags(pThread),
pThread->GetThreadId(),
pThread->GetOSThreadId(),
GetClrInstanceId());
}
#ifndef FEATURE_REDHAWK
// TypeSystemLog implementation
//
// We keep track of which TypeHandles have been logged, and stats on instances of these
// TypeHandles that have been allocated, by a hash table of hash tables. The outer hash
// table maps Module*'s to an inner hash table that contains all the TypeLoggingInfos for that
// Module*. Arranging things this way makes it easy to deal with Module unloads, as we
// can simply remove the corresponding inner hash table from the outer hash table.
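// Schematically (illustration only, using the types defined below):
//
//     AllLoggedTypesHash        : Module*    -> LoggedTypesFromModule*
//     LoggedTypesFromModuleHash : TypeHandle -> TypeLoggingInfo
//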
// The following help define the "inner" hash table: a hash table of TypeLoggingInfos
// from a particular Module (key = TypeHandle, value = TypeLoggingInfo).
class LoggedTypesFromModuleTraits : public NoRemoveSHashTraits< DefaultSHashTraits<ETW::TypeLoggingInfo> >
{
public:
// explicitly declare local typedefs for these traits types, otherwise
// the compiler may get confused
typedef NoRemoveSHashTraits< DefaultSHashTraits<ETW::TypeLoggingInfo> > PARENT;
typedef PARENT::element_t element_t;
typedef PARENT::count_t count_t;
typedef TypeHandle key_t;
static key_t GetKey(const element_t &e)
{
LIMITED_METHOD_CONTRACT;
return e.th;
}
static BOOL Equals(key_t k1, key_t k2)
{
LIMITED_METHOD_CONTRACT;
return (k1 == k2);
}
static count_t Hash(key_t k)
{
LIMITED_METHOD_CONTRACT;
return (count_t) k.AsTAddr();
}
static bool IsNull(const element_t &e)
{
LIMITED_METHOD_CONTRACT;
return (e.th.AsTAddr() == NULL);
}
static const element_t Null()
{
LIMITED_METHOD_CONTRACT;
return ETW::TypeLoggingInfo(NULL);
}
};
typedef SHash<LoggedTypesFromModuleTraits> LoggedTypesFromModuleHash;
// The inner hash table is housed inside this class, which acts as an entry in the outer
// hash table.
class ETW::LoggedTypesFromModule
{
public:
Module * pModule;
LoggedTypesFromModuleHash loggedTypesFromModuleHash;
// These are used by the outer hash table (mapping Module*'s to instances of
// LoggedTypesFromModule).
static COUNT_T Hash(Module * pModule)
{
LIMITED_METHOD_CONTRACT;
return (COUNT_T) (SIZE_T) pModule;
}
Module * GetKey()
{
LIMITED_METHOD_CONTRACT;
return pModule;
}
LoggedTypesFromModule(Module * pModuleParam) : loggedTypesFromModuleHash()
{
LIMITED_METHOD_CONTRACT;
pModule = pModuleParam;
}
~LoggedTypesFromModule()
{
LIMITED_METHOD_CONTRACT;
}
};
// The following define the outer hash table (mapping Module*'s to instances of
// LoggedTypesFromModule).
class AllLoggedTypesTraits : public DefaultSHashTraits<ETW::LoggedTypesFromModule *>
{
public:
// explicitly declare local typedefs for these traits types, otherwise
// the compiler may get confused
typedef DefaultSHashTraits<ETW::LoggedTypesFromModule *> PARENT;
typedef PARENT::element_t element_t;
typedef PARENT::count_t count_t;
typedef Module * key_t;
static key_t GetKey(const element_t &e)
{
LIMITED_METHOD_CONTRACT;
return e->pModule;
}
static BOOL Equals(key_t k1, key_t k2)
{
LIMITED_METHOD_CONTRACT;
return (k1 == k2);
}
static count_t Hash(key_t k)
{
LIMITED_METHOD_CONTRACT;
return (count_t) (size_t) k;
}
static bool IsNull(const element_t &e)
{
LIMITED_METHOD_CONTRACT;
return (e == NULL);
}
static const element_t Null()
{
LIMITED_METHOD_CONTRACT;
return NULL;
}
};
typedef SHash<AllLoggedTypesTraits> AllLoggedTypesHash;
// The outer hash table (mapping Module*'s to instances of LoggedTypesFromModule) is
// housed in this struct, which is dynamically allocated the first time we decide we need
// it.
struct AllLoggedTypes
{
public:
// This Crst protects the entire outer & inner hash tables. On a GC heap walk, it
// is entered once for the duration of the walk, so that we can freely access the
// hash tables during the walk. On each object allocation, this Crst must be
// entered individually each time.
static CrstStatic s_cs;
// A thread local copy of the global epoch.
// This value is used by each thread to ensure that the thread local data structures
// are in sync with the global state.
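// (Illustration of the protocol, based on the code below: OnModuleUnload and
// OnTypesKeywordTurnedOff increment the global s_nEpoch; the next time a thread
// consults its table in LookupOrCreateTypeLoggingInfo and sees nEpoch != s_nEpoch,
// it discards and rebuilds its thread-local tables.)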
unsigned int nEpoch;
// The outer hash table (mapping Module*'s to instances of LoggedTypesFromModule)
AllLoggedTypesHash allLoggedTypesHash;
};
CrstStatic AllLoggedTypes::s_cs;
AllLoggedTypes * ETW::TypeSystemLog::s_pAllLoggedTypes = NULL;
unsigned int ETW::TypeSystemLog::s_nEpoch = 0;
BOOL ETW::TypeSystemLog::s_fHeapAllocEventEnabledOnStartup = FALSE;
BOOL ETW::TypeSystemLog::s_fHeapAllocHighEventEnabledNow = FALSE;
BOOL ETW::TypeSystemLog::s_fHeapAllocLowEventEnabledNow = FALSE;
int ETW::TypeSystemLog::s_nCustomMsBetweenEvents = 0;
//---------------------------------------------------------------------------------------
//
// Initializes TypeSystemLog (specifically its crst). Called just before ETW providers
// are registered with the OS
//
// Return Value:
// HRESULT indicating success or failure
//
// static
HRESULT ETW::TypeSystemLog::PreRegistrationInit()
{
LIMITED_METHOD_CONTRACT;
if (!AllLoggedTypes::s_cs.InitNoThrow(
CrstEtwTypeLogHash,
CRST_UNSAFE_ANYMODE)) // This lock is taken during a GC while walking the heap
{
return E_FAIL;
}
return S_OK;
}
//---------------------------------------------------------------------------------------
//
// Initializes TypeSystemLog's keyword-dependent state (the allocation-sampling flags
// and any custom sampling frequency). Called just after ETW providers are registered
// with the OS
//
// static
void ETW::TypeSystemLog::PostRegistrationInit()
{
LIMITED_METHOD_CONTRACT;
// Initialize our "current state" BOOLs that remember if low or high allocation
// sampling is turned on
s_fHeapAllocLowEventEnabledNow = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_GCHEAPALLOCLOW_KEYWORD);
s_fHeapAllocHighEventEnabledNow = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_GCHEAPALLOCHIGH_KEYWORD);
// Snapshot the current state of the object allocated keyword (on startup), and rely
// on this snapshot for the rest of the process run. Since these events require the
// slow alloc JIT helper to be enabled, and that can only be done on startup, we
// remember in this BOOL that we did so, so that we can prevent the object allocated
// event from being fired if the fast allocation helper were enabled but had to
// degrade down to the slow helper (e.g., thread ran over its allocation limit). This
// keeps things consistent.
s_fHeapAllocEventEnabledOnStartup = (s_fHeapAllocLowEventEnabledNow || s_fHeapAllocHighEventEnabledNow);
if (s_fHeapAllocEventEnabledOnStartup)
{
// Determine if a COMPLUS env var is overriding the frequency for the sampled
// object allocated events
// Config value intentionally typed as string, b/c DWORD interpretation is hard-coded
// to hex, which is not what the user would expect. This way I can force the
// conversion to use decimal.
NewArrayHolder<WCHAR> wszCustomObjectAllocationEventsPerTypePerSec(NULL);
if (FAILED(CLRConfig::GetConfigValue(
CLRConfig::UNSUPPORTED_ETW_ObjectAllocationEventsPerTypePerSec,
&wszCustomObjectAllocationEventsPerTypePerSec)) ||
(wszCustomObjectAllocationEventsPerTypePerSec == NULL))
{
return;
}
LPWSTR endPtr;
DWORD dwCustomObjectAllocationEventsPerTypePerSec = wcstoul(
wszCustomObjectAllocationEventsPerTypePerSec,
&endPtr,
10 // Base 10 conversion
);
if (dwCustomObjectAllocationEventsPerTypePerSec == ULONG_MAX)
dwCustomObjectAllocationEventsPerTypePerSec = 0;
if (dwCustomObjectAllocationEventsPerTypePerSec != 0)
{
// MsBetweenEvents = (1000 ms/sec) / (custom desired events/sec)
s_nCustomMsBetweenEvents = 1000 / dwCustomObjectAllocationEventsPerTypePerSec;
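// For example, a configured value of 50 events/type/sec yields
// s_nCustomMsBetweenEvents = 1000 / 50 = 20, i.e., roughly one sampled allocation
// event per type every 20 ms.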
}
}
}
//---------------------------------------------------------------------------------------
//
// Update object allocation sampling frequency and / or Type hash table contents based
// on what keywords were changed.
//
// static
void ETW::TypeSystemLog::OnKeywordsChanged()
{
LIMITED_METHOD_CONTRACT;
// If the desired frequency for the GCSampledObjectAllocation events has changed,
// update our state.
s_fHeapAllocLowEventEnabledNow = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_GCHEAPALLOCLOW_KEYWORD);
s_fHeapAllocHighEventEnabledNow = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_GCHEAPALLOCHIGH_KEYWORD);
// FUTURE: Would be nice here to log an error event if (s_fHeapAllocLowEventEnabledNow ||
// s_fHeapAllocHighEventEnabledNow), but !s_fHeapAllocEventEnabledOnStartup
// If the type events should be turned off, eliminate the hash tables that tracked
// which types were logged. (If type events are turned back on later, we'll re-log
// them all as we encounter them.) Note that all we can really test for is that the
// Types keyword on the runtime provider is off. Not necessarily that it was on and
// was just turned off with this request. Either way, it is safe to call
// OnTypesKeywordTurnedOff, which simply no-ops if there is nothing to clean up.
if (!ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, TRACE_LEVEL_INFORMATION, CLR_TYPE_KEYWORD))
OnTypesKeywordTurnedOff();
}
//---------------------------------------------------------------------------------------
//
// Based on keywords alone, determine what the default sampling rate should be for
// object allocation events. (This function does not consider any COMPLUS overrides for
// the sampling rate.)
//
// static
int ETW::TypeSystemLog::GetDefaultMsBetweenEvents()
{
LIMITED_METHOD_CONTRACT;
// We should only get here if the allocation event is enabled. In spirit, this assert
// is correct, but a race could cause the assert to fire (if someone toggled the
// event off after we decided that the event was on and we started down the path of
// calculating statistics to fire the event). In such a case we'll end up returning
// k_nDefaultMsBetweenEventsLow below, but next time we won't get here as we'll know
// early enough not to fire the event.
//_ASSERTE(IsHeapAllocEventEnabled());
// MsBetweenEvents = (1000 ms/sec) / (desired events/sec)
const int k_nDefaultMsBetweenEventsHigh = 1000 / 100; // 100 events per type per sec
const int k_nDefaultMsBetweenEventsLow = 1000 / 5; // 5 events per type per sec
// If both are set, High takes precedence
if (s_fHeapAllocHighEventEnabledNow)
{
return k_nDefaultMsBetweenEventsHigh;
}
return k_nDefaultMsBetweenEventsLow;
}
//---------------------------------------------------------------------------------------
//
// Use this to decide whether to fire the object allocation event
//
// Return Value:
// nonzero iff we should fire the event.
//
// static
BOOL ETW::TypeSystemLog::IsHeapAllocEventEnabled()
{
LIMITED_METHOD_CONTRACT;
return
// Only fire the event if it was enabled at startup (and thus the slow-JIT new
// helper is used in all cases)
s_fHeapAllocEventEnabledOnStartup &&
// AND a keyword is still enabled. (Thus people can turn off the event
// whenever they want; but they cannot turn it on unless it was also on at startup.)
(s_fHeapAllocHighEventEnabledNow || s_fHeapAllocLowEventEnabledNow);
}
//---------------------------------------------------------------------------------------
//
// Helper that adds (or updates) the TypeLoggingInfo inside the inner hash table passed
// in.
//
// Arguments:
// * pLoggedTypesFromModule - Inner hash table to update
// * pTypeLoggingInfo - TypeLoggingInfo to store
//
// Return Value:
// nonzero iff the add/replace was successful.
//
// static
BOOL ETW::TypeSystemLog::AddOrReplaceTypeLoggingInfo(ETW::LoggedTypesFromModule * pLoggedTypesFromModule, const ETW::TypeLoggingInfo * pTypeLoggingInfo)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(pLoggedTypesFromModule != NULL);
BOOL fSucceeded = FALSE;
EX_TRY
{
pLoggedTypesFromModule->loggedTypesFromModuleHash.AddOrReplace(*pTypeLoggingInfo);
fSucceeded = TRUE;
}
EX_CATCH
{
fSucceeded = FALSE;
}
EX_END_CATCH(RethrowCorruptingExceptions);
return fSucceeded;
}
//---------------------------------------------------------------------------------------
//
// Records stats about the object's allocation, and determines based on those stats whether
// to fire the high / low frequency GCSampledObjectAllocation ETW event
//
// Arguments:
// * pObject - Allocated object to log (its TypeHandle is obtained from the object itself)
//
// static
void ETW::TypeSystemLog::SendObjectAllocatedEvent(Object * pObject)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
}
CONTRACTL_END;
// No-op if the appropriate keywords were not enabled on startup (or we're not yet
// started up)
if (!s_fHeapAllocEventEnabledOnStartup || !g_fEEStarted)
return;
TypeHandle th = pObject->GetTypeHandle();
SIZE_T size = pObject->GetSize();
if (size < MIN_OBJECT_SIZE)
{
size = PtrAlign(size);
}
SIZE_T nTotalSizeForTypeSample = size;
DWORD dwTickNow = GetTickCount();
DWORD dwObjectCountForTypeSample = 0;
// Get stats for type
TypeLoggingInfo typeLoggingInfo(NULL);
LoggedTypesFromModule * pLoggedTypesFromModule = NULL;
BOOL fCreatedNew = FALSE;
typeLoggingInfo = LookupOrCreateTypeLoggingInfo(th, &fCreatedNew, &pLoggedTypesFromModule);
if (typeLoggingInfo.th.IsNull())
return;
// Update stats with current allocation
typeLoggingInfo.dwAllocsSkippedForSample++;
typeLoggingInfo.cbIgnoredSizeForSample += size;
// If both the high and low verbosity keywords are enabled, log all allocations.
if (!(s_fHeapAllocHighEventEnabledNow && s_fHeapAllocLowEventEnabledNow))
{
// Get the number of threads so that we can scale the per-thread sampling data.
// NOTE: We don't do this while holding the thread store lock, so this may not be perfect,
// but it will be close enough.
LONG numThreads = ThreadStore::s_pThreadStore->ThreadCountInEE();
// This is our filter. If we should ignore this alloc, then record our updated
// stats, and bail without sending the event. Note that we always log objects
// over 10K in size.
if (size < 10000 && typeLoggingInfo.dwAllocsSkippedForSample < (typeLoggingInfo.dwAllocsToSkipPerSample * numThreads))
{
// Update hash table's copy of type logging info with these values. It is not optimal that
// we're doing another hash table lookup here. Could instead have used LookupPtr()
// if it gave us back a non-const pointer, and then we could have updated in-place
AddOrReplaceTypeLoggingInfo(pLoggedTypesFromModule, &typeLoggingInfo);
if (fCreatedNew)
{
// Although we're skipping logging the allocation, we still need to log
// the type (so it's available for resolving future allocation events to
// their types).
//
// (See other call to LogTypeAndParametersIfNecessary further down for
// more comments.)
LogTypeAndParametersIfNecessary(
NULL,
th.AsTAddr(),
kTypeLogBehaviorAlwaysLogTopLevelType);
}
return;
}
// Based on observed allocation stats, adjust our sampling rate for this type
typeLoggingInfo.dwAllocCountInCurrentBucket += typeLoggingInfo.dwAllocsSkippedForSample;
int delta = (dwTickNow - typeLoggingInfo.dwTickOfCurrentTimeBucket) & 0x7FFFFFFF; // make wrap around work.
int nMinAllocPerMSec = typeLoggingInfo.dwAllocCountInCurrentBucket / 16 / numThreads; // This is an underestimation of the true rate.
if (delta >= 16 || (nMinAllocPerMSec > 2 && nMinAllocPerMSec > typeLoggingInfo.flAllocPerMSec * 1.5F))
{
float flNewAllocPerMSec = 0;
if (delta >= 16)
{
// This is the normal case, our allocation rate is under control with the current throttling.
flNewAllocPerMSec = ((float) typeLoggingInfo.dwAllocCountInCurrentBucket) / delta;
// Do an exponential decay window that is 5 * max(16, AllocationInterval)
typeLoggingInfo.flAllocPerMSec = 0.8F * typeLoggingInfo.flAllocPerMSec + 0.2F * flNewAllocPerMSec;
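// Worked example (illustrative numbers only): if flAllocPerMSec was 10 and the
// new measurement flNewAllocPerMSec is 20, the smoothed rate becomes
// 0.8 * 10 + 0.2 * 20 = 12 allocations/ms.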
typeLoggingInfo.dwTickOfCurrentTimeBucket = dwTickNow;
typeLoggingInfo.dwAllocCountInCurrentBucket = 0;
}
else
{
flNewAllocPerMSec = (float) nMinAllocPerMSec;
// This means the second clause above is true, which means our sampling rate is too low
// so we need to throttle quickly.
typeLoggingInfo.flAllocPerMSec = flNewAllocPerMSec;
}
// Obey the desired sampling rate, but don't ignore > 1000 allocations per second
// per type
int nDesiredMsBetweenEvents = (s_nCustomMsBetweenEvents == 0) ? GetDefaultMsBetweenEvents() : s_nCustomMsBetweenEvents;
typeLoggingInfo.dwAllocsToSkipPerSample = min((int) (typeLoggingInfo.flAllocPerMSec * nDesiredMsBetweenEvents), 1000);
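// Illustrative numbers: with flAllocPerMSec = 12 and nDesiredMsBetweenEvents = 200
// (the low-keyword default), this computes min((int)(12 * 200), 1000) = 1000, so
// roughly 1000 allocations of this type are skipped between samples.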
if (typeLoggingInfo.dwAllocsToSkipPerSample == 1)
typeLoggingInfo.dwAllocsToSkipPerSample = 0;
}
}
// We're logging this sample, so save the values we need into locals, and reset
// our counts for the next sample.
nTotalSizeForTypeSample = typeLoggingInfo.cbIgnoredSizeForSample;
dwObjectCountForTypeSample = typeLoggingInfo.dwAllocsSkippedForSample;
typeLoggingInfo.cbIgnoredSizeForSample = 0;
typeLoggingInfo.dwAllocsSkippedForSample = 0;
// Save updated stats into hash table
if (!AddOrReplaceTypeLoggingInfo(pLoggedTypesFromModule, &typeLoggingInfo))
{
return;
}
// While we're still holding the crst, optionally log any relevant Types now (we may need
// to reconsult the hash in here if there are any type parameters, though we can
// optimize and NOT consult the hash for th itself).
if (fCreatedNew)
{
// We were the ones to add the Type to the hash. So it wasn't there before,
// which means it hasn't been logged yet.
LogTypeAndParametersIfNecessary(
// No BulkTypeEventLogger, as we're not batching during a GC heap walk
NULL,
th.AsTAddr(),
// We've determined the type is not yet logged, so no need to check
kTypeLogBehaviorAlwaysLogTopLevelType);
}
// Now log the allocation
if (s_fHeapAllocHighEventEnabledNow)
{
FireEtwGCSampledObjectAllocationHigh(pObject, (LPVOID) th.AsTAddr(), dwObjectCountForTypeSample, nTotalSizeForTypeSample, GetClrInstanceId());
}
else
{
FireEtwGCSampledObjectAllocationLow(pObject, (LPVOID) th.AsTAddr(), dwObjectCountForTypeSample, nTotalSizeForTypeSample, GetClrInstanceId());
}
}
//---------------------------------------------------------------------------------------
//
// Accessor for global hash table crst
//
// Return Value:
// global hash table crst
//
// static
CrstBase * ETW::TypeSystemLog::GetHashCrst()
{
LIMITED_METHOD_CONTRACT;
return &AllLoggedTypes::s_cs;
}
//---------------------------------------------------------------------------------------
//
// Outermost level of ETW-type-logging. Clients outside eventtrace.cpp call this to log
// a TypeHandle and (recursively) its type parameters when present. This guy then calls
// into the appropriate BulkTypeEventLogger to do the batching and logging
//
// Arguments:
// * pBulkTypeEventLogger - If our caller is keeping track of batched types, it
// passes this to us so we can use it to batch the current type (GC heap walk
// does this). If this is NULL, no batching is going on (e.g., we're called on
// object allocation, not a GC heap walk), in which case we create our own
// temporary BulkTypeEventLogger.
// * thAsAddr - TypeHandle to batch
// * typeLogBehavior - Optimization to tell us we don't need to enter the
// TypeSystemLog's crst, as the TypeSystemLog's hash table is already protected
// by a prior acquisition of the crst by our caller. (Or that we don't even
// need to check the hash in the first place.)
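//
// A typical non-batching call looks like the one in SendFinalizeObjectEvent below
// (shown here for illustration):
//
//     ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(
//         NULL,           // not batching this type with others
//         (TADDR) pMT,
//         ETW::TypeSystemLog::kTypeLogBehaviorAlwaysLog);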
//
// static
VOID ETW::TypeSystemLog::LogTypeAndParametersIfNecessary(BulkTypeEventLogger * pLogger, ULONGLONG thAsAddr, TypeLogBehavior typeLogBehavior)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
// LogTypeAndParameters locks, and we take our own lock if typeLogBehavior says to
CAN_TAKE_LOCK;
}
CONTRACTL_END;
if (!ETW_TRACING_CATEGORY_ENABLED(
MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_TYPE_KEYWORD))
{
return;
}
TypeHandle th = TypeHandle::FromTAddr((TADDR) thAsAddr);
if (!th.IsRestored())
{
return;
}
// Check to see if we've already logged this type. If so, bail immediately.
// Otherwise, mark that it's getting logged (by adding it to the hash), and fall
// through to the logging code below. If caller doesn't care, then don't even
// check; just log the type
BOOL fShouldLogType = ((typeLogBehavior == kTypeLogBehaviorAlwaysLog) ||
(typeLogBehavior == kTypeLogBehaviorAlwaysLogTopLevelType)) ?
TRUE :
ShouldLogType(th);
if (!fShouldLogType)
return;
if (pLogger == NULL)
{
// We're not batching this type against previous types (e.g., we're being called
// on object allocate instead of a GC heap walk). So create a temporary logger
// on the stack. If there are generic parameters that need to be logged, then
// at least they'll get batched together with the type
BulkTypeEventLogger logger;
logger.LogTypeAndParameters(thAsAddr, typeLogBehavior);
// Since this logger isn't being used to batch anything else, flush what we have
logger.FireBulkTypeEvent();
}
else
{
// We are batching this type with others (e.g., we're being called at the end of
// a GC on a heap walk). So use the logger our caller set up for us.
pLogger->LogTypeAndParameters(thAsAddr, typeLogBehavior);
}
}
//---------------------------------------------------------------------------------------
//
// Ask hash table if we've already logged the type, without first acquiring the lock
// (our caller already did this). As a side-effect, a TypeLoggingInfo will be created
// for this type (so future calls to this function will return FALSE to avoid dupe type
// logging).
//
// Arguments:
// pth - TypeHandle to query
//
// Return Value:
// nonzero iff type should be logged (i.e., not previously logged)
//
// static
BOOL ETW::TypeSystemLog::ShouldLogType(TypeHandle th)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
CAN_TAKE_LOCK;
}
CONTRACTL_END;
// Check to see if a TypeLoggingInfo exists yet for th. If not, one is created and
// added to the hash.
BOOL fCreatedNew = FALSE;
// When we have a thread context, default to calling the API that requires one which
// reduces the cost of locking.
if (GetThread() != NULL)
{
LookupOrCreateTypeLoggingInfo(th, &fCreatedNew);
}
else
{
AddTypeToGlobalCacheIfNotExists(th, &fCreatedNew);
}
// Return whether we had to create the TypeLoggingInfo (indicating it was not yet in
// the hash, and thus that we hadn't yet logged the type).
return fCreatedNew;
}
//---------------------------------------------------------------------------------------
//
// Helper that returns (creating if necessary) the TypeLoggingInfo in the hash table
// corresponding with the specified TypeHandle
//
// Arguments:
// * th - Key to lookup the TypeLoggingInfo
// * pfCreatedNew - [out] Points to nonzero iff a new TypeLoggingInfo was created
// (i.e., none existed yet in the hash for th).
// * ppLoggedTypesFromModule - [out] Points to the inner hash that was used to do
// the lookup. (An optimization so the caller doesn't have to find this again,
// if it needs to do further operations on it.)
//
// Return Value:
// TypeLoggingInfo found or created.
//
//
// static
ETW::TypeLoggingInfo ETW::TypeSystemLog::LookupOrCreateTypeLoggingInfo(TypeHandle th, BOOL * pfCreatedNew, LoggedTypesFromModule ** ppLoggedTypesFromModule /* = NULL */)
{
//LIMITED_METHOD_CONTRACT;
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
_ASSERTE(pfCreatedNew != NULL);
if (ppLoggedTypesFromModule != NULL)
{
*ppLoggedTypesFromModule = NULL;
}
BOOL fSucceeded = FALSE;
Thread *pThread = GetThread();
// Compare the thread local epoch value against the global epoch.
// If the epoch has changed, dump the thread local state and start over.
AllLoggedTypes * pThreadAllLoggedTypes = pThread->GetAllocationSamplingTable();
if((pThreadAllLoggedTypes != NULL) && (pThreadAllLoggedTypes->nEpoch != s_nEpoch))
{
// Set the type hash pointer on the thread to NULL.
pThread->SetAllocationSamplingTable(NULL);
// DeleteTypeHashNoLock will set pThreadAllLoggedTypes to NULL
DeleteTypeHashNoLock(&pThreadAllLoggedTypes);
}
// Create the thread local state if it doesn't exist.
if (pThreadAllLoggedTypes == NULL)
{
pThreadAllLoggedTypes = new (nothrow) AllLoggedTypes;
if (pThreadAllLoggedTypes == NULL)
{
// out of memory. Bail on ETW stuff
*pfCreatedNew = FALSE;
return TypeLoggingInfo(NULL);
}
// Set the epoch so we know we can track when changes to global state occur.
pThreadAllLoggedTypes->nEpoch = s_nEpoch;
// Save the thread local state to the thread.
pThread->SetAllocationSamplingTable(pThreadAllLoggedTypes);
}
BOOL addTypeToGlobalList = FALSE;
// Step 1: go from LoaderModule to hash of types.
Module * pLoaderModule = th.GetLoaderModule();
_ASSERTE(pLoaderModule != NULL);
LoggedTypesFromModule * pLoggedTypesFromModule = pThreadAllLoggedTypes->allLoggedTypesHash.Lookup(pLoaderModule);
if (pLoggedTypesFromModule == NULL)
{
addTypeToGlobalList = TRUE;
pLoggedTypesFromModule = new (nothrow) LoggedTypesFromModule(pLoaderModule);
if (pLoggedTypesFromModule == NULL)
{
// out of memory. Bail on ETW stuff
*pfCreatedNew = FALSE;
return TypeLoggingInfo(NULL);
}
fSucceeded = FALSE;
EX_TRY
{
pThreadAllLoggedTypes->allLoggedTypesHash.Add(pLoggedTypesFromModule);
fSucceeded = TRUE;
}
EX_CATCH
{
fSucceeded = FALSE;
}
EX_END_CATCH(RethrowCorruptingExceptions);
if (!fSucceeded)
{
*pfCreatedNew = FALSE;
return TypeLoggingInfo(NULL);
}
}
if (ppLoggedTypesFromModule != NULL)
{
*ppLoggedTypesFromModule = pLoggedTypesFromModule;
}
// Step 2: From hash of types, see if our TypeHandle is there already
TypeLoggingInfo typeLoggingInfoPreexisting = pLoggedTypesFromModule->loggedTypesFromModuleHash.Lookup(th);
if (!typeLoggingInfoPreexisting.th.IsNull())
{
// Type is already hashed, so it's already logged, so we don't need to
// log it again.
*pfCreatedNew = FALSE;
return typeLoggingInfoPreexisting;
}
// We haven't logged this type, so we need to continue with this function to
// log it below. Add it to the hash table first so any recursive calls will
// see that this type is already being taken care of
addTypeToGlobalList = TRUE;
fSucceeded = FALSE;
TypeLoggingInfo typeLoggingInfoNew(th);
EX_TRY
{
pLoggedTypesFromModule->loggedTypesFromModuleHash.Add(typeLoggingInfoNew);
fSucceeded = TRUE;
}
EX_CATCH
{
fSucceeded = FALSE;
}
EX_END_CATCH(RethrowCorruptingExceptions);
if (!fSucceeded)
{
*pfCreatedNew = FALSE;
return TypeLoggingInfo(NULL);
}
// This is the first time that we've seen this type on this thread, so we should attempt to
// add it to the global list.
if(!AddTypeToGlobalCacheIfNotExists(th, pfCreatedNew))
{
// out of memory or ETW has been disabled. Bail on ETW stuff
*pfCreatedNew = FALSE;
return TypeLoggingInfo(NULL);
}
return typeLoggingInfoNew;
}
//---------------------------------------------------------------------------------------
//
// Helper that creates a Type entry in the global type logging cache if one doesn't
// already exist.
//
// Arguments:
// * th - Key to lookup or create
//
// Return Value:
// TRUE if the type needed to be added to the cache.
//
//
// static
BOOL ETW::TypeSystemLog::AddTypeToGlobalCacheIfNotExists(TypeHandle th, BOOL * pfCreatedNew)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
BOOL fSucceeded = FALSE;
{
CrstHolder _crst(GetHashCrst());
// Check if ETW is enabled, and if not, bail here.
// We do this inside of the lock to ensure that we don't immediately
// re-allocate the global type hash after it has been cleaned up.
if (!ETW_TRACING_CATEGORY_ENABLED(
MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_TYPE_KEYWORD))
{
*pfCreatedNew = FALSE;
return fSucceeded;
}
if (s_pAllLoggedTypes == NULL)
{
s_pAllLoggedTypes = new (nothrow) AllLoggedTypes;
if (s_pAllLoggedTypes == NULL)
{
// out of memory. Bail on ETW stuff
*pfCreatedNew = FALSE;
return fSucceeded;
}
}
// Step 1: go from LoaderModule to hash of types.
Module * pLoaderModule = th.GetLoaderModule();
_ASSERTE(pLoaderModule != NULL);
LoggedTypesFromModule * pLoggedTypesFromModule = s_pAllLoggedTypes->allLoggedTypesHash.Lookup(pLoaderModule);
if (pLoggedTypesFromModule == NULL)
{
pLoggedTypesFromModule = new (nothrow) LoggedTypesFromModule(pLoaderModule);
if (pLoggedTypesFromModule == NULL)
{
// out of memory. Bail on ETW stuff
*pfCreatedNew = FALSE;
return fSucceeded;
}
fSucceeded = FALSE;
EX_TRY
{
s_pAllLoggedTypes->allLoggedTypesHash.Add(pLoggedTypesFromModule);
fSucceeded = TRUE;
}
EX_CATCH
{
fSucceeded = FALSE;
}
EX_END_CATCH(RethrowCorruptingExceptions);
if (!fSucceeded)
{
*pfCreatedNew = FALSE;
return fSucceeded;
}
}
// Step 2: From hash of types, see if our TypeHandle is there already
TypeLoggingInfo typeLoggingInfoPreexisting = pLoggedTypesFromModule->loggedTypesFromModuleHash.Lookup(th);
if (!typeLoggingInfoPreexisting.th.IsNull())
{
// Type is already hashed, so it's already logged, so we don't need to
// log it again.
*pfCreatedNew = FALSE;
return fSucceeded;
}
// We haven't logged this type, so we need to continue with this function to
// log it below. Add it to the hash table first so any recursive calls will
// see that this type is already being taken care of
fSucceeded = FALSE;
TypeLoggingInfo typeLoggingInfoNew(th);
EX_TRY
{
pLoggedTypesFromModule->loggedTypesFromModuleHash.Add(typeLoggingInfoNew);
fSucceeded = TRUE;
}
EX_CATCH
{
fSucceeded = FALSE;
}
EX_END_CATCH(RethrowCorruptingExceptions);
if (!fSucceeded)
{
*pfCreatedNew = FALSE;
return fSucceeded;
}
} // RELEASE: CrstHolder _crst(GetHashCrst());
*pfCreatedNew = TRUE;
return fSucceeded;
}
//---------------------------------------------------------------------------------------
//
// Called when we determine if a module was unloaded, so we can clear out that module's
// set of types from our hash table
//
// Arguments:
// pModule - Module getting unloaded
//
// static
VOID ETW::TypeSystemLog::OnModuleUnload(Module * pModule)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
CAN_TAKE_LOCK;
}
CONTRACTL_END;
// We don't need to do anything if allocation sampling is disabled.
if (!ETW_TRACING_CATEGORY_ENABLED(
MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_TYPE_KEYWORD))
{
return;
}
LoggedTypesFromModule * pLoggedTypesFromModule = NULL;
{
CrstHolder _crst(GetHashCrst());
// We don't need to do anything if the global type hash doesn't contain any data.
if (s_pAllLoggedTypes == NULL)
return;
// Is there a TypesHash for this module?
pLoggedTypesFromModule = s_pAllLoggedTypes->allLoggedTypesHash.Lookup(pModule);
if (pLoggedTypesFromModule == NULL)
return;
// Remove TypesHash from master hash mapping modules to their TypesHash
s_pAllLoggedTypes->allLoggedTypesHash.Remove(pModule);
// Increment the epoch to signal the change to all threads.
s_nEpoch++;
}
// Destruct this TypesHash we just removed
delete pLoggedTypesFromModule;
pLoggedTypesFromModule = NULL;
}
//---------------------------------------------------------------------------------------
//
// Same semantics as DeleteTypeHash but assumes that the appropriate lock
// has already been acquired.
//
// static
VOID ETW::TypeSystemLog::DeleteTypeHashNoLock(AllLoggedTypes **ppAllLoggedTypes)
{
LIMITED_METHOD_CONTRACT;
if(ppAllLoggedTypes == NULL)
{
return;
}
AllLoggedTypes *pAllLoggedTypes = *ppAllLoggedTypes;
if(pAllLoggedTypes == NULL)
{
return;
}
// Destruct each of the per-module TypesHashes
AllLoggedTypesHash * pLoggedTypesHash = &pAllLoggedTypes->allLoggedTypesHash;
for (AllLoggedTypesHash::Iterator iter = pLoggedTypesHash->Begin();
iter != pLoggedTypesHash->End();
++iter)
{
LoggedTypesFromModule * pLoggedTypesFromModule = *iter;
delete pLoggedTypesFromModule;
}
// This causes the default ~AllLoggedTypes() to be called, and thus
// ~AllLoggedTypesHash() to be called
delete pAllLoggedTypes;
*ppAllLoggedTypes = NULL;
}
//---------------------------------------------------------------------------------------
//
// Called from shutdown to give us the opportunity to dump any sampled object allocation
// information before the process shuts down.
//
// static
VOID ETW::TypeSystemLog::FlushObjectAllocationEvents()
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
CAN_TAKE_LOCK;
}
CONTRACTL_END;
// If logging is not enabled, then we don't need to do any work.
if (!(s_fHeapAllocLowEventEnabledNow || s_fHeapAllocHighEventEnabledNow))
{
return;
}
AllLoggedTypes * pThreadAllLoggedTypes = NULL;
Thread * pThread = NULL;
// Get the thread store lock.
ThreadStoreLockHolder tsl;
// Iterate over each thread and log any un-logged allocations.
while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
{
pThreadAllLoggedTypes = pThread->GetAllocationSamplingTable();
if (pThreadAllLoggedTypes == NULL)
{
continue;
}
DWORD dwAllocsSkippedForSample;
SIZE_T cbIgnoredSizeForSample;
// Iterate over each module.
AllLoggedTypesHash * pLoggedTypesHash = &pThreadAllLoggedTypes->allLoggedTypesHash;
for (AllLoggedTypesHash::Iterator iter = pLoggedTypesHash->Begin();
iter != pLoggedTypesHash->End();
++iter)
{
// Iterate over each type in the module.
LoggedTypesFromModule * pLoggedTypesFromModule = *iter;
LoggedTypesFromModuleHash * pLoggedTypesFromModuleHash = &pLoggedTypesFromModule->loggedTypesFromModuleHash;
for (LoggedTypesFromModuleHash::Iterator typeIter = pLoggedTypesFromModuleHash->Begin();
typeIter != pLoggedTypesFromModuleHash->End();
++typeIter)
{
dwAllocsSkippedForSample = typeIter->dwAllocsSkippedForSample;
cbIgnoredSizeForSample = typeIter->cbIgnoredSizeForSample;
// Only write the event if there were allocations that have not been logged.
if (dwAllocsSkippedForSample > 0 || cbIgnoredSizeForSample > 0)
{
// Write the event based on which keyword was specified when ETW was configured.
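// No specific object address is available at flush time, so NULL is passed for the
// first argument; only the aggregated count and size for the type are reported.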
if (s_fHeapAllocHighEventEnabledNow)
{
FireEtwGCSampledObjectAllocationHigh(NULL, (LPVOID) typeIter->th.AsTAddr(), dwAllocsSkippedForSample, cbIgnoredSizeForSample, GetClrInstanceId());
}
else
{
FireEtwGCSampledObjectAllocationLow(NULL, (LPVOID) typeIter->th.AsTAddr(), dwAllocsSkippedForSample, cbIgnoredSizeForSample, GetClrInstanceId());
}
}
}
}
}
}
//---------------------------------------------------------------------------------------
//
// This is called whenever we detect that the Types keyword has been turned off. It
// eliminates the global hash tables that tracked which types were logged (if those hash
// tables had been created previously). If type events are turned back on later, we'll
// re-log types as we encounter them. Thread-local hash tables are destroyed in the
// Cleanup method, which is called during GC to ensure that there are no races.
//
// static
VOID ETW::TypeSystemLog::OnTypesKeywordTurnedOff()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
CAN_TAKE_LOCK;
}
CONTRACTL_END;
// Take the global cache lock.
CrstHolder _crst(GetHashCrst());
// Clean up the global TypeHash if necessary.
if (s_pAllLoggedTypes == NULL)
{
// Even though we don't increment the epoch here, any per-thread data that has
// already been allocated will still be cleaned up during the next GC, because
// s_nEpoch is guaranteed to have been incremented at least once (to shut down
// allocation sampling).
return;
}
// Destruct the global TypeHash
DeleteTypeHashNoLock(&s_pAllLoggedTypes);
// Increment the epoch to signal the change to all threads.
s_nEpoch++;
}
//---------------------------------------------------------------------------------------
//
// Clean up thread-local type hashes. This is called from within the GC to ensure that
// there are no races. All threads are suspended when this is called.
//
// static
VOID ETW::TypeSystemLog::Cleanup()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
// If allocation sampling is enabled, bail here so that we don't delete
// any of the thread local state.
if (ETW_TRACING_CATEGORY_ENABLED(
MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_TYPE_KEYWORD))
{
return;
}
// If the epoch has never been incremented, allocation sampling was never
// turned on, so there is nothing to clean up.
if(s_nEpoch <= 0)
{
return;
}
// Iterate over each thread and destruct the per thread caches
AllLoggedTypes * pThreadAllLoggedTypes = NULL;
Thread * pThread = NULL;
while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
{
pThreadAllLoggedTypes = pThread->GetAllocationSamplingTable();
if(pThreadAllLoggedTypes == NULL)
{
continue;
}
// Destruct each of the thread local TypesHashes
DeleteTypeHashNoLock(&pThreadAllLoggedTypes);
// Set the thread type hash pointer to NULL
pThread->SetAllocationSamplingTable(NULL);
}
}
/****************************************************************************/
/* Called when ETW is turned ON for an existing process and ModuleRange events are to
be fired */
/****************************************************************************/
VOID ETW::EnumerationLog::ModuleRangeRundown()
{
CONTRACTL {
NOTHROW;
GC_TRIGGERS;
} CONTRACTL_END;
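// Rundown is best effort: swallow any exception rather than let it escape into the
// caller.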
EX_TRY
{
if (ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_PERFTRACK_PRIVATE_KEYWORD))
{
ETW::EnumerationLog::EnumerationHelper(NULL, NULL, ETW::EnumerationLog::EnumerationStructs::ModuleRangeLoadPrivate);
}
} EX_CATCH { } EX_END_CATCH(SwallowAllExceptions);
}
/****************************************************************************/
/* Called when ETW is turned ON for an existing process */
/****************************************************************************/
VOID ETW::EnumerationLog::StartRundown()
{
CONTRACTL {
NOTHROW;
GC_TRIGGERS;
} CONTRACTL_END;
EX_TRY
{
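// Check which rundown keyword categories the session requested; these flags gate
// which enumerations are performed below.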
BOOL bIsArmRundownEnabled = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_RUNDOWNAPPDOMAINRESOURCEMANAGEMENT_KEYWORD);
BOOL bIsPerfTrackRundownEnabled = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_RUNDOWNPERFTRACK_KEYWORD);
BOOL bIsThreadingRundownEnabled = ETW_TRACING_CATEGORY_ENABLED(
MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_RUNDOWNTHREADING_KEYWORD);
if(ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
TRACE_LEVEL_INFORMATION,
CLR_RUNDOWNJIT_KEYWORD)
||
ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,