/base/Kernel/Singularity/GCTracing.cs

////////////////////////////////////////////////////////////////////////////////
//
//  Microsoft Research Singularity
//
//  Copyright (c) Microsoft Corporation. All rights reserved.
//
//  File:   GCTracing.cs
//
//  Note:   Provides logging facilities for the garbage collector. This is
//          intended to be used in both the kernel and SIPs; each SIP gets a
//          private memory buffer for these entries, separate from the kernel
//          log.
//
using System;
using System.Diagnostics;
using System.Text;
using System.Runtime.InteropServices;
using System.Runtime.CompilerServices;
using System.Threading;
using Microsoft.Singularity;
using Microsoft.Singularity.Isal;
using System.GCs;
using System.Collections;
using Microsoft.Singularity.Memory;
using Microsoft.Singularity.V1.Services;
using Microsoft.Singularity.Eventing;

namespace Microsoft.Singularity
{
    [CLSCompliant(false)]
    [AccessedByRuntime("output to header - methods defined in GCTracing.cpp")]
    public class GCProfilerLogger : GCProfiler
    {
        [CLSCompliant(false)]
        public class GCTypeSource : EventSource {

            public static GCTypeSource Create(string sourceName, uint typeSize, ulong options) {

                uint qos = QualityOfService.PermanentEvents;

                if (options != 0) {
                    qos |= QualityOfService.OOM_BreakOnRecycle;
                }

                EventingStorage storage = EventingStorage.CreateLocalStorage(qos, typeSize);

                if (storage == null) {
                    return null;
                }

                GCTypeSource Logger = new GCTypeSource(sourceName,
                                                       storage,
                                                       ENABLE_ALL_MASK);

                if (Logger != null) {
                    Logger.Register();
                }

                return Logger;
            }

            GCTypeSource(string sourceName, EventingStorage storage, uint controlFlags)
                : base(sourceName, storage, controlFlags) {}
        }
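
        // GCEventSource below is structurally identical to GCTypeSource; the
        // only difference is the storage quality of service. Type definitions
        // are logged with QualityOfService.PermanentEvents, while GC events use
        // QualityOfService.RecyclableEvents, so event entries (unlike type
        // definitions) may be recycled once the local storage fills. In both
        // cases the OOM_BreakOnRecycle flag is added when options are nonzero.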
        [CLSCompliant(false)]
        public class GCEventSource : EventSource {

            public static GCEventSource Create(string sourceName, uint typeSize, ulong options) {

                uint qos = QualityOfService.RecyclableEvents;

                if (options != 0) {
                    qos |= QualityOfService.OOM_BreakOnRecycle;
                }

                EventingStorage storage = EventingStorage.CreateLocalStorage(qos, typeSize);

                if (storage == null) {
                    return null;
                }

                GCEventSource Logger = new GCEventSource(sourceName,
                                                         storage,
                                                         ENABLE_ALL_MASK);

                if (Logger != null) {
                    Logger.Register();
                }

                return Logger;
            }

            GCEventSource(string sourceName, EventingStorage storage, uint controlFlags)
                : base(sourceName, storage, controlFlags) {}
        }

        [CLSCompliant(false)]
        [AccessedByRuntime("output to header - methods defined in GCTracing.cpp")]
        public class ProfilerBuffer
        {
            private const int cycleGranularity = 5000000;

            public Thread OwningThread;
            private ulong lastCycle;
            private ulong firstCycle;

#if LEGACY_GCTRACING
            [AccessedByRuntime("output to header : defined in GCTracing.cpp")]
            [MethodImpl(MethodImplOptions.InternalCall)]
            [StackBound(256)]
            [NoHeapAllocation]
            public static extern unsafe void SetupBuffer(byte * memoryBuffer, ulong bufferSize);
#endif // LEGACY_GCTRACING

            //
            // ABI entries
            //
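            // Each of the Log* entry points below is implemented natively in
            // GCTracing.cpp; the managed side only marshals the data. Together
            // they serialize the record kinds the profiler emits: generation
            // counts, tick intervals, allocations, object reachability records,
            // root sets, type definitions, stacks, and function addresses.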
            [AccessedByRuntime("output to header : defined in GCTracing.cpp")]
            [MethodImpl(MethodImplOptions.InternalCall)]
            [StackBound(256)]
            [NoHeapAllocation]
            public static extern unsafe void LogGenerations(int maxGeneration, int * generations);

            [AccessedByRuntime("output to header : defined in GCTracing.cpp")]
            [MethodImpl(MethodImplOptions.InternalCall)]
            [StackBound(256)]
            [NoHeapAllocation]
            public static extern unsafe void LogInterval(ulong Delta);

            [AccessedByRuntime("output to header : defined in GCTracing.cpp")]
            [MethodImpl(MethodImplOptions.InternalCall)]
            [StackBound(256)]
            [NoHeapAllocation]
            public static extern unsafe void LogAllocation(int threadId, UIntPtr objectAddress, uint stkNo);

            [AccessedByRuntime("output to header : defined in GCTracing.cpp")]
            [MethodImpl(MethodImplOptions.InternalCall)]
            [StackBound(256)]
            [NoHeapAllocation]
            public static extern unsafe void LogObject(uint ArraySize, UIntPtr * objectParameters);

            [AccessedByRuntime("output to header : defined in GCTracing.cpp")]
            [MethodImpl(MethodImplOptions.InternalCall)]
            [StackBound(256)]
            [NoHeapAllocation]
            public static extern unsafe void LogRoots(uint ArraySize, UIntPtr * objectRoots);

            [AccessedByRuntime("output to header : defined in GCTracing.cpp")]
            [MethodImpl(MethodImplOptions.InternalCall)]
            [StackBound(256)]
            [NoHeapAllocation]
            public static extern unsafe void LogType(uint typeId, string typeName);

            [AccessedByRuntime("output to header : defined in GCTracing.cpp")]
            [MethodImpl(MethodImplOptions.InternalCall)]
            [StackBound(256)]
            [NoHeapAllocation]
            public static extern unsafe void LogStack(uint stackNo, uint typeNo, UIntPtr size, uint stackSize, uint * funcIDs); // 'n'

            [AccessedByRuntime("output to header : defined in GCTracing.cpp")]
            [MethodImpl(MethodImplOptions.InternalCall)]
            [StackBound(256)]
            [NoHeapAllocation]
            public static extern unsafe void LogFunction(uint funcNo, UIntPtr eip);

            public ProfilerBuffer()
            {
                firstCycle = Processor.CycleCount;
            }

            //
            // Local definitions
            //
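            // Note on units: LogInterval receives (lastCycle - firstCycle) / 1000000,
            // i.e. the time elapsed since profiling began, in units of one
            // million cycles. A new interval record is emitted only after at
            // least cycleGranularity (five million) further cycles have passed.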
            public void LogTick() {

                // Log an entry describing the delta in cycles
                ulong nowCycle = Processor.CycleCount;

                // Emit a tick-count record only if it's been a while since the last one.
                if (nowCycle > lastCycle + cycleGranularity) {
                    lastCycle = nowCycle;
                    ProfilerBuffer.LogInterval((ulong) ((lastCycle - firstCycle) / 1000000));
                }
            }
        }
        //
        // Local variables
        //
        private const int maxGeneration = 3;        // CLR Profiler only handles this many
        private int[] generations;
        ProfilerBuffer Buffer;

        private Hashtable typeTable;                // hash of types->ids
        private uint nextTypeNo;                    // hand out IDs monotonically

        private Hashtable stackTable;               // hash of stacks->ids
        private uint nextStackNo;

        private const int stackSize = 16;           // increase for bigger / slower stack traces
        private UIntPtr[] stackEips;
        private uint[] stackNos;

        private Hashtable funcTable;                // hash of Eips->ids
        private uint nextFuncNo;
        private uint[] functionsIDs;

        private Thread recurseThread;               // prevent recursion when the profiler allocates
        private SpinLock allocationLock;
        private bool enabled;                       // profiling is enabled?
        private ulong options;

        private const uint tempBufferSize = 1024;
        private UIntPtr[] tempGCBuffer;
        private uint tempGCBufferEntries;
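
        // The profiler has two backing-store implementations. With
        // LEGACY_GCTRACING the entries are written to a single raw non-GC
        // memory buffer handed to the native code via SetupBuffer. Otherwise,
        // two eventing storages are used (a permanent one for type definitions
        // and a recyclable one for events), and their handles are exported so
        // that Monitoring.cpp can locate them.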
#if LEGACY_GCTRACING
        private ulong bufferSize;
        private static unsafe byte * bufferMemory;
#else // LEGACY_GCTRACING
        private GCTypeSource typeSource;
        private GCEventSource EventSource;

        [AccessedByRuntime("referenced from Monitoring.cpp")]
        private static unsafe UIntPtr TypeStorageHandle = 0;

        [AccessedByRuntime("referenced from Monitoring.cpp")]
        private static unsafe UIntPtr StorageHandle = 0;
#endif // LEGACY_GCTRACING

        internal GCProfilerLogger()
        {
#if SINGULARITY_KERNEL
            // Initialize spinlock
            allocationLock = new SpinLock(SpinLock.Types.GCTracing);
#endif
        }
        internal static GCProfilerLogger CurrentProfiler = null;

        // The public API for a typical client app.
        static public void StartProfiling()
        {
            // Query the diagnosis service to learn whether GC profiling is
            // enabled. This allows the buffer sizes for both kernel and SIP
            // profiling to be set from the kernel debugger. Note that these are
            // controlled independently; the implementation of GCProfileSettings
            // differs between the kernel and SIPs.

            if (CurrentProfiler != null) {
                //
                // The profiler has been set already. No second attempt is allowed.
                //
                return;
            }

            unsafe {
                int result;
                ulong defaultMemorySize = 0;
                ulong Options = 0;

                result = DiagnosisService.GCProfileSettingsImpl(
                    out defaultMemorySize,
                    out Options
                );

                if ((result == 0) && (defaultMemorySize > 0)) {
                    CurrentProfiler = new GCProfilerLogger();
                    CurrentProfiler.Initialize(defaultMemorySize, Options);
                    GC.SetProfiler(CurrentProfiler);
                    DebugStub.WriteLine("GC Profiling started");
                }
            }
        }
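
        // A minimal sketch of the intended call pattern (the exact call site
        // is an assumption; kernel and SIP startup paths would each make this
        // call once, early in their own initialization):
        //
        //     GCProfilerLogger.StartProfiling();  // no-op unless the debugger
        //                                         // has configured a nonzero
        //                                         // buffer size through
        //                                         // GCProfileSettingsImpl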
        // Initialization, prior to attempting to set this profiler into the GC. It's
        // inappropriate to do this stuff inside a constructor.
        internal void Initialize(ulong Size, ulong Flags)
        {
            options = Flags;
            typeTable = new Hashtable();
            stackTable = new Hashtable();
            funcTable = new Hashtable();
            stackEips = new UIntPtr[stackSize];
            stackNos = new uint[stackSize];
            generations = new int[maxGeneration];
            functionsIDs = new uint[stackSize];
            Buffer = new ProfilerBuffer();
            tempGCBuffer = new UIntPtr[tempBufferSize];
            tempGCBufferEntries = 0;

#if LEGACY_GCTRACING
            bufferSize = Size;

            unsafe {
                // Allocate the memory indicated as parameter, as nonGC
#if SINGULARITY_KERNEL
                UIntPtr pages = Microsoft.Singularity.Memory.MemoryManager.PagesFromBytes(bufferSize);
                bufferMemory = (byte *)Microsoft.Singularity.Memory.MemoryManager.KernelAllocate(
                    pages, null, 0, System.GCs.PageType.NonGC).ToPointer();
#else // !SINGULARITY_KERNEL
                bufferMemory = (byte *)PageTableService.Allocate(bufferSize);
#endif //SINGULARITY_KERNEL

                if (bufferMemory != null) {
                    // When we set this, we are no longer single-threaded:
                    ProfilerBuffer.SetupBuffer(bufferMemory, bufferSize);
                    this.enabled = true;
                }
            }
#else // LEGACY_GCTRACING
            typeSource = GCTypeSource.Create("GC.TypeDefinitions", (uint)Size, options);
            EventSource = GCEventSource.Create("GC.Events", (uint)Size, options);

            if ((typeSource == null) || (EventSource == null)) {
                typeSource = null;
                EventSource = null;
                this.enabled = false;
            } else {
                TypeStorageHandle = typeSource.Storage.GetHandle();
                StorageHandle = EventSource.Storage.GetHandle();
                this.enabled = true;
            }
#endif // LEGACY_GCTRACING
        }
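
        // In both modes, Size is the byte count reported by
        // GCProfileSettingsImpl: the legacy path converts it to pages (or
        // passes it to PageTableService.Allocate in a SIP) for one raw buffer,
        // while the eventing path uses it as the storage size for each of the
        // two local storages.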
        // The API by which our base class calls us.
        protected override void Shutdown()
        {
            // This is presumed to be called when the process is single-threaded, since
            // the entire GC heap is shutting down.
            if (enabled) {
                enabled = false;
            }
        }

        protected override void PreGC(int generation)
        {
            try {
                // Take ownership of the buffer to prevent mutating threads from
                // interleaving with us.
                DebugStub.Assert(Buffer.OwningThread == null);
                Buffer.OwningThread = Thread.CurrentThread;
                Buffer.LogTick();

                if (generation >= maxGeneration) {
                    generation = maxGeneration - 1;
                }
                generations[generation]++;

                unsafe {
                    fixed (int * ptr = &generations[0]) {
                        ProfilerBuffer.LogGenerations(maxGeneration, ptr);
                    }
                }
            }
            catch (Exception) {
                enabled = false;
                throw;
            }
        }
        // A GC has finished. The world is in a sane place, except that we might not
        // have started up all the mutator threads if this is a StopTheWorld collection.
        protected override void PostGC()
        {
            try {
                // Emit the fact that a GC has happened, including the state of the heap.
                // TODO: have another function to log the tick count here, to estimate
                // the time spent in GC too.
                Buffer.LogTick();

                // We should have an empty buffer, meaning we completed logging from the
                // previous operation before entering this code.
                DebugStub.Assert(tempGCBufferEntries == 0);

                ScanRoots();

                unsafe {
                    fixed (UIntPtr * ptr = &tempGCBuffer[0]) {
                        ProfilerBuffer.LogRoots(tempGCBufferEntries, ptr);
                        tempGCBufferEntries = 0;
                    }
                }

                // Write out the reachability graph of the heap.
                ScanObjects();

                // Once we have finished writing everything, we can allow mutator threads
                // to share access to the buffer with their own consistent entries.
                DebugStub.Assert(Buffer.OwningThread == Thread.CurrentThread);
                Buffer.OwningThread = null;
            }
            catch (Exception) {
                enabled = false;
                throw;
            }
        }
        // In the list of roots, we have found another object.
        protected override void ScanOneRoot(UIntPtr objectAddress)
        {
            if (objectAddress != 0) {
                //
                // If we have filled the temporary buffer, log it and resume
                // from the beginning.
                //
                if (tempGCBufferEntries >= tempBufferSize) {
                    unsafe {
                        fixed (UIntPtr * ptr = &tempGCBuffer[0]) {
                            ProfilerBuffer.LogRoots(tempGCBufferEntries, ptr);
                            tempGCBufferEntries = 0;
                        }
                    }
                }
                tempGCBuffer[tempGCBufferEntries++] = (UIntPtr)objectAddress;
            }
        }
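
        // The next three callbacks cooperate to assemble one object record in
        // tempGCBuffer with the layout
        //
        //     [objectAddress, typeId, size, fieldRef0, fieldRef1, ...]
        //
        // which EndScanOneObject then flushes through ProfilerBuffer.LogObject.
        // Field references beyond tempBufferSize entries are silently dropped.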
        // In the heap reachability graph, we are starting to dump a new object.
        protected override void StartScanOneObject(UIntPtr objectAddress, Type type, UIntPtr size)
        {
            DebugStub.Assert(tempGCBufferEntries == 0);
            tempGCBuffer[tempGCBufferEntries++] = (UIntPtr)objectAddress;
            tempGCBuffer[tempGCBufferEntries++] = (UIntPtr)GetTypeId(type);
            tempGCBuffer[tempGCBufferEntries++] = (UIntPtr)size;
        }

        // This is one of the references that the current object holds.
        protected override void ScanOneObjectField(UIntPtr objectAddress)
        {
            if ((objectAddress != 0) && (tempGCBufferEntries < tempBufferSize)) {
                tempGCBuffer[tempGCBufferEntries++] = (UIntPtr)objectAddress;
            }
        }

        // We are finished scanning one object.
        protected override void EndScanOneObject()
        {
            unsafe {
                fixed (UIntPtr * ptr = &tempGCBuffer[0]) {
                    ProfilerBuffer.LogObject(tempGCBufferEntries, ptr);
                    tempGCBufferEntries = 0;
                }
            }
        }
        // Create a log entry for the allocation that just occurred on this thread.
        protected override void Allocation(UIntPtr objAddr, Type type, UIntPtr size)
        {
            bool iflag;

            // We cannot recurse inside an Allocation notification, or we will simply
            // blow the stack on the first entry. Also, we don't want to log allocations
            // that occur as a consequence of logging the state of the GC heap -- though
            // we could support that if we chose to.
            if (enabled &&
                recurseThread != Thread.CurrentThread &&        // recursing?
                Buffer.OwningThread != Thread.CurrentThread) {  // GC logging?

                iflag = Processor.DisableLocalPreemption();
                allocationLock.Acquire();
                try {
                    DebugStub.Assert(recurseThread == null);
                    recurseThread = Thread.CurrentThread;

                    Buffer.LogTick();

                    uint stackSize = Isa.GetStackReturnAddresses(stackEips);
                    uint stkNo = 0;
                    if (stackSize > 0) {
                        stkNo = GetStackId(type, size, stackEips, stackSize);
                    }
                    ProfilerBuffer.LogAllocation(Thread.CurrentThread.GetThreadId(), objAddr, stkNo);
                }
                finally {
                    recurseThread = null;
                    allocationLock.Release();
                    Processor.RestoreLocalPreemption(iflag);
                }
            }
        }
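
        // Locking discipline for the allocation path: local preemption is
        // disabled before taking allocationLock and restored, in reverse
        // order, in the finally block, so the spinlock is never held across a
        // preemption. The recurseThread field doubles as a reentrancy guard:
        // any allocation performed while this thread is already logging is
        // skipped rather than recursed into.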
        // The rest of our implementation details:
        private uint GetStackId(Type type, UIntPtr size, UIntPtr[] stackEips, uint stackSize)
        {
            // First make sure that we have a type record for the object being
            // instantiated at this stack.
            uint typeNo = GetTypeId(type);

            DebugStub.Assert(stackSize <= stackEips.Length);

            // Then make sure we have a function record for each Eip in the stack. Of
            // course, we don't know when a bunch of Eips map to different offsets in
            // the same function, so we make a function for each unique Eip and fix it
            // up in post-processing. Hopefully there aren't too many unique call
            // sites in each method.
            ulong hash = typeNo;        // perhaps "typeNo ^ size" ?

            for (int i = 0; i < stackSize; i++) {
                // Map the individual Eips into their corresponding functions
                stackNos[i] = GetFunctionId(stackEips[i]);
                hash = (hash << 11) + (hash ^ stackNos[i]);
            }

            // TODO: Note that we will statistically map some distinct stacks into the
            // same stack if they have the same hash.
            object o = stackTable[hash];
            if (o != null) {
                return (uint) o;
            }

            // It's a novel stack. Note that we embed the size into the stack record,
            // but we don't include the size in the hash. There's a technique for
            // sharing prefixes of stacks that could be explored here to get more
            // accurate profiles without huge stack expansion.
            // TODO: consider the above.
            uint stackNo = nextStackNo++;

            // Stacks are emitted backwards, presumably to support common prefixes better.
            for (int i = (int)stackSize - 1; i >= 0; i--) {
                functionsIDs[stackSize - 1 - i] = stackNos[i];
            }

            unsafe {
                fixed (uint * ptr = &functionsIDs[0]) {
                    ProfilerBuffer.LogStack(stackNo, typeNo, size, stackSize, ptr);
                }
            }

            stackTable[hash] = stackNo;
            return stackNo;
        }

        private uint GetFunctionId(UIntPtr eip)
        {
            // Given the Eip of a function, make sure the function has been defined.
            // Since we don't have enough runtime information to map Eips to
            // functions, we must rely on post-processing.
            object o = funcTable[eip];
            if (o != null) {
                return (uint) o;
            }

            uint funcNo = nextFuncNo++;
            ProfilerBuffer.LogFunction(funcNo, eip);
            funcTable[eip] = funcNo;
            return funcNo;
        }

        private uint GetTypeId(Type type)
        {
            // Given a Type, make sure that it has been defined.
            object o = typeTable[type];
            if (o != null) {
                return (uint) o;
            }

            uint typeNo = nextTypeNo++;

            // Log this entry before putting it into the hashtable, where other threads
            // might see it. This means we may have duplicate conflicting entries for
            // the same logical type, but this is tolerated by the profiler.
            ProfilerBuffer.LogType(typeNo, type.FullName);
            typeTable[type] = typeNo;
            return typeNo;
        }
    }
}