
/src/qt/qtwebkit/Source/JavaScriptCore/heap/Heap.cpp

https://gitlab.com/x33n/phantomjs
/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 * Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include "config.h"
#include "Heap.h"

#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "CopiedSpace.h"
#include "CopiedSpaceInlines.h"
#include "CopyVisitorInlines.h"
#include "GCActivityCallback.h"
#include "HeapRootVisitor.h"
#include "HeapStatistics.h"
#include "IncrementalSweeper.h"
#include "Interpreter.h"
#include "VM.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "Operations.h"
#include "Tracing.h"
#include "UnlinkedCodeBlock.h"
#include "WeakSetInlines.h"
#include <algorithm>
#include <wtf/RAMSize.h>
#include <wtf/CurrentTime.h>

using namespace std;
using namespace JSC;

namespace JSC {

namespace {

static const size_t largeHeapSize = 32 * MB; // About 1.5X the average webpage.
static const size_t smallHeapSize = 1 * MB; // Matches the FastMalloc per-thread cache.
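
// When GC_LOGGING is enabled, GCPHASE(name) defines a file-scope GCTimer and times the
// enclosing scope through a GCTimerScope, and GCCOUNTER(name, value) accumulates
// count/total/min/max statistics; all three macros compile to no-ops otherwise.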
#if ENABLE(GC_LOGGING)
#if COMPILER(CLANG)
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wglobal-constructors\"") \
_Pragma("clang diagnostic ignored \"-Wexit-time-destructors\"") \
static type name arguments; \
_Pragma("clang diagnostic pop")
#else
#define DEFINE_GC_LOGGING_GLOBAL(type, name, arguments) \
static type name arguments;
#endif // COMPILER(CLANG)

struct GCTimer {
    GCTimer(const char* name)
        : m_time(0)
        , m_min(100000000)
        , m_max(0)
        , m_count(0)
        , m_name(name)
    {
    }
    ~GCTimer()
    {
        dataLogF("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min * 1000, m_max * 1000);
    }

    double m_time;
    double m_min;
    double m_max;
    size_t m_count;
    const char* m_name;
};

struct GCTimerScope {
    GCTimerScope(GCTimer* timer)
        : m_timer(timer)
        , m_start(WTF::currentTime())
    {
    }
    ~GCTimerScope()
    {
        double delta = WTF::currentTime() - m_start;
        if (delta < m_timer->m_min)
            m_timer->m_min = delta;
        if (delta > m_timer->m_max)
            m_timer->m_max = delta;
        m_timer->m_count++;
        m_timer->m_time += delta;
    }

    GCTimer* m_timer;
    double m_start;
};

struct GCCounter {
    GCCounter(const char* name)
        : m_name(name)
        , m_count(0)
        , m_total(0)
        , m_min(10000000)
        , m_max(0)
    {
    }

    void count(size_t amount)
    {
        m_count++;
        m_total += amount;
        if (amount < m_min)
            m_min = amount;
        if (amount > m_max)
            m_max = amount;
    }
    ~GCCounter()
    {
        dataLogF("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max);
    }

    const char* m_name;
    size_t m_count;
    size_t m_total;
    size_t m_min;
    size_t m_max;
};

#define GCPHASE(name) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name##Timer, (#name)); GCTimerScope name##TimerScope(&name##Timer)
#define COND_GCPHASE(cond, name1, name2) DEFINE_GC_LOGGING_GLOBAL(GCTimer, name1##Timer, (#name1)); DEFINE_GC_LOGGING_GLOBAL(GCTimer, name2##Timer, (#name2)); GCTimerScope name1##CondTimerScope(cond ? &name1##Timer : &name2##Timer)
#define GCCOUNTER(name, value) do { DEFINE_GC_LOGGING_GLOBAL(GCCounter, name##Counter, (#name)); name##Counter.count(value); } while (false)

#else

#define GCPHASE(name) do { } while (false)
#define COND_GCPHASE(cond, name1, name2) do { } while (false)
#define GCCOUNTER(name, value) do { } while (false)

#endif
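
// Heap sizing policy: a LargeHeap starts from largeHeapSize, capped at a quarter of
// physical RAM, while a SmallHeap starts from smallHeapSize; proportionalHeapSize()
// then scales the allocation limit with the live heap size after each collection.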
static inline size_t minHeapSize(HeapType heapType, size_t ramSize)
{
    if (heapType == LargeHeap)
        return min(largeHeapSize, ramSize / 4);
    return smallHeapSize;
}

static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize)
{
    // Try to stay under 1/2 RAM size to leave room for the DOM, rendering, networking, etc.
    if (heapSize < ramSize / 4)
        return 2 * heapSize;
    if (heapSize < ramSize / 2)
        return 1.5 * heapSize;
    return 1.25 * heapSize;
}

static inline bool isValidSharedInstanceThreadState(VM* vm)
{
    return vm->apiLock().currentThreadIsHoldingLock();
}

static inline bool isValidThreadState(VM* vm)
{
    if (vm->identifierTable != wtfThreadData().currentIdentifierTable())
        return false;

    if (vm->isSharedInstance() && !isValidSharedInstanceThreadState(vm))
        return false;

    return true;
}

struct MarkObject : public MarkedBlock::VoidFunctor {
    void operator()(JSCell* cell)
    {
        if (cell->isZapped())
            return;
        Heap::heap(cell)->setMarked(cell);
    }
};

struct Count : public MarkedBlock::CountFunctor {
    void operator()(JSCell*) { count(1); }
};

struct CountIfGlobalObject : MarkedBlock::CountFunctor {
    void operator()(JSCell* cell)
    {
        if (!cell->isObject())
            return;
        if (!asObject(cell)->isGlobalObject())
            return;
        count(1);
    }
};

class RecordType {
public:
    typedef PassOwnPtr<TypeCountSet> ReturnType;

    RecordType();
    void operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};

inline RecordType::RecordType()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}

inline void RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
    return m_typeCountSet.release();
}

} // anonymous namespace
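
// The Heap owns the marked object space and the copied storage space, the marking and
// copying visitors, and the activity callback and incremental sweeper that schedule
// collection and sweeping work.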
Heap::Heap(VM* vm, HeapType heapType)
    : m_heapType(heapType)
    , m_ramSize(ramSize())
    , m_minBytesPerCycle(minHeapSize(m_heapType, m_ramSize))
    , m_sizeAfterLastCollect(0)
    , m_bytesAllocatedLimit(m_minBytesPerCycle)
    , m_bytesAllocated(0)
    , m_bytesAbandoned(0)
    , m_operationInProgress(NoOperation)
    , m_blockAllocator()
    , m_objectSpace(this)
    , m_storageSpace(this)
    , m_machineThreads(this)
    , m_sharedData(vm)
    , m_slotVisitor(m_sharedData)
    , m_copyVisitor(m_sharedData)
    , m_handleSet(vm)
    , m_isSafeToCollect(false)
    , m_vm(vm)
    , m_lastGCLength(0)
    , m_lastCodeDiscardTime(WTF::currentTime())
    , m_activityCallback(DefaultGCActivityCallback::create(this))
    , m_sweeper(IncrementalSweeper::create(this))
{
    m_storageSpace.init();
}

Heap::~Heap()
{
}

bool Heap::isPagedOut(double deadline)
{
    return m_objectSpace.isPagedOut(deadline) || m_storageSpace.isPagedOut(deadline);
}

// The VM is being destroyed and the collector will never run again.
// Run all pending finalizers now because we won't get another chance.
void Heap::lastChanceToFinalize()
{
    RELEASE_ASSERT(!m_vm->dynamicGlobalObject);
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    m_objectSpace.lastChanceToFinalize();

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_slotVisitor.m_visitedTypeCounts.dump(WTF::dataFile(), "Visited Type Counts");
    m_destroyedTypeCounts.dump(WTF::dataFile(), "Destroyed Type Counts");
#endif
}

void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    didAllocate(cost);
    if (shouldCollect())
        collect(DoNotSweep);
}

void Heap::reportAbandonedObjectGraph()
{
    // Our clients don't know exactly how much memory they
    // are abandoning so we just guess for them.
    double abandonedBytes = 0.10 * m_sizeAfterLastCollect;

    // We want to accelerate the next collection. Because memory has just
    // been abandoned, the next collection has the potential to
    // be more profitable. Since allocation is the trigger for collection,
    // we hasten the next collection by pretending that we've allocated more memory.
    didAbandon(abandonedBytes);
}

void Heap::didAbandon(size_t bytes)
{
    m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
    m_bytesAbandoned += bytes;
}

void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->apiLock().currentThreadIsHoldingLock());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(m_vm->apiLock().currentThreadIsHoldingLock());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}

void Heap::jettisonDFGCodeBlock(PassOwnPtr<CodeBlock> codeBlock)
{
    m_dfgCodeBlocks.jettison(codeBlock);
}

void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        heapRootVisitor.visit(&it->key);
}

void Heap::pushTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
    typedef Vector<Vector<ValueStringPair, 0, UnsafeVectorOverflow>* > VectorOfValueStringVectors;

    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
        Vector<ValueStringPair, 0, UnsafeVectorOverflow>* tempSortingVector = *it;

        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
            if (vectorIt->first)
                heapRootVisitor.visit(&vectorIt->first);
        }
    }
}

void Heap::harvestWeakReferences()
{
    m_slotVisitor.harvestWeakReferences();
}

void Heap::finalizeUnconditionalFinalizers()
{
    m_slotVisitor.finalizeUnconditionalFinalizers();
}

inline JSStack& Heap::stack()
{
    return m_vm->interpreter->stack();
}

void Heap::canonicalizeCellLivenessData()
{
    m_objectSpace.canonicalizeCellLivenessData();
}

void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_vm));
    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    stack().gatherConservativeRoots(stackRoots);
    size_t stackRootCount = stackRoots.size();
    JSCell** registerRoots = stackRoots.roots();
    for (size_t i = 0; i < stackRootCount; i++) {
        setMarked(registerRoots[i]);
        roots.add(registerRoots[i]);
    }
}
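
// markRoots(): conservative roots are gathered from the machine threads and the JS stack
// before the old mark bits are cleared, the strong roots (active code blocks, small strings,
// protected values, temp sort vectors, argument buffers, the pending exception, handles, and
// marked code blocks/stub routines) are visited, and finally weak handles are visited
// repeatedly until the visitor runs out of work.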
void Heap::markRoots()
{
    SamplingRegion samplingRegion("Garbage Collection: Tracing");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

#if ENABLE(OBJECT_MARK_LOGGING)
    double gcStartTime = WTF::currentTime();
#endif

    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_jitStubRoutines.clearMarks();
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_dfgCodeBlocks.clearMarks();
    {
        GCPHASE(GatherStackRoots);
        stack().gatherConservativeRoots(
            stackRoots, m_jitStubRoutines, m_dfgCodeBlocks);
    }

#if ENABLE(DFG_JIT)
    ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherScratchBufferRoots);
        m_vm->gatherConservativeRoots(scratchBufferRoots);
    }
#endif

    {
        GCPHASE(clearMarks);
        m_objectSpace.clearMarks();
    }

    m_sharedData.didStartMarking();
    SlotVisitor& visitor = m_slotVisitor;
    visitor.setup();
    HeapRootVisitor heapRootVisitor(visitor);

    {
        ParallelModeEnabler enabler(visitor);

        if (m_vm->codeBlocksBeingCompiled.size()) {
            GCPHASE(VisitActiveCodeBlock);
            for (size_t i = 0; i < m_vm->codeBlocksBeingCompiled.size(); i++)
                m_vm->codeBlocksBeingCompiled[i]->visitAggregate(visitor);
        }

        m_vm->smallStrings.visitStrongReferences(visitor);

        {
            GCPHASE(VisitMachineRoots);
            MARK_LOG_ROOT(visitor, "C++ Stack");
            visitor.append(machineThreadRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitStackRoots);
            MARK_LOG_ROOT(visitor, "Stack");
            visitor.append(stackRoots);
            visitor.donateAndDrain();
        }
#if ENABLE(DFG_JIT)
        {
            GCPHASE(VisitScratchBufferRoots);
            MARK_LOG_ROOT(visitor, "Scratch Buffers");
            visitor.append(scratchBufferRoots);
            visitor.donateAndDrain();
        }
#endif
        {
            GCPHASE(VisitProtectedObjects);
            MARK_LOG_ROOT(visitor, "Protected Objects");
            markProtectedObjects(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitTempSortVectors);
            MARK_LOG_ROOT(visitor, "Temp Sort Vectors");
            markTempSortVectors(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(MarkingArgumentBuffers);
            if (m_markListSet && m_markListSet->size()) {
                MARK_LOG_ROOT(visitor, "Argument Buffers");
                MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
                visitor.donateAndDrain();
            }
        }
        if (m_vm->exception) {
            GCPHASE(MarkingException);
            MARK_LOG_ROOT(visitor, "Exceptions");
            heapRootVisitor.visit(&m_vm->exception);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitStrongHandles);
            MARK_LOG_ROOT(visitor, "Strong Handles");
            m_handleSet.visitStrongHandles(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(HandleStack);
            MARK_LOG_ROOT(visitor, "Handle Stack");
            m_handleStack.visit(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(TraceCodeBlocksAndJITStubRoutines);
            MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines");
            m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor);
            m_jitStubRoutines.traceMarkedStubRoutines(visitor);
            visitor.donateAndDrain();
        }
#if ENABLE(PARALLEL_GC)
        {
            GCPHASE(Convergence);
            visitor.drainFromShared(SlotVisitor::MasterDrain);
        }
#endif
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    {
        GCPHASE(VisitingLiveWeakHandles);
        MARK_LOG_ROOT(visitor, "Live Weak Handles");
        while (true) {
            m_objectSpace.visitWeakSets(heapRootVisitor);
            harvestWeakReferences();
            if (visitor.isEmpty())
                break;
            {
                ParallelModeEnabler enabler(visitor);
                visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
                visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
            }
        }
    }

    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    m_sharedData.didFinishMarking();
#if ENABLE(OBJECT_MARK_LOGGING)
    size_t visitCount = visitor.visitCount();
#if ENABLE(PARALLEL_GC)
    visitCount += m_sharedData.childVisitCount();
#endif
    MARK_LOG_MESSAGE2("\nNumber of live Objects after full GC %lu, took %.6f secs\n", visitCount, WTF::currentTime() - gcStartTime);
#endif

    visitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}
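
// copyBackingStores(): evacuate live backing stores out of CopiedSpace; when CopiedSpace
// decides a copy phase is not worthwhile, the phase is skipped but still reported as done.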
void Heap::copyBackingStores()
{
    m_storageSpace.startedCopying();
    if (m_storageSpace.shouldDoCopyPhase()) {
        m_sharedData.didStartCopying();
        m_copyVisitor.startCopying();
        m_copyVisitor.copyFromShared();
        m_copyVisitor.doneCopying();
        // We need to wait for everybody to finish and return their CopiedBlocks
        // before signaling that the phase is complete.
        m_storageSpace.doneCopying();
        m_sharedData.didFinishCopying();
    } else
        m_storageSpace.doneCopying();
}

size_t Heap::objectCount()
{
    return m_objectSpace.objectCount();
}

size_t Heap::size()
{
    return m_objectSpace.size() + m_storageSpace.size();
}

size_t Heap::capacity()
{
    return m_objectSpace.capacity() + m_storageSpace.capacity();
}

size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}

size_t Heap::globalObjectCount()
{
    return m_objectSpace.forEachLiveCell<CountIfGlobalObject>();
}

size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    return m_objectSpace.forEachLiveCell<RecordType>();
}

void Heap::deleteAllCompiledCode()
{
    // If JavaScript is running, it's not safe to delete code, since we'll end
    // up deleting code that is live on the stack.
    if (m_vm->dynamicGlobalObject)
        return;

    for (ExecutableBase* current = m_compiledCode.head(); current; current = current->next()) {
        if (!current->isFunctionExecutable())
            continue;
        static_cast<FunctionExecutable*>(current)->clearCodeIfNotCompiling();
    }

    m_dfgCodeBlocks.clearMarks();
    m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
}

void Heap::deleteUnmarkedCompiledCode()
{
    ExecutableBase* next;
    for (ExecutableBase* current = m_compiledCode.head(); current; current = next) {
        next = current->next();
        if (isMarked(current))
            continue;

        // We do this because executable memory is limited on some platforms and because
        // CodeBlock requires eager finalization.
        ExecutableBase::clearCodeVirtual(current);
        m_compiledCode.remove(current);
    }

    m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
    m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines();
}

void Heap::collectAllGarbage()
{
    if (!m_isSafeToCollect)
        return;

    collect(DoSweep);
}

static double minute = 60.0;
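
// collect() performs a full collection synchronously: it discards all compiled code if none
// has been discarded within the last minute, marks roots, reaps weak sets, snapshots the
// marked blocks for the incremental sweeper, copies backing stores, runs finalizers, deletes
// unmarked code, optionally sweeps eagerly, and finally recomputes the allocation limit from
// the post-collection heap size.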
void Heap::collect(SweepToggle sweepToggle)
{
    SamplingRegion samplingRegion("Garbage Collection");

    GCPHASE(Collect);
    ASSERT(vm()->apiLock().currentThreadIsHoldingLock());
    RELEASE_ASSERT(vm()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);
    m_operationInProgress = Collection;

    m_activityCallback->willCollect();

    double lastGCStartTime = WTF::currentTime();
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::currentTime();
    }

    {
        GCPHASE(Canonicalize);
        m_objectSpace.canonicalizeCellLivenessData();
    }

    markRoots();

    {
        GCPHASE(ReapingWeakHandles);
        m_objectSpace.reapWeakSets();
    }

    JAVASCRIPTCORE_GC_MARKED();

    {
        m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
        MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
        m_objectSpace.forEachBlock(functor);
    }

    copyBackingStores();

    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }

    {
        GCPHASE(finalizeSmallStrings);
        m_vm->smallStrings.finalizeSmallStrings();
    }

    {
        GCPHASE(DeleteCodeBlocks);
        deleteUnmarkedCompiledCode();
    }

    {
        GCPHASE(DeleteSourceProviderCaches);
        m_vm->clearSourceProviderCaches();
    }

    if (sweepToggle == DoSweep) {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        m_objectSpace.sweep();
        m_objectSpace.shrink();
    }

    m_sweeper->startSweeping(m_blockSnapshot);
    m_bytesAbandoned = 0;

    {
        GCPHASE(ResetAllocators);
        m_objectSpace.resetAllocators();
    }

    size_t currentHeapSize = size();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    m_sizeAfterLastCollect = currentHeapSize;

    // To avoid pathological GC churn in very small and very large heaps, we set
    // the new allocation limit based on the current size of the heap, with a
    // fixed minimum.
    size_t maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
    m_bytesAllocatedLimit = maxHeapSize - currentHeapSize;

    m_bytesAllocated = 0;
    double lastGCEndTime = WTF::currentTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime);
    RELEASE_ASSERT(m_operationInProgress == Collection);

    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);
}

void Heap::markDeadObjects()
{
    m_objectSpace.forEachDeadCell<MarkObject>();
}

void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
{
    m_activityCallback = activityCallback;
}

GCActivityCallback* Heap::activityCallback()
{
    return m_activityCallback.get();
}

IncrementalSweeper* Heap::sweeper()
{
    return m_sweeper.get();
}

void Heap::setGarbageCollectionTimerEnabled(bool enable)
{
    activityCallback()->setEnabled(enable);
}

void Heap::didAllocate(size_t bytes)
{
    m_activityCallback->didAllocate(m_bytesAllocated + m_bytesAbandoned);
    m_bytesAllocated += bytes;
}

bool Heap::isValidAllocation(size_t)
{
    if (!isValidThreadState(m_vm))
        return false;

    if (m_operationInProgress != NoOperation)
        return false;

    return true;
}

void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
    WeakSet::allocate(cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer)); // Balanced by FinalizerOwner::finalize().
}

void Heap::FinalizerOwner::finalize(Handle<Unknown> handle, void* context)
{
    HandleSlot slot = handle.slot();
    Finalizer finalizer = reinterpret_cast<Finalizer>(context);
    finalizer(slot->asCell());
    WeakSet::deallocate(WeakImpl::asWeakImpl(slot));
}

void Heap::addCompiledCode(ExecutableBase* executable)
{
    m_compiledCode.append(executable);
}
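
// Zombify overwrites every word of a dead cell (skipping the zap marker, if present) with
// 0xbbadbeef so that any later use of the dead object stands out in a crash.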
class Zombify : public MarkedBlock::VoidFunctor {
public:
    void operator()(JSCell* cell)
    {
        void** current = reinterpret_cast<void**>(cell);

        // We want to maintain zapped-ness because that's how we know if we've called
        // the destructor.
        if (cell->isZapped())
            current++;

        void* limit = static_cast<void*>(reinterpret_cast<char*>(cell) + MarkedBlock::blockFor(cell)->cellSize());
        for (; current < limit; current++)
            *current = reinterpret_cast<void*>(0xbbadbeef);
    }
};

void Heap::zombifyDeadObjects()
{
    // Sweep now because destructors will crash once we're zombified.
    m_objectSpace.sweep();
    m_objectSpace.forEachDeadCell<Zombify>();
}

} // namespace JSC