
/rpython/memory/gctransform/boehm.py

https://bitbucket.org/oberstet/pypy
Python | 187 lines
from rpython.memory.gctransform.transform import GCTransformer, mallocHelpers
from rpython.memory.gctransform.support import (get_rtti,
    _static_deallocator_body_for_type, LLTransformerOp, ll_call_destructor)
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.flowspace.model import Constant
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper import rmodel
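
# Overview: this transformer targets the Boehm-Demers-Weiser conservative
# collector.  Fixed-size and varsize allocations become calls to the
# boehm_malloc / boehm_malloc_atomic llops, types whose rtti carries a
# destructor get a finalizer registered through boehm_register_finalizer,
# and weakrefs are built on Boehm "disappearing links" (see the helpers at
# the end of this file).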


class BoehmGCTransformer(GCTransformer):
    malloc_zero_filled = True
    FINALIZER_PTR = lltype.Ptr(lltype.FuncType([llmemory.Address], lltype.Void))
    HDR = lltype.Struct("header", ("hash", lltype.Signed))

    def __init__(self, translator, inline=False):
        super(BoehmGCTransformer, self).__init__(translator, inline=inline)
        self.finalizer_funcptrs = {}

        atomic_mh = mallocHelpers()
        atomic_mh.allocate = lambda size: llop.boehm_malloc_atomic(llmemory.Address, size)
        ll_malloc_fixedsize_atomic = atomic_mh._ll_malloc_fixedsize

        mh = mallocHelpers()
        mh.allocate = lambda size: llop.boehm_malloc(llmemory.Address, size)
        ll_malloc_fixedsize = mh._ll_malloc_fixedsize

        # XXX, do we need/want an atomic version of this function?
        ll_malloc_varsize_no_length = mh.ll_malloc_varsize_no_length
        ll_malloc_varsize = mh.ll_malloc_varsize

        HDRPTR = lltype.Ptr(self.HDR)

        def ll_identityhash(addr):
            # lazily compute the identity hash and cache it in the 'hash'
            # field of HDR (0 means "not computed yet")
            obj = llmemory.cast_adr_to_ptr(addr, HDRPTR)
            h = obj.hash
            if h == 0:
                obj.hash = h = ~llmemory.cast_adr_to_int(addr)
            return h

        if self.translator:
            self.malloc_fixedsize_ptr = self.inittime_helper(
                ll_malloc_fixedsize, [lltype.Signed], llmemory.Address)
            self.malloc_fixedsize_atomic_ptr = self.inittime_helper(
                ll_malloc_fixedsize_atomic, [lltype.Signed], llmemory.Address)
            self.malloc_varsize_no_length_ptr = self.inittime_helper(
                ll_malloc_varsize_no_length, [lltype.Signed]*3, llmemory.Address, inline=False)
            self.malloc_varsize_ptr = self.inittime_helper(
                ll_malloc_varsize, [lltype.Signed]*4, llmemory.Address, inline=False)
            self.weakref_create_ptr = self.inittime_helper(
                ll_weakref_create, [llmemory.Address], llmemory.WeakRefPtr,
                inline=False)
            self.weakref_deref_ptr = self.inittime_helper(
                ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address)
            self.identityhash_ptr = self.inittime_helper(
                ll_identityhash, [llmemory.Address], lltype.Signed,
                inline=False)
            self.mixlevelannotator.finish()   # for now
            self.mixlevelannotator.backend_optimize()

    def gct_fv_gc_malloc(self, hop, flags, TYPE, c_size):
        # XXX same behavior for zero=True: in theory that's wrong
        if TYPE._is_atomic():
            # 'atomic' means the object contains no GC pointers, so the
            # collector does not need to scan it
            funcptr = self.malloc_fixedsize_atomic_ptr
        else:
            funcptr = self.malloc_fixedsize_ptr
        v_raw = hop.genop("direct_call",
                          [funcptr, c_size],
                          resulttype=llmemory.Address)
        finalizer_ptr = self.finalizer_funcptr_for_type(TYPE)
        if finalizer_ptr:
            c_finalizer_ptr = Constant(finalizer_ptr, self.FINALIZER_PTR)
            hop.genop("boehm_register_finalizer", [v_raw, c_finalizer_ptr])
        return v_raw

    def gct_fv_gc_malloc_varsize(self, hop, flags, TYPE, v_length, c_const_size, c_item_size,
                                 c_offset_to_length):
        # XXX same behavior for zero=True: in theory that's wrong
        if c_offset_to_length is None:
            v_raw = hop.genop("direct_call",
                              [self.malloc_varsize_no_length_ptr, v_length,
                               c_const_size, c_item_size],
                              resulttype=llmemory.Address)
        else:
            v_raw = hop.genop("direct_call",
                              [self.malloc_varsize_ptr, v_length,
                               c_const_size, c_item_size, c_offset_to_length],
                              resulttype=llmemory.Address)
        return v_raw

    def finalizer_funcptr_for_type(self, TYPE):
        if TYPE in self.finalizer_funcptrs:
            return self.finalizer_funcptrs[TYPE]

        rtti = get_rtti(TYPE)
        if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):
            destrptr = rtti._obj.destructor_funcptr
            DESTR_ARG = lltype.typeOf(destrptr).TO.ARGS[0]
        else:
            destrptr = None
            DESTR_ARG = None

        if destrptr:
            EXC_INSTANCE_TYPE = self.translator.rtyper.exceptiondata.lltype_of_exception_value
            typename = TYPE.__name__
            def ll_finalizer(addr):
                # the finalizer can be invoked at an arbitrary point, so
                # save and restore any exception currently being propagated
                # around the call to the destructor
                exc_instance = llop.gc_fetch_exception(EXC_INSTANCE_TYPE)
                v = llmemory.cast_adr_to_ptr(addr, DESTR_ARG)
                ll_call_destructor(destrptr, v, typename)
                llop.gc_restore_exception(lltype.Void, exc_instance)
            fptr = self.annotate_finalizer(ll_finalizer, [llmemory.Address], lltype.Void)
        else:
            fptr = lltype.nullptr(self.FINALIZER_PTR.TO)

        self.finalizer_funcptrs[TYPE] = fptr
        return fptr

    def gct_weakref_create(self, hop):
        v_instance, = hop.spaceop.args
        v_addr = hop.genop("cast_ptr_to_adr", [v_instance],
                           resulttype=llmemory.Address)
        v_wref = hop.genop("direct_call",
                           [self.weakref_create_ptr, v_addr],
                           resulttype=llmemory.WeakRefPtr)
        hop.cast_result(v_wref)

    def gct_weakref_deref(self, hop):
        v_wref, = hop.spaceop.args
        v_addr = hop.genop("direct_call",
                           [self.weakref_deref_ptr, v_wref],
                           resulttype=llmemory.Address)
        hop.cast_result(v_addr)

    def gct_gc_writebarrier_before_copy(self, hop):
        # no write barrier needed
        op = hop.spaceop
        hop.genop("same_as",
                  [rmodel.inputconst(lltype.Bool, True)],
                  resultvar=op.result)

    def gct_gc_identityhash(self, hop):
        v_obj = hop.spaceop.args[0]
        v_adr = hop.genop("cast_ptr_to_adr", [v_obj],
                          resulttype=llmemory.Address)
        hop.genop("direct_call", [self.identityhash_ptr, v_adr],
                  resultvar=hop.spaceop.result)

    def gct_gc_id(self, hop):
        # this is the logic from the HIDE_POINTER macro in <gc/gc.h>
        v_int = hop.genop('cast_ptr_to_int', [hop.spaceop.args[0]],
                          resulttype=lltype.Signed)
        hop.genop('int_invert', [v_int], resultvar=hop.spaceop.result)


########## weakrefs ##########
# Boehm: weakref objects are small structures containing only a Boehm
# disappearing link.  We don't have to hide the link's value with
# HIDE_POINTER(), because we explicitly use GC_MALLOC_ATOMIC().

WEAKLINK = lltype.FixedSizeArray(llmemory.Address, 1)
sizeof_weakreflink = llmemory.sizeof(WEAKLINK)
empty_weaklink = lltype.malloc(WEAKLINK, immortal=True)
empty_weaklink[0] = llmemory.NULL

def ll_weakref_create(targetaddr):
    link = llop.boehm_malloc_atomic(llmemory.Address, sizeof_weakreflink)
    if not link:
        raise MemoryError
    plink = llmemory.cast_adr_to_ptr(link, lltype.Ptr(WEAKLINK))
    plink[0] = targetaddr
    llop.boehm_disappearing_link(lltype.Void, link, targetaddr)
    return llmemory.cast_ptr_to_weakrefptr(plink)

def ll_weakref_deref(wref):
    plink = llmemory.cast_weakrefptr_to_ptr(lltype.Ptr(WEAKLINK), wref)
    return plink[0]

def convert_weakref_to(targetptr):
    # Prebuilt weakrefs don't really need to be weak at all, but we need
    # to emulate the structure expected by ll_weakref_deref().  This is
    # essentially the same code as in ll_weakref_create(), but I'm not
    # sure trying to share it is worth the hassle...
    if not targetptr:
        return empty_weaklink
    else:
        plink = lltype.malloc(WEAKLINK, immortal=True)
        plink[0] = llmemory.cast_ptr_to_adr(targetptr)
        return plink
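
# Note: the boehm_* llops used above are expected to be lowered by the C
# backend onto the Boehm GC API (this mapping is an assumption about code
# outside this file): roughly boehm_malloc -> GC_MALLOC, boehm_malloc_atomic
# -> GC_MALLOC_ATOMIC, boehm_register_finalizer -> one of the
# GC_REGISTER_FINALIZER* variants, and boehm_disappearing_link ->
# GC_general_register_disappearing_link.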