
/ghc-7.0.4/compiler/nativeGen/AsmCodeGen.lhs

-- -----------------------------------------------------------------------------
--
-- (c) The University of Glasgow 1993-2004
--
-- This is the top-level module in the native code generator.
--
-- -----------------------------------------------------------------------------

\begin{code}
{-# OPTIONS -w #-}
-- The above warning suppression flag is a temporary kludge.
-- While working on this module you are encouraged to remove it and fix
-- any warnings in the module. See
--     http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
-- for details

module AsmCodeGen ( nativeCodeGen ) where

#include "HsVersions.h"
#include "nativeGen/NCG.h"

#if alpha_TARGET_ARCH
import Alpha.CodeGen
import Alpha.Regs
import Alpha.RegInfo
import Alpha.Instr

#elif i386_TARGET_ARCH || x86_64_TARGET_ARCH
import X86.CodeGen
import X86.Regs
import X86.RegInfo
import X86.Instr
import X86.Ppr

#elif sparc_TARGET_ARCH
import SPARC.CodeGen
import SPARC.Regs
import SPARC.Instr
import SPARC.Ppr
import SPARC.ShortcutJump

#elif powerpc_TARGET_ARCH
import PPC.CodeGen
import PPC.Cond
import PPC.Regs
import PPC.RegInfo
import PPC.Instr
import PPC.Ppr

#else
#error "AsmCodeGen: unknown architecture"
#endif

import RegAlloc.Liveness
import qualified RegAlloc.Linear.Main           as Linear

import qualified GraphColor                     as Color
import qualified RegAlloc.Graph.Main            as Color
import qualified RegAlloc.Graph.Stats           as Color
import qualified RegAlloc.Graph.Coalesce        as Color
import qualified RegAlloc.Graph.TrivColorable   as Color

import qualified SPARC.CodeGen.Expand           as SPARC

import TargetReg
import Platform
import Instruction
import PIC
import Reg
import RegClass
import NCGMonad

import BlockId
import CgUtils          ( fixStgRegisters )
import Cmm
import CmmOpt           ( cmmMiniInline, cmmMachOpFold )
import PprCmm
import CLabel
import State

import UniqFM
import Unique           ( Unique, getUnique )
import UniqSupply

import DynFlags
#if powerpc_TARGET_ARCH
import StaticFlags      ( opt_Static, opt_PIC )
#endif
import Util
import Config           ( cProjectVersion )
import Module

import Digraph
import qualified Pretty
import BufWrite
import Outputable
import FastString
import UniqSet
import ErrUtils

-- DEBUGGING ONLY
--import OrdList

import Data.List
import Data.Int
import Data.Word
import Data.Bits
import Data.Maybe
import GHC.Exts
import Control.Monad
import System.IO
{-
The native-code generator has machine-independent and
machine-dependent modules.

This module ("AsmCodeGen") is the top-level machine-independent
module.  Before entering machine-dependent land, we do some
machine-independent optimisations (defined below) on the
'CmmStmts's.

We convert to the machine-specific 'Instr' datatype with
'cmmCodeGen', assuming an infinite supply of registers.  We then use
a machine-independent register allocator ('regAlloc') to rejoin
reality.  Obviously, 'regAlloc' has machine-specific helper
functions (see the notes about "RegAllocInfo" below).

Finally, we order the basic blocks of the function so as to minimise
the number of jumps between blocks, by utilising fallthrough wherever
possible.

The machine-dependent bits break down as follows:

  * ["MachRegs"]  Everything about the target platform's machine
    registers (and immediate operands, and addresses, which tend to
    intermingle/interact with registers).

  * ["MachInstrs"]  Includes the 'Instr' datatype (possibly should
    have a module of its own), plus a miscellany of other things
    (e.g., 'targetDoubleSize', 'smStablePtrTable', ...)

  * ["MachCodeGen"]  is where 'Cmm' stuff turns into
    machine instructions.

  * ["PprMach"]  'pprInstr' turns an 'Instr' into text (well, really
    a 'Doc').

  * ["RegAllocInfo"]  In the register allocator, we manipulate
    'MRegsState's, which are 'BitSet's, one bit per machine register.
    When we want to say something about a specific machine register
    (e.g., ``it gets clobbered by this instruction''), we set/unset
    its bit.  Obviously, we do this 'BitSet' thing for efficiency
    reasons.

    The 'RegAllocInfo' module collects together the machine-specific
    info needed to do register allocation.

  * ["RegisterAlloc"]  The (machine-independent) register allocator.
-}
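
{-
As a roadmap, the pass ordering implemented by 'cmmNativeGen' below is,
schematically (an informal sketch of the data flow, not real code):

        fixStgRegisters         -- rewrite assignments to global regs
         -> cmmToCmm            -- generic Cmm optimisations
         -> genMachCode         -- instruction selection, virtual regs
         -> regLiveness         -- annotate with liveness info
         -> regAlloc            -- graph-colouring or linear allocator
         -> shortcutBranches    -- retarget jumps-to-jumps
         -> sequenceTop         -- block layout to favour fallthroughs

plus per-architecture fixups (the x86 FPU kludge, SPARC synthetic
instruction expansion) where noted below.
-}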
-- -----------------------------------------------------------------------------
-- Top-level of the native codegen

--------------------
nativeCodeGen :: DynFlags -> Handle -> UniqSupply -> [RawCmm] -> IO ()
nativeCodeGen dflags h us cmms
 = do
        let split_cmms  = concat $ map add_split cmms

        -- BufHandle is a performance hack.  We could hide it inside
        -- Pretty if it weren't for the fact that we do lots of little
        -- printDocs here (in order to do codegen in constant space).
        bufh <- newBufHandle h
        (imports, prof) <- cmmNativeGens dflags bufh us split_cmms [] [] 0
        bFlush bufh

        let (native, colorStats, linearStats)
                = unzip3 prof

        -- dump native code
        dumpIfSet_dyn dflags
                Opt_D_dump_asm "Asm code"
                (vcat $ map (docToSDoc . pprNatCmmTop) $ concat native)

        -- dump global NCG stats for graph coloring allocator
        (case concat $ catMaybes colorStats of
          []    -> return ()
          stats -> do
                -- build the global register conflict graph
                let graphGlobal
                        = foldl Color.union Color.initGraph
                        $ [ Color.raGraph stat
                                | stat@Color.RegAllocStatsStart{} <- stats]

                dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Color.pprStats stats graphGlobal

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_conflicts "Register conflict graph"
                        $ Color.dotGraph
                                targetRegDotColor
                                (Color.trivColorable
                                        targetVirtualRegSqueeze
                                        targetRealRegSqueeze)
                        $ graphGlobal)

        -- dump global NCG stats for linear allocator
        (case concat $ catMaybes linearStats of
          []    -> return ()
          stats -> dumpSDoc dflags Opt_D_dump_asm_stats "NCG stats"
                        $ Linear.pprStats (concat native) stats)

        -- write out the imports
        Pretty.printDoc Pretty.LeftMode h
                $ makeImportsDoc dflags (concat imports)

        return  ()

 where  add_split (Cmm tops)
                | dopt Opt_SplitObjs dflags = split_marker : tops
                | otherwise                 = tops

        split_marker = CmmProc [] mkSplitMarkerLabel [] (ListGraph [])
-- | Do native code generation on all these cmms.
--
cmmNativeGens dflags h us [] impAcc profAcc count
        = return (reverse impAcc, reverse profAcc)

cmmNativeGens dflags h us (cmm : cmms) impAcc profAcc count
 = do
        (us', native, imports, colorStats, linearStats)
                <- cmmNativeGen dflags us cmm count

        Pretty.bufLeftRender h
                $ {-# SCC "pprNativeCode" #-} Pretty.vcat $ map pprNatCmmTop native

        -- carefully evaluate this strictly.  Binding it with 'let'
        -- and then using 'seq' doesn't work, because the let
        -- apparently gets inlined first.
        lsPprNative <- return $!
                if  dopt Opt_D_dump_asm dflags
                 || dopt Opt_D_dump_asm_stats dflags
                        then native
                        else []

        count' <- return $! count + 1

        -- force evaluation of all this stuff to avoid space leaks
        seqString (showSDoc $ vcat $ map ppr imports) `seq` return ()

        cmmNativeGens dflags h us' cmms
                        (imports : impAcc)
                        ((lsPprNative, colorStats, linearStats) : profAcc)
                        count'

 where  seqString []            = ()
        seqString (x:xs)        = x `seq` seqString xs `seq` ()
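
{-
In isolation, the strictness idiom used above is (a sketch of the
intent, not new functionality):

    x <- return $! e    -- forces 'e' to WHNF at this point in the IO action

as opposed to

    let x = e
    x `seq` return ()   -- the let can get inlined first, defeating the seq

which is exactly the failure mode the comment above describes.
-}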
-- | Complete native code generation phase for a single top-level chunk of Cmm,
--      dumping the output of each stage along the way.
--      Also collects the global conflict graph and NCG stats.
cmmNativeGen
        :: DynFlags
        -> UniqSupply
        -> RawCmmTop                                    -- ^ the cmm to generate code for
        -> Int                                          -- ^ sequence number of this top thing
        -> IO   ( UniqSupply
                , [NatCmmTop Instr]                     -- native code
                , [CLabel]                              -- things imported by this cmm
                , Maybe [Color.RegAllocStats Instr]     -- stats for the coloring register allocator
                , Maybe [Linear.RegAllocStats])         -- stats for the linear register allocators

cmmNativeGen dflags us cmm count
 = do
        -- rewrite assignments to global regs
        let fixed_cmm =
                {-# SCC "fixStgRegisters" #-}
                fixStgRegisters cmm

        -- cmm to cmm optimisations
        let (opt_cmm, imports) =
                {-# SCC "cmmToCmm" #-}
                cmmToCmm dflags fixed_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_opt_cmm "Optimised Cmm"
                (pprCmm $ Cmm [opt_cmm])

        -- generate native code from cmm
        let ((native, lastMinuteImports), usGen) =
                {-# SCC "genMachCode" #-}
                initUs us $ genMachCode dflags opt_cmm

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_native "Native code"
                (vcat $ map (docToSDoc . pprNatCmmTop) native)

        -- tag instructions with register liveness information
        let (withLiveness, usLive) =
                {-# SCC "regLiveness" #-}
                initUs usGen
                        $ mapUs regLiveness
                        $ map natCmmTopToLive native

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_liveness "Liveness annotations added"
                (vcat $ map ppr withLiveness)

        -- allocate registers
        (alloced, usAlloc, ppr_raStatsColor, ppr_raStatsLinear) <-
         if ( dopt Opt_RegsGraph dflags
           || dopt Opt_RegsIterative dflags)
          then do
                -- the regs usable for allocation
                let (alloc_regs :: UniqFM (UniqSet RealReg))
                        = foldr (\r -> plusUFM_C unionUniqSets
                                        $ unitUFM (targetClassOfRealReg r) (unitUniqSet r))
                                emptyUFM
                        $ allocatableRegs

                -- do the graph coloring register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ Color.regAlloc
                                dflags
                                alloc_regs
                                (mkUniqSet [0..maxSpillSlots])
                                withLiveness

                -- dump out what happened during register allocation
                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc_stages "Build/spill stages"
                        (vcat   $ map (\(stage, stats)
                                        -> text "# --------------------------"
                                        $$ text "#  cmm " <> int count <> text " Stage " <> int stage
                                        $$ ppr stats)
                                $ zip [0..] regAllocStats)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just regAllocStats else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , mPprStats
                        , Nothing)

          else do
                -- do linear register allocation
                let ((alloced, regAllocStats), usAlloc)
                        = {-# SCC "RegAlloc" #-}
                          initUs usLive
                          $ liftM unzip
                          $ mapUs Linear.regAlloc withLiveness

                dumpIfSet_dyn dflags
                        Opt_D_dump_asm_regalloc "Registers allocated"
                        (vcat $ map (docToSDoc . pprNatCmmTop) alloced)

                let mPprStats =
                        if dopt Opt_D_dump_asm_stats dflags
                         then Just (catMaybes regAllocStats) else Nothing

                -- force evaluation of the Maybe to avoid space leak
                mPprStats `seq` return ()

                return  ( alloced, usAlloc
                        , Nothing
                        , mPprStats)

        ---- x86fp_kludge.  This pass inserts ffree instructions to clear
        ---- the FPU stack on x86.  The x86 ABI requires that the FPU stack
        ---- is clear, and library functions can return odd results if it
        ---- isn't.
        ----
        ---- NB. must happen before shortcutBranches, because that
        ---- generates JXX_GBLs which we can't fix up in x86fp_kludge.
        let kludged =
#if i386_TARGET_ARCH
                {-# SCC "x86fp_kludge" #-}
                map x86fp_kludge alloced
#else
                alloced
#endif

        ---- shortcut branches
        let shorted =
                {-# SCC "shortcutBranches" #-}
                shortcutBranches dflags kludged

        ---- sequence blocks
        let sequenced =
                {-# SCC "sequenceBlocks" #-}
                map sequenceTop shorted

        ---- expansion of SPARC synthetic instrs
#if sparc_TARGET_ARCH
        let expanded =
                {-# SCC "sparc_expand" #-}
                map SPARC.expandTop sequenced

        dumpIfSet_dyn dflags
                Opt_D_dump_asm_expanded "Synthetic instructions expanded"
                (vcat $ map (docToSDoc . pprNatCmmTop) expanded)
#else
        let expanded =
                sequenced
#endif

        return  ( usAlloc
                , expanded
                , lastMinuteImports ++ imports
                , ppr_raStatsColor
                , ppr_raStatsLinear)


#if i386_TARGET_ARCH
x86fp_kludge :: NatCmmTop Instr -> NatCmmTop Instr
x86fp_kludge top@(CmmData _ _) = top
x86fp_kludge top@(CmmProc info lbl params (ListGraph code)) =
        CmmProc info lbl params (ListGraph $ i386_insert_ffrees code)
#endif
-- | Build a doc for all the imports.
--
makeImportsDoc :: DynFlags -> [CLabel] -> Pretty.Doc
makeImportsDoc dflags imports
 = dyld_stubs imports

#if HAVE_SUBSECTIONS_VIA_SYMBOLS
            -- On recent versions of Darwin, the linker supports
            -- dead-stripping of code and data on a per-symbol basis.
            -- There's a hack to make this work in PprMach.pprNatCmmTop.
            Pretty.$$ Pretty.text ".subsections_via_symbols"
#endif
#if HAVE_GNU_NONEXEC_STACK
            -- On recent GNU ELF systems one can mark an object file
            -- as not requiring an executable stack.  If all objects
            -- linked into a program have this note then the program
            -- will not use an executable stack, which is good for
            -- security.  GHC generated code does not need an executable
            -- stack so add the note in:
            Pretty.$$ Pretty.text ".section .note.GNU-stack,\"\",@progbits"
#endif
#if !defined(darwin_TARGET_OS)
            -- And just because every other compiler does, let's stick in
            -- an identifier directive: .ident "GHC x.y.z"
            Pretty.$$ let compilerIdent = Pretty.text "GHC" Pretty.<+>
                                          Pretty.text cProjectVersion
                       in Pretty.text ".ident" Pretty.<+>
                          Pretty.doubleQuotes compilerIdent
#endif

 where
        -- Generate "symbol stubs" for all external symbols that might
        -- come from a dynamic library.
        dyld_stubs :: [CLabel] -> Pretty.Doc
{-      dyld_stubs imps = Pretty.vcat $ map pprDyldSymbolStub $
                                    map head $ group $ sort imps-}

        arch    = platformArch  $ targetPlatform dflags
        os      = platformOS    $ targetPlatform dflags

        -- (Hack) sometimes two Labels pretty-print the same, but have
        -- different uniques; so we compare their text versions...
        dyld_stubs imps
                | needImportedSymbols arch os
                = Pretty.vcat $
                        (pprGotDeclaration arch os :) $
                        map ( pprImportedSymbol arch os . fst . head) $
                        groupBy (\(_,a) (_,b) -> a == b) $
                        sortBy  (\(_,a) (_,b) -> compare a b) $
                        map doPpr $
                        imps
                | otherwise
                = Pretty.empty

        doPpr lbl = (lbl, Pretty.render $ pprCLabel lbl astyle)
        astyle = mkCodeStyle AsmStyle
-- -----------------------------------------------------------------------------
-- Sequencing the basic blocks

-- Cmm BasicBlocks are self-contained entities: they always end in a
-- jump, either non-local or to another basic block in the same proc.
-- In this phase, we attempt to place the basic blocks in a sequence
-- such that as many of the local jumps as possible turn into
-- fallthroughs.

sequenceTop
        :: NatCmmTop Instr
        -> NatCmmTop Instr

sequenceTop top@(CmmData _ _) = top
sequenceTop (CmmProc info lbl params (ListGraph blocks)) =
  CmmProc info lbl params (ListGraph $ makeFarBranches $ sequenceBlocks blocks)

-- The algorithm is very simple (and stupid): we make a graph out of
-- the blocks where there is an edge from one block to another iff the
-- first block ends by jumping to the second.  Then we topologically
-- sort this graph.  Then traverse the list: for each block, we first
-- output the block, then if it has an out edge, we move the
-- destination of the out edge to the front of the list, and continue.

-- FYI, the classic layout for basic blocks uses postorder DFS; this
-- algorithm is implemented in cmm/ZipCfg.hs (NR 6 Sep 2007).
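
{-
An illustrative, self-contained sketch of the greedy step described
above, over a toy block type (hypothetical; not the real
'NatBasicBlock'):

    import Data.List (partition)

    data Block = Block { blockLabel :: Int, blockTarget :: Maybe Int }

    -- After emitting a block, pull its jump target to the front of the
    -- worklist so the jump can become a fallthrough.
    layout :: [Block] -> [Block]
    layout []     = []
    layout (b:bs) = case blockTarget b of
        Nothing -> b : layout bs
        Just t  -> let (found, rest) = partition ((== t) . blockLabel) bs
                   in  b : layout (found ++ rest)

    -- layout [Block 0 (Just 2), Block 1 Nothing, Block 2 (Just 1)]
    -- yields the order 0, 2, 1: both jumps become fallthroughs.

The real 'seqBlocks' below additionally drops the now-redundant jump
instruction when the fallthrough is taken.
-}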
sequenceBlocks
        :: Instruction instr
        => [NatBasicBlock instr]
        -> [NatBasicBlock instr]

sequenceBlocks [] = []
sequenceBlocks (entry:blocks) =
  seqBlocks (mkNode entry : reverse (flattenSCCs (sccBlocks blocks)))
  -- the first block is the entry point ==> it must remain at the start.

sccBlocks
        :: Instruction instr
        => [NatBasicBlock instr]
        -> [SCC ( NatBasicBlock instr
                , Unique
                , [Unique])]

sccBlocks blocks = stronglyConnCompFromEdgedVerticesR (map mkNode blocks)

-- we're only interested in the last instruction of
-- the block, and only if it has a single destination.
getOutEdges
        :: Instruction instr
        => [instr] -> [Unique]

getOutEdges instrs
        = case jumpDestsOfInstr (last instrs) of
                [one] -> [getUnique one]
                _many -> []

mkNode block@(BasicBlock id instrs) = (block, getUnique id, getOutEdges instrs)

seqBlocks [] = []
seqBlocks ((block,_,[]) : rest)
  = block : seqBlocks rest
seqBlocks ((block@(BasicBlock id instrs),_,[next]) : rest)
  | can_fallthrough = BasicBlock id (init instrs) : seqBlocks rest'
  | otherwise       = block : seqBlocks rest'
  where
        (can_fallthrough, rest') = reorder next [] rest
        -- TODO: we should do a better job for cycles; try to maximise the
        -- fallthroughs within a loop.
seqBlocks _ = panic "AsmCodegen:seqBlocks"

reorder id accum [] = (False, reverse accum)
reorder id accum (b@(block,id',out) : rest)
  | id == id'  = (True, (block,id,out) : reverse accum ++ rest)
  | otherwise  = reorder id (b:accum) rest
-- -----------------------------------------------------------------------------
-- Making far branches

-- Conditional branches on PowerPC are limited to +-32KB; if our Procs get too
-- big, we have to work around this limitation.
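-- For scale: with fixed 4-byte PowerPC instructions, a +-32KB
-- displacement reaches roughly +-8192 instructions, which is where the
-- "8192 instructions are allowed" figure for 'nearLimit' below comes
-- from; 'nearLimit' stays well inside it to leave room for
-- pseudo-instructions that expand to several real ones.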
makeFarBranches
        :: [NatBasicBlock Instr]
        -> [NatBasicBlock Instr]

#if powerpc_TARGET_ARCH
makeFarBranches blocks
    | last blockAddresses < nearLimit = blocks
    | otherwise = zipWith handleBlock blockAddresses blocks
    where
        blockAddresses = scanl (+) 0 $ map blockLen blocks
        blockLen (BasicBlock _ instrs) = length instrs

        handleBlock addr (BasicBlock id instrs)
                = BasicBlock id (zipWith makeFar [addr..] instrs)

        makeFar _ (BCC ALWAYS tgt) = BCC ALWAYS tgt
        makeFar addr (BCC cond tgt)
            | abs (addr - targetAddr) >= nearLimit
            = BCCFAR cond tgt
            | otherwise
            = BCC cond tgt
            where Just targetAddr = lookupUFM blockAddressMap tgt
        makeFar _ other = other

        nearLimit = 7000 -- 8192 instructions are allowed; let's keep some
                         -- distance, as we have a few pseudo-insns that are
                         -- pretty-printed as multiple instructions,
                         -- and it's just not worth the effort to calculate
                         -- things exactly

        blockAddressMap = listToUFM $ zip (map blockId blocks) blockAddresses
#else
makeFarBranches = id
#endif
-- -----------------------------------------------------------------------------
-- Shortcut branches

shortcutBranches
        :: DynFlags
        -> [NatCmmTop Instr]
        -> [NatCmmTop Instr]

shortcutBranches dflags tops
  | optLevel dflags < 1 = tops    -- only with -O or higher
  | otherwise           = map (apply_mapping mapping) tops'
  where
    (tops', mappings) = mapAndUnzip build_mapping tops
    mapping = foldr plusUFM emptyUFM mappings

build_mapping top@(CmmData _ _) = (top, emptyUFM)
build_mapping (CmmProc info lbl params (ListGraph []))
  = (CmmProc info lbl params (ListGraph []), emptyUFM)
build_mapping (CmmProc info lbl params (ListGraph (head:blocks)))
  = (CmmProc info lbl params (ListGraph (head:others)), mapping)
        -- drop the shorted blocks, but don't ever drop the first one,
        -- because it is pointed to by a global label.
  where
    -- find all the blocks that just consist of a jump that can be
    -- shorted.
    -- Don't completely eliminate loops here -- that can leave a dangling jump!
    (_, shortcut_blocks, others) = foldl split (emptyBlockSet, [], []) blocks
    split (s, shortcut_blocks, others) b@(BasicBlock id [insn])
        | Just (DestBlockId dest) <- canShortcut insn,
          (elemBlockSet dest s) || dest == id -- loop checks
        = (s, shortcut_blocks, b : others)
    split (s, shortcut_blocks, others) (BasicBlock id [insn])
        | Just dest <- canShortcut insn
        = (extendBlockSet s id, (id,dest) : shortcut_blocks, others)
    split (s, shortcut_blocks, others) other = (s, shortcut_blocks, other : others)

    -- build a mapping from BlockId to JumpDest for shorting branches
    mapping = foldl add emptyUFM shortcut_blocks
    add ufm (id,dest) = addToUFM ufm id dest

apply_mapping ufm (CmmData sec statics)
  = CmmData sec (map (shortcutStatic (lookupUFM ufm)) statics)
  -- we need to get the jump tables, so apply the mapping to the entries
  -- of a CmmData too.
apply_mapping ufm (CmmProc info lbl params (ListGraph blocks))
  = CmmProc info lbl params (ListGraph $ map short_bb blocks)
  where
    short_bb (BasicBlock id insns) = BasicBlock id $! map short_insn insns
    short_insn i = shortcutJump (lookupUFM ufm) i
                 -- shortcutJump should apply the mapping repeatedly,
                 -- just in case we can short multiple branches.
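
{-
An illustrative, self-contained sketch of what "apply the mapping
repeatedly" means, over a toy mapping from block ids to block ids
(hypothetical; the real code maps 'BlockId' to 'JumpDest'):

    import qualified Data.Map as Map

    -- Follow a chain of shortcut-able blocks to its final destination.
    shortcut :: Map.Map Int Int -> Int -> Int
    shortcut m dest = maybe dest (shortcut m) (Map.lookup dest m)

    -- With the mapping [(1,2),(2,3)], a jump to block 1 is retargeted
    -- to block 3, and blocks 1 and 2 can be dropped.

This sketch would loop on a cyclic mapping; 'build_mapping' above never
creates one, thanks to its loop checks.
-}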
-- -----------------------------------------------------------------------------
-- Instruction selection

-- Native code instruction selection for a chunk of Cmm code.  For
-- this part of the computation, we switch from the UniqSM monad to
-- the NatM monad.  The latter carries not only a Unique, but also an
-- Int denoting the current C stack pointer offset in the generated
-- code; this is needed for creating correct spill offsets on
-- architectures which don't offer, or for which it would be
-- prohibitively expensive to employ, a frame pointer register.  Viz,
-- x86.

-- The offset is measured in bytes, and indicates the difference
-- between the current (simulated) C stack-ptr and the value it was at
-- the beginning of the block.  For stacks which grow down, this value
-- should be either zero or negative.

-- Switching between the two monads whilst carrying along the same
-- Unique supply breaks abstraction.  Is that bad?
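
-- For example (a hypothetical x86 sequence): decrementing the stack
-- pointer by 8 bytes to make room would take the simulated offset from
-- 0 to -8, and the matching increment must restore it; 'genMachCode'
-- below checks that the final delta of each chunk is zero and panics
-- otherwise.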
genMachCode
        :: DynFlags
        -> RawCmmTop
        -> UniqSM
                ( [NatCmmTop Instr]
                , [CLabel])

genMachCode dflags cmm_top
  = do  { initial_us <- getUs
        ; let initial_st           = mkNatM_State initial_us 0 dflags
              (new_tops, final_st) = initNat initial_st (cmmTopCodeGen dflags cmm_top)
              final_delta          = natm_delta final_st
              final_imports        = natm_imports final_st
        ; if   final_delta == 0
          then return (new_tops, final_imports)
          else pprPanic "genMachCode: nonzero final delta" (int final_delta)
        }
-- -----------------------------------------------------------------------------
-- Generic Cmm optimiser

{-
Here we do:

  (a) Constant folding
  (b) Simple inlining: a temporary which is assigned to and then
      used, once, can be shorted.
  (c) Position independent code and dynamic linking
        (i)  introduce the appropriate indirections
             and position independent refs
        (ii) compile a list of imported symbols

Ideas for other things we could do (ToDo):

  - shortcut jumps-to-jumps
  - eliminate dead code blocks
  - simple CSE: if an expr is assigned to a temp, then replace later occs of
    that expr with the temp, until the expr is no longer valid (can push through
    temp assignments, and certain assigns to mem...)
-}
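
{-
An illustrative example of rewrite (b), in Cmm-ish pseudo-syntax (made
up for exposition, not compiler output):

    _tmp::I32 = R1 + 8;        // temporary assigned once...
    I32[Sp]   = _tmp::I32;     // ...and used exactly once

becomes

    I32[Sp] = R1 + 8;

after which (a) may further fold any constant subexpressions.
-}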
cmmToCmm :: DynFlags -> RawCmmTop -> (RawCmmTop, [CLabel])
cmmToCmm _ top@(CmmData _ _) = (top, [])
cmmToCmm dflags (CmmProc info lbl params (ListGraph blocks)) = runCmmOpt dflags $ do
  blocks' <- mapM cmmBlockConFold (cmmMiniInline blocks)
  return $ CmmProc info lbl params (ListGraph blocks')
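
-- CmmOptM threads the accumulated list of imported CLabels through as
-- state, passes DynFlags down read-only, and returns its result in an
-- unboxed tuple to keep the plumbing cheap.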
newtype CmmOptM a = CmmOptM (([CLabel], DynFlags) -> (# a, [CLabel] #))

instance Monad CmmOptM where
  return x = CmmOptM $ \(imports, _) -> (# x,imports #)
  (CmmOptM f) >>= g =
    CmmOptM $ \(imports, dflags) ->
                case f (imports, dflags) of
                  (# x, imports' #) ->
                    case g x of
                      CmmOptM g' -> g' (imports', dflags)

addImportCmmOpt :: CLabel -> CmmOptM ()
addImportCmmOpt lbl = CmmOptM $ \(imports, dflags) -> (# (), lbl:imports #)

getDynFlagsCmmOpt :: CmmOptM DynFlags
getDynFlagsCmmOpt = CmmOptM $ \(imports, dflags) -> (# dflags, imports #)

runCmmOpt :: DynFlags -> CmmOptM a -> (a, [CLabel])
runCmmOpt dflags (CmmOptM f) = case f ([], dflags) of
                        (# result, imports #) -> (result, imports)
cmmBlockConFold :: CmmBasicBlock -> CmmOptM CmmBasicBlock
cmmBlockConFold (BasicBlock id stmts) = do
  stmts' <- mapM cmmStmtConFold stmts
  return $ BasicBlock id stmts'

cmmStmtConFold stmt
   = case stmt of
        CmmAssign reg src
           -> do src' <- cmmExprConFold DataReference src
                 return $ case src' of
                   CmmReg reg' | reg == reg' -> CmmNop
                   new_src -> CmmAssign reg new_src

        CmmStore addr src
           -> do addr' <- cmmExprConFold DataReference addr
                 src'  <- cmmExprConFold DataReference src
                 return $ CmmStore addr' src'

        CmmJump addr regs
           -> do addr' <- cmmExprConFold JumpReference addr
                 return $ CmmJump addr' regs

        CmmCall target regs args srt returns
           -> do target' <- case target of
                              CmmCallee e conv -> do
                                e' <- cmmExprConFold CallReference e
                                return $ CmmCallee e' conv
                              other -> return other
                 args' <- mapM (\(CmmHinted arg hint) -> do
                                  arg' <- cmmExprConFold DataReference arg
                                  return (CmmHinted arg' hint)) args
                 return $ CmmCall target' regs args' srt returns

        CmmCondBranch test dest
           -> do test' <- cmmExprConFold DataReference test
                 return $ case test' of
                   CmmLit (CmmInt 0 _) ->
                     CmmComment (mkFastString ("deleted: " ++
                                        showSDoc (pprStmt stmt)))

                   CmmLit (CmmInt n _) -> CmmBranch dest
                   other -> CmmCondBranch test' dest

        CmmSwitch expr ids
           -> do expr' <- cmmExprConFold DataReference expr
                 return $ CmmSwitch expr' ids

        other
           -> return other
cmmExprConFold referenceKind expr
   = case expr of
        CmmLoad addr rep
           -> do addr' <- cmmExprConFold DataReference addr
                 return $ CmmLoad addr' rep

        CmmMachOp mop args
           -- For MachOps, we first optimize the children, and then we try
           -- our hand at some constant-folding.
           -> do args' <- mapM (cmmExprConFold DataReference) args
                 return $ cmmMachOpFold mop args'

        CmmLit (CmmLabel lbl)
           -> do
                dflags <- getDynFlagsCmmOpt
                cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
        CmmLit (CmmLabelOff lbl off)
           -> do
                dflags <- getDynFlagsCmmOpt
                dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
                return $ cmmMachOpFold (MO_Add wordWidth) [
                    dynRef,
                    (CmmLit $ CmmInt (fromIntegral off) wordWidth)
                  ]

#if powerpc_TARGET_ARCH
        -- On powerpc (non-PIC), it's easier to jump directly to a label than
        -- to use the register table, so we replace these registers
        -- with the corresponding labels:
        CmmReg (CmmGlobal EagerBlackholeInfo)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_EAGER_BLACKHOLE_info")))
        CmmReg (CmmGlobal GCEnter1)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_enter_1")))
        CmmReg (CmmGlobal GCFun)
          | not opt_PIC
          -> cmmExprConFold referenceKind $
             CmmLit (CmmLabel (mkCmmCodeLabel rtsPackageId (fsLit "__stg_gc_fun")))
#endif

        other
           -> return other
\end{code}
  697. \end{code}