
/mingw-w64-v2.0.999/gcc/src/gcc/config/alpha/alpha.c

  1. /* Subroutines used for code generation on the DEC Alpha.
  2. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
  3. 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
  4. Free Software Foundation, Inc.
  5. Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
  6. This file is part of GCC.
  7. GCC is free software; you can redistribute it and/or modify
  8. it under the terms of the GNU General Public License as published by
  9. the Free Software Foundation; either version 3, or (at your option)
  10. any later version.
  11. GCC is distributed in the hope that it will be useful,
  12. but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. GNU General Public License for more details.
  15. You should have received a copy of the GNU General Public License
  16. along with GCC; see the file COPYING3. If not see
  17. <http://www.gnu.org/licenses/>. */
  18. #include "config.h"
  19. #include "system.h"
  20. #include "coretypes.h"
  21. #include "tm.h"
  22. #include "rtl.h"
  23. #include "tree.h"
  24. #include "regs.h"
  25. #include "hard-reg-set.h"
  26. #include "insn-config.h"
  27. #include "conditions.h"
  28. #include "output.h"
  29. #include "insn-attr.h"
  30. #include "flags.h"
  31. #include "recog.h"
  32. #include "expr.h"
  33. #include "optabs.h"
  34. #include "reload.h"
  35. #include "obstack.h"
  36. #include "except.h"
  37. #include "function.h"
  38. #include "diagnostic-core.h"
  39. #include "ggc.h"
  40. #include "tm_p.h"
  41. #include "target.h"
  42. #include "target-def.h"
  43. #include "common/common-target.h"
  44. #include "debug.h"
  45. #include "langhooks.h"
  46. #include "splay-tree.h"
  47. #include "gimple.h"
  48. #include "tree-flow.h"
  49. #include "tree-stdarg.h"
  50. #include "tm-constrs.h"
  51. #include "df.h"
  52. #include "libfuncs.h"
  53. #include "opts.h"
  54. #include "params.h"
  55. /* Specify which cpu to schedule for. */
  56. enum processor_type alpha_tune;
  57. /* Which cpu we're generating code for. */
  58. enum processor_type alpha_cpu;
  59. static const char * const alpha_cpu_name[] =
  60. {
  61. "ev4", "ev5", "ev6"
  62. };
  63. /* Specify how accurate floating-point traps need to be. */
  64. enum alpha_trap_precision alpha_tp;
  65. /* Specify the floating-point rounding mode. */
  66. enum alpha_fp_rounding_mode alpha_fprm;
  67. /* Specify which things cause traps. */
  68. enum alpha_fp_trap_mode alpha_fptm;
  69. /* Nonzero if inside of a function, because the Alpha asm can't
  70. handle .files inside of functions. */
  71. static int inside_function = FALSE;
  72. /* The number of cycles of latency we should assume on memory reads. */
  73. int alpha_memory_latency = 3;
  74. /* Whether the function needs the GP. */
  75. static int alpha_function_needs_gp;
  76. /* The assembler name of the current function. */
  77. static const char *alpha_fnname;
  78. /* The next explicit relocation sequence number. */
  79. extern GTY(()) int alpha_next_sequence_number;
  80. int alpha_next_sequence_number = 1;
  81. /* The literal and gpdisp sequence numbers for this insn, as printed
  82. by %# and %* respectively. */
  83. extern GTY(()) int alpha_this_literal_sequence_number;
  84. extern GTY(()) int alpha_this_gpdisp_sequence_number;
  85. int alpha_this_literal_sequence_number;
  86. int alpha_this_gpdisp_sequence_number;
  87. /* Costs of various operations on the different architectures. */
  88. struct alpha_rtx_cost_data
  89. {
  90. unsigned char fp_add;
  91. unsigned char fp_mult;
  92. unsigned char fp_div_sf;
  93. unsigned char fp_div_df;
  94. unsigned char int_mult_si;
  95. unsigned char int_mult_di;
  96. unsigned char int_shift;
  97. unsigned char int_cmov;
  98. unsigned short int_div;
  99. };
  100. static struct alpha_rtx_cost_data const alpha_rtx_cost_data[PROCESSOR_MAX] =
  101. {
  102. { /* EV4 */
  103. COSTS_N_INSNS (6), /* fp_add */
  104. COSTS_N_INSNS (6), /* fp_mult */
  105. COSTS_N_INSNS (34), /* fp_div_sf */
  106. COSTS_N_INSNS (63), /* fp_div_df */
  107. COSTS_N_INSNS (23), /* int_mult_si */
  108. COSTS_N_INSNS (23), /* int_mult_di */
  109. COSTS_N_INSNS (2), /* int_shift */
  110. COSTS_N_INSNS (2), /* int_cmov */
  111. COSTS_N_INSNS (97), /* int_div */
  112. },
  113. { /* EV5 */
  114. COSTS_N_INSNS (4), /* fp_add */
  115. COSTS_N_INSNS (4), /* fp_mult */
  116. COSTS_N_INSNS (15), /* fp_div_sf */
  117. COSTS_N_INSNS (22), /* fp_div_df */
  118. COSTS_N_INSNS (8), /* int_mult_si */
  119. COSTS_N_INSNS (12), /* int_mult_di */
  120. COSTS_N_INSNS (1) + 1, /* int_shift */
  121. COSTS_N_INSNS (1), /* int_cmov */
  122. COSTS_N_INSNS (83), /* int_div */
  123. },
  124. { /* EV6 */
  125. COSTS_N_INSNS (4), /* fp_add */
  126. COSTS_N_INSNS (4), /* fp_mult */
  127. COSTS_N_INSNS (12), /* fp_div_sf */
  128. COSTS_N_INSNS (15), /* fp_div_df */
  129. COSTS_N_INSNS (7), /* int_mult_si */
  130. COSTS_N_INSNS (7), /* int_mult_di */
  131. COSTS_N_INSNS (1), /* int_shift */
  132. COSTS_N_INSNS (2), /* int_cmov */
  133. COSTS_N_INSNS (86), /* int_div */
  134. },
  135. };
  136. /* Similar but tuned for code size instead of execution latency. The
  137. extra +N is fractional cost tuning based on latency. It's used to
  138. encourage use of cheaper insns like shift, but only if there's just
  139. one of them. */
  140. static struct alpha_rtx_cost_data const alpha_rtx_cost_size =
  141. {
  142. COSTS_N_INSNS (1), /* fp_add */
  143. COSTS_N_INSNS (1), /* fp_mult */
  144. COSTS_N_INSNS (1), /* fp_div_sf */
  145. COSTS_N_INSNS (1) + 1, /* fp_div_df */
  146. COSTS_N_INSNS (1) + 1, /* int_mult_si */
  147. COSTS_N_INSNS (1) + 2, /* int_mult_di */
  148. COSTS_N_INSNS (1), /* int_shift */
  149. COSTS_N_INSNS (1), /* int_cmov */
  150. COSTS_N_INSNS (6), /* int_div */
  151. };
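/* Illustrative note, not from the original source: COSTS_N_INSNS scales its
   argument by the per-insn cost unit (4 in rtl.h), so the "+1" and "+2"
   terms in these tables act as fractional insns.  E.g. in the size table
   COSTS_N_INSNS (1) + 1 == 5 makes a multiply look slightly worse than a
   single shift (4) but much better than two shifts (8), which is exactly
   the "only if there's just one of them" preference described above.  */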
  152. /* Get the number of args of a function in one of two ways. */
  153. #if TARGET_ABI_OPEN_VMS
  154. #define NUM_ARGS crtl->args.info.num_args
  155. #else
  156. #define NUM_ARGS crtl->args.info
  157. #endif
  158. #define REG_PV 27
  159. #define REG_RA 26
  160. /* Declarations of static functions. */
  161. static struct machine_function *alpha_init_machine_status (void);
  162. static rtx alpha_emit_xfloating_compare (enum rtx_code *, rtx, rtx);
  163. #if TARGET_ABI_OPEN_VMS
  164. static void alpha_write_linkage (FILE *, const char *);
  165. static bool vms_valid_pointer_mode (enum machine_mode);
  166. #else
  167. #define vms_patch_builtins() gcc_unreachable()
  168. #endif
  169. #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
  170. /* Implement TARGET_MANGLE_TYPE. */
  171. static const char *
  172. alpha_mangle_type (const_tree type)
  173. {
  174. if (TYPE_MAIN_VARIANT (type) == long_double_type_node
  175. && TARGET_LONG_DOUBLE_128)
  176. return "g";
  177. /* For all other types, use normal C++ mangling. */
  178. return NULL;
  179. }
  180. #endif
  181. /* Parse target option strings. */
  182. static void
  183. alpha_option_override (void)
  184. {
  185. static const struct cpu_table {
  186. const char *const name;
  187. const enum processor_type processor;
  188. const int flags;
  189. const unsigned short line_size; /* in bytes */
  190. const unsigned short l1_size; /* in kb. */
  191. const unsigned short l2_size; /* in kb. */
  192. } cpu_table[] = {
  193. /* EV4/LCA45 had 8k L1 caches; EV45 had 16k L1 caches.
  194. EV4/EV45 had 128k to 16M 32-byte direct Bcache. LCA45
  195. had 64k to 8M 8-byte direct Bcache. */
  196. { "ev4", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
  197. { "21064", PROCESSOR_EV4, 0, 32, 8, 8*1024 },
  198. { "ev45", PROCESSOR_EV4, 0, 32, 16, 16*1024 },
  199. /* EV5 or EV56 had 8k 32 byte L1, 96k 32 or 64 byte L2,
  200. and 1M to 16M 64 byte L3 (not modeled).
  201. PCA56 had 16k 64-byte cache; PCA57 had 32k Icache.
  202. PCA56 had 8k 64-byte cache; PCA57 had 16k Dcache. */
  203. { "ev5", PROCESSOR_EV5, 0, 32, 8, 96 },
  204. { "21164", PROCESSOR_EV5, 0, 32, 8, 96 },
  205. { "ev56", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
  206. { "21164a", PROCESSOR_EV5, MASK_BWX, 32, 8, 96 },
  207. { "pca56", PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
  208. { "21164PC",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
  209. { "21164pc",PROCESSOR_EV5, MASK_BWX|MASK_MAX, 64, 16, 4*1024 },
  210. /* EV6 had 64k 64 byte L1, 1M to 16M Bcache. */
  211. { "ev6", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
  212. { "21264", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX, 64, 64, 16*1024 },
  213. { "ev67", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
  214. 64, 64, 16*1024 },
  215. { "21264a", PROCESSOR_EV6, MASK_BWX|MASK_MAX|MASK_FIX|MASK_CIX,
  216. 64, 64, 16*1024 }
  217. };
  218. int const ct_size = ARRAY_SIZE (cpu_table);
  219. int line_size = 0, l1_size = 0, l2_size = 0;
  220. int i;
  221. #ifdef SUBTARGET_OVERRIDE_OPTIONS
  222. SUBTARGET_OVERRIDE_OPTIONS;
  223. #endif
  224. /* Default to full IEEE compliance mode for Go language. */
  225. if (strcmp (lang_hooks.name, "GNU Go") == 0
  226. && !(target_flags_explicit & MASK_IEEE))
  227. target_flags |= MASK_IEEE;
  228. alpha_fprm = ALPHA_FPRM_NORM;
  229. alpha_tp = ALPHA_TP_PROG;
  230. alpha_fptm = ALPHA_FPTM_N;
  231. if (TARGET_IEEE)
  232. {
  233. alpha_tp = ALPHA_TP_INSN;
  234. alpha_fptm = ALPHA_FPTM_SU;
  235. }
  236. if (TARGET_IEEE_WITH_INEXACT)
  237. {
  238. alpha_tp = ALPHA_TP_INSN;
  239. alpha_fptm = ALPHA_FPTM_SUI;
  240. }
  241. if (alpha_tp_string)
  242. {
  243. if (! strcmp (alpha_tp_string, "p"))
  244. alpha_tp = ALPHA_TP_PROG;
  245. else if (! strcmp (alpha_tp_string, "f"))
  246. alpha_tp = ALPHA_TP_FUNC;
  247. else if (! strcmp (alpha_tp_string, "i"))
  248. alpha_tp = ALPHA_TP_INSN;
  249. else
  250. error ("bad value %qs for -mtrap-precision switch", alpha_tp_string);
  251. }
  252. if (alpha_fprm_string)
  253. {
  254. if (! strcmp (alpha_fprm_string, "n"))
  255. alpha_fprm = ALPHA_FPRM_NORM;
  256. else if (! strcmp (alpha_fprm_string, "m"))
  257. alpha_fprm = ALPHA_FPRM_MINF;
  258. else if (! strcmp (alpha_fprm_string, "c"))
  259. alpha_fprm = ALPHA_FPRM_CHOP;
  260. else if (! strcmp (alpha_fprm_string,"d"))
  261. alpha_fprm = ALPHA_FPRM_DYN;
  262. else
  263. error ("bad value %qs for -mfp-rounding-mode switch",
  264. alpha_fprm_string);
  265. }
  266. if (alpha_fptm_string)
  267. {
  268. if (strcmp (alpha_fptm_string, "n") == 0)
  269. alpha_fptm = ALPHA_FPTM_N;
  270. else if (strcmp (alpha_fptm_string, "u") == 0)
  271. alpha_fptm = ALPHA_FPTM_U;
  272. else if (strcmp (alpha_fptm_string, "su") == 0)
  273. alpha_fptm = ALPHA_FPTM_SU;
  274. else if (strcmp (alpha_fptm_string, "sui") == 0)
  275. alpha_fptm = ALPHA_FPTM_SUI;
  276. else
  277. error ("bad value %qs for -mfp-trap-mode switch", alpha_fptm_string);
  278. }
  279. if (alpha_cpu_string)
  280. {
  281. for (i = 0; i < ct_size; i++)
  282. if (! strcmp (alpha_cpu_string, cpu_table [i].name))
  283. {
  284. alpha_tune = alpha_cpu = cpu_table[i].processor;
  285. line_size = cpu_table[i].line_size;
  286. l1_size = cpu_table[i].l1_size;
  287. l2_size = cpu_table[i].l2_size;
  288. target_flags &= ~ (MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX);
  289. target_flags |= cpu_table[i].flags;
  290. break;
  291. }
  292. if (i == ct_size)
  293. error ("bad value %qs for -mcpu switch", alpha_cpu_string);
  294. }
  295. if (alpha_tune_string)
  296. {
  297. for (i = 0; i < ct_size; i++)
  298. if (! strcmp (alpha_tune_string, cpu_table [i].name))
  299. {
  300. alpha_tune = cpu_table[i].processor;
  301. line_size = cpu_table[i].line_size;
  302. l1_size = cpu_table[i].l1_size;
  303. l2_size = cpu_table[i].l2_size;
  304. break;
  305. }
  306. if (i == ct_size)
  307. error ("bad value %qs for -mtune switch", alpha_tune_string);
  308. }
  309. if (line_size)
  310. maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
  311. global_options.x_param_values,
  312. global_options_set.x_param_values);
  313. if (l1_size)
  314. maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
  315. global_options.x_param_values,
  316. global_options_set.x_param_values);
  317. if (l2_size)
  318. maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
  319. global_options.x_param_values,
  320. global_options_set.x_param_values);
  321. /* Do some sanity checks on the above options. */
  322. if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
  323. && alpha_tp != ALPHA_TP_INSN && alpha_cpu != PROCESSOR_EV6)
  324. {
  325. warning (0, "fp software completion requires -mtrap-precision=i");
  326. alpha_tp = ALPHA_TP_INSN;
  327. }
  328. if (alpha_cpu == PROCESSOR_EV6)
  329. {
  330. /* Except for EV6 pass 1 (not released), we always have precise
  331. arithmetic traps. Which means we can do software completion
  332. without minding trap shadows. */
  333. alpha_tp = ALPHA_TP_PROG;
  334. }
  335. if (TARGET_FLOAT_VAX)
  336. {
  337. if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
  338. {
  339. warning (0, "rounding mode not supported for VAX floats");
  340. alpha_fprm = ALPHA_FPRM_NORM;
  341. }
  342. if (alpha_fptm == ALPHA_FPTM_SUI)
  343. {
  344. warning (0, "trap mode not supported for VAX floats");
  345. alpha_fptm = ALPHA_FPTM_SU;
  346. }
  347. if (target_flags_explicit & MASK_LONG_DOUBLE_128)
  348. warning (0, "128-bit long double not supported for VAX floats");
  349. target_flags &= ~MASK_LONG_DOUBLE_128;
  350. }
  351. {
  352. char *end;
  353. int lat;
  354. if (!alpha_mlat_string)
  355. alpha_mlat_string = "L1";
  356. if (ISDIGIT ((unsigned char)alpha_mlat_string[0])
  357. && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
  358. ;
  359. else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
  360. && ISDIGIT ((unsigned char)alpha_mlat_string[1])
  361. && alpha_mlat_string[2] == '\0')
  362. {
  363. static int const cache_latency[][4] =
  364. {
  365. { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
  366. { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
  367. { 3, 12, 30 }, /* ev6 -- Bcache from DS20 LMbench. */
  368. };
  369. lat = alpha_mlat_string[1] - '0';
  370. if (lat <= 0 || lat > 3 || cache_latency[alpha_tune][lat-1] == -1)
  371. {
  372. warning (0, "L%d cache latency unknown for %s",
  373. lat, alpha_cpu_name[alpha_tune]);
  374. lat = 3;
  375. }
  376. else
  377. lat = cache_latency[alpha_tune][lat-1];
  378. }
  379. else if (! strcmp (alpha_mlat_string, "main"))
  380. {
  381. /* Most current memories have about 370ns latency. This is
  382. a reasonable guess for a fast cpu. */
  383. lat = 150;
  384. }
  385. else
  386. {
  387. warning (0, "bad value %qs for -mmemory-latency", alpha_mlat_string);
  388. lat = 3;
  389. }
  390. alpha_memory_latency = lat;
  391. }
  392. /* Default the definition of "small data" to 8 bytes. */
  393. if (!global_options_set.x_g_switch_value)
  394. g_switch_value = 8;
  395. /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */
  396. if (flag_pic == 1)
  397. target_flags |= MASK_SMALL_DATA;
  398. else if (flag_pic == 2)
  399. target_flags &= ~MASK_SMALL_DATA;
  400. /* Align labels and loops for optimal branching. */
  401. /* ??? Kludge these by not doing anything if we don't optimize. */
  402. if (optimize > 0)
  403. {
  404. if (align_loops <= 0)
  405. align_loops = 16;
  406. if (align_jumps <= 0)
  407. align_jumps = 16;
  408. }
  409. if (align_functions <= 0)
  410. align_functions = 16;
  411. /* Register variables and functions with the garbage collector. */
  412. /* Set up function hooks. */
  413. init_machine_status = alpha_init_machine_status;
  414. /* Tell the compiler when we're using VAX floating point. */
  415. if (TARGET_FLOAT_VAX)
  416. {
  417. REAL_MODE_FORMAT (SFmode) = &vax_f_format;
  418. REAL_MODE_FORMAT (DFmode) = &vax_g_format;
  419. REAL_MODE_FORMAT (TFmode) = NULL;
  420. }
  421. #ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  422. if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
  423. target_flags |= MASK_LONG_DOUBLE_128;
  424. #endif
  425. }
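/* Worked example (editorial, not from the original source): with
   "-mcpu=ev56" the table above selects PROCESSOR_EV5 for both alpha_cpu
   and alpha_tune, clears the BWX/MAX/FIX/CIX bits and then sets MASK_BWX,
   and seeds the cache parameters with a 32-byte line, 8K L1 and 96K L2.
   "-mtune=ev6" alone changes only alpha_tune and the cache parameters,
   leaving the instruction-set flags untouched.  */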
  426. /* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
  427. int
  428. zap_mask (HOST_WIDE_INT value)
  429. {
  430. int i;
  431. for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
  432. i++, value >>= 8)
  433. if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
  434. return 0;
  435. return 1;
  436. }
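/* Editorial sketch, not part of the original file: the same byte-mask test
   restated over plain stdint types with a couple of worked examples.  A
   value passes only when every byte is 0x00 or 0xff, i.e. when it could be
   implemented as the byte-select mask of a ZAP/ZAPNOT insn.  */
#if 0
#include <stdint.h>
static int
zap_mask_demo (uint64_t value)
{
  int i;
  for (i = 0; i < 8; i++, value >>= 8)
    if ((value & 0xff) != 0 && (value & 0xff) != 0xff)
      return 0;
  return 1;
}
/* zap_mask_demo (0xffff00000000ffffull) == 1   -- every byte is 00 or ff.
   zap_mask_demo (0x0000000000123400ull) == 0   -- 0x34 is a partial byte.  */
#endif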
  437. /* Return true if OP is valid for a particular TLS relocation.
  438. We are already guaranteed that OP is a CONST. */
  439. int
  440. tls_symbolic_operand_1 (rtx op, int size, int unspec)
  441. {
  442. op = XEXP (op, 0);
  443. if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec)
  444. return 0;
  445. op = XVECEXP (op, 0, 0);
  446. if (GET_CODE (op) != SYMBOL_REF)
  447. return 0;
  448. switch (SYMBOL_REF_TLS_MODEL (op))
  449. {
  450. case TLS_MODEL_LOCAL_DYNAMIC:
  451. return unspec == UNSPEC_DTPREL && size == alpha_tls_size;
  452. case TLS_MODEL_INITIAL_EXEC:
  453. return unspec == UNSPEC_TPREL && size == 64;
  454. case TLS_MODEL_LOCAL_EXEC:
  455. return unspec == UNSPEC_TPREL && size == alpha_tls_size;
  456. default:
  457. gcc_unreachable ();
  458. }
  459. }
  460. /* Used by aligned_memory_operand and unaligned_memory_operand to
  461. resolve what reload is going to do with OP if it's a register. */
  462. rtx
  463. resolve_reload_operand (rtx op)
  464. {
  465. if (reload_in_progress)
  466. {
  467. rtx tmp = op;
  468. if (GET_CODE (tmp) == SUBREG)
  469. tmp = SUBREG_REG (tmp);
  470. if (REG_P (tmp)
  471. && REGNO (tmp) >= FIRST_PSEUDO_REGISTER)
  472. {
  473. op = reg_equiv_memory_loc (REGNO (tmp));
  474. if (op == 0)
  475. return 0;
  476. }
  477. }
  478. return op;
  479. }
480. /* The scalar modes supported differ from the default check-what-c-supports
  481. version in that sometimes TFmode is available even when long double
  482. indicates only DFmode. */
  483. static bool
  484. alpha_scalar_mode_supported_p (enum machine_mode mode)
  485. {
  486. switch (mode)
  487. {
  488. case QImode:
  489. case HImode:
  490. case SImode:
  491. case DImode:
  492. case TImode: /* via optabs.c */
  493. return true;
  494. case SFmode:
  495. case DFmode:
  496. return true;
  497. case TFmode:
  498. return TARGET_HAS_XFLOATING_LIBS;
  499. default:
  500. return false;
  501. }
  502. }
  503. /* Alpha implements a couple of integer vector mode operations when
  504. TARGET_MAX is enabled. We do not check TARGET_MAX here, however,
  505. which allows the vectorizer to operate on e.g. move instructions,
  506. or when expand_vector_operations can do something useful. */
  507. static bool
  508. alpha_vector_mode_supported_p (enum machine_mode mode)
  509. {
  510. return mode == V8QImode || mode == V4HImode || mode == V2SImode;
  511. }
  512. /* Return 1 if this function can directly return via $26. */
  513. int
  514. direct_return (void)
  515. {
  516. return (TARGET_ABI_OSF
  517. && reload_completed
  518. && alpha_sa_size () == 0
  519. && get_frame_size () == 0
  520. && crtl->outgoing_args_size == 0
  521. && crtl->args.pretend_args_size == 0);
  522. }
  523. /* Return the TLS model to use for SYMBOL. */
  524. static enum tls_model
  525. tls_symbolic_operand_type (rtx symbol)
  526. {
  527. enum tls_model model;
  528. if (GET_CODE (symbol) != SYMBOL_REF)
  529. return TLS_MODEL_NONE;
  530. model = SYMBOL_REF_TLS_MODEL (symbol);
  531. /* Local-exec with a 64-bit size is the same code as initial-exec. */
  532. if (model == TLS_MODEL_LOCAL_EXEC && alpha_tls_size == 64)
  533. model = TLS_MODEL_INITIAL_EXEC;
  534. return model;
  535. }
  536. /* Return true if the function DECL will share the same GP as any
  537. function in the current unit of translation. */
  538. static bool
  539. decl_has_samegp (const_tree decl)
  540. {
  541. /* Functions that are not local can be overridden, and thus may
  542. not share the same gp. */
  543. if (!(*targetm.binds_local_p) (decl))
  544. return false;
  545. /* If -msmall-data is in effect, assume that there is only one GP
  546. for the module, and so any local symbol has this property. We
  547. need explicit relocations to be able to enforce this for symbols
  548. not defined in this unit of translation, however. */
  549. if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA)
  550. return true;
  551. /* Functions that are not external are defined in this UoT. */
  552. /* ??? Irritatingly, static functions not yet emitted are still
  553. marked "external". Apply this to non-static functions only. */
  554. return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl);
  555. }
  556. /* Return true if EXP should be placed in the small data section. */
  557. static bool
  558. alpha_in_small_data_p (const_tree exp)
  559. {
  560. /* We want to merge strings, so we never consider them small data. */
  561. if (TREE_CODE (exp) == STRING_CST)
  562. return false;
  563. /* Functions are never in the small data area. Duh. */
  564. if (TREE_CODE (exp) == FUNCTION_DECL)
  565. return false;
  566. if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
  567. {
  568. const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
  569. if (strcmp (section, ".sdata") == 0
  570. || strcmp (section, ".sbss") == 0)
  571. return true;
  572. }
  573. else
  574. {
  575. HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
  576. /* If this is an incomplete type with size 0, then we can't put it
  577. in sdata because it might be too big when completed. */
  578. if (size > 0 && size <= g_switch_value)
  579. return true;
  580. }
  581. return false;
  582. }
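/* Worked example (editorial, not from the original source): with the
   default -G 8 established in alpha_option_override above, a 4-byte "int"
   definition qualifies as small data while a 16-byte struct does not, and
   a variable placed explicitly in ".sdata" or ".sbss" (e.g. via a section
   attribute) qualifies regardless of its size.  */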
  583. #if TARGET_ABI_OPEN_VMS
  584. static bool
  585. vms_valid_pointer_mode (enum machine_mode mode)
  586. {
  587. return (mode == SImode || mode == DImode);
  588. }
  589. static bool
  590. alpha_linkage_symbol_p (const char *symname)
  591. {
  592. int symlen = strlen (symname);
  593. if (symlen > 4)
  594. return strcmp (&symname [symlen - 4], "..lk") == 0;
  595. return false;
  596. }
  597. #define LINKAGE_SYMBOL_REF_P(X) \
  598. ((GET_CODE (X) == SYMBOL_REF \
  599. && alpha_linkage_symbol_p (XSTR (X, 0))) \
  600. || (GET_CODE (X) == CONST \
  601. && GET_CODE (XEXP (X, 0)) == PLUS \
  602. && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF \
  603. && alpha_linkage_symbol_p (XSTR (XEXP (XEXP (X, 0), 0), 0))))
  604. #endif
  605. /* legitimate_address_p recognizes an RTL expression that is a valid
  606. memory address for an instruction. The MODE argument is the
  607. machine mode for the MEM expression that wants to use this address.
  608. For Alpha, we have either a constant address or the sum of a
  609. register and a constant address, or just a register. For DImode,
610. any of those forms can be surrounded with an AND that clears the
  611. low-order three bits; this is an "unaligned" access. */
  612. static bool
  613. alpha_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
  614. {
  615. /* If this is an ldq_u type address, discard the outer AND. */
  616. if (mode == DImode
  617. && GET_CODE (x) == AND
  618. && CONST_INT_P (XEXP (x, 1))
  619. && INTVAL (XEXP (x, 1)) == -8)
  620. x = XEXP (x, 0);
  621. /* Discard non-paradoxical subregs. */
  622. if (GET_CODE (x) == SUBREG
  623. && (GET_MODE_SIZE (GET_MODE (x))
  624. < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
  625. x = SUBREG_REG (x);
  626. /* Unadorned general registers are valid. */
  627. if (REG_P (x)
  628. && (strict
  629. ? STRICT_REG_OK_FOR_BASE_P (x)
  630. : NONSTRICT_REG_OK_FOR_BASE_P (x)))
  631. return true;
  632. /* Constant addresses (i.e. +/- 32k) are valid. */
  633. if (CONSTANT_ADDRESS_P (x))
  634. return true;
  635. #if TARGET_ABI_OPEN_VMS
  636. if (LINKAGE_SYMBOL_REF_P (x))
  637. return true;
  638. #endif
  639. /* Register plus a small constant offset is valid. */
  640. if (GET_CODE (x) == PLUS)
  641. {
  642. rtx ofs = XEXP (x, 1);
  643. x = XEXP (x, 0);
  644. /* Discard non-paradoxical subregs. */
  645. if (GET_CODE (x) == SUBREG
  646. && (GET_MODE_SIZE (GET_MODE (x))
  647. < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
  648. x = SUBREG_REG (x);
  649. if (REG_P (x))
  650. {
  651. if (! strict
  652. && NONSTRICT_REG_OK_FP_BASE_P (x)
  653. && CONST_INT_P (ofs))
  654. return true;
  655. if ((strict
  656. ? STRICT_REG_OK_FOR_BASE_P (x)
  657. : NONSTRICT_REG_OK_FOR_BASE_P (x))
  658. && CONSTANT_ADDRESS_P (ofs))
  659. return true;
  660. }
  661. }
  662. /* If we're managing explicit relocations, LO_SUM is valid, as are small
  663. data symbols. Avoid explicit relocations of modes larger than word
664. mode since e.g. $LC0+8($1) can fold around +/- 32k offset. */
  665. else if (TARGET_EXPLICIT_RELOCS
  666. && GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
  667. {
  668. if (small_symbolic_operand (x, Pmode))
  669. return true;
  670. if (GET_CODE (x) == LO_SUM)
  671. {
  672. rtx ofs = XEXP (x, 1);
  673. x = XEXP (x, 0);
  674. /* Discard non-paradoxical subregs. */
  675. if (GET_CODE (x) == SUBREG
  676. && (GET_MODE_SIZE (GET_MODE (x))
  677. < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
  678. x = SUBREG_REG (x);
  679. /* Must have a valid base register. */
  680. if (! (REG_P (x)
  681. && (strict
  682. ? STRICT_REG_OK_FOR_BASE_P (x)
  683. : NONSTRICT_REG_OK_FOR_BASE_P (x))))
  684. return false;
  685. /* The symbol must be local. */
  686. if (local_symbolic_operand (ofs, Pmode)
  687. || dtp32_symbolic_operand (ofs, Pmode)
  688. || tp32_symbolic_operand (ofs, Pmode))
  689. return true;
  690. }
  691. }
  692. return false;
  693. }
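/* Editorial summary of the forms accepted above, not from the original
   source: a bare base register; a base register plus a small constant
   offset (CONSTANT_ADDRESS_P); a constant address; on VMS, a ..lk linkage
   symbol; and, when explicit relocations are in use and the access is at
   most word sized, a small-data symbol or a LO_SUM of a base register with
   a local, dtp32 or tp32 symbol.  The DImode (and X -8) wrapper used by
   ldq_u is stripped before any of these checks.  */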
  694. /* Build the SYMBOL_REF for __tls_get_addr. */
  695. static GTY(()) rtx tls_get_addr_libfunc;
  696. static rtx
  697. get_tls_get_addr (void)
  698. {
  699. if (!tls_get_addr_libfunc)
  700. tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  701. return tls_get_addr_libfunc;
  702. }
  703. /* Try machine-dependent ways of modifying an illegitimate address
  704. to be legitimate. If we find one, return the new, valid address. */
  705. static rtx
  706. alpha_legitimize_address_1 (rtx x, rtx scratch, enum machine_mode mode)
  707. {
  708. HOST_WIDE_INT addend;
  709. /* If the address is (plus reg const_int) and the CONST_INT is not a
  710. valid offset, compute the high part of the constant and add it to
  711. the register. Then our address is (plus temp low-part-const). */
  712. if (GET_CODE (x) == PLUS
  713. && REG_P (XEXP (x, 0))
  714. && CONST_INT_P (XEXP (x, 1))
  715. && ! CONSTANT_ADDRESS_P (XEXP (x, 1)))
  716. {
  717. addend = INTVAL (XEXP (x, 1));
  718. x = XEXP (x, 0);
  719. goto split_addend;
  720. }
  721. /* If the address is (const (plus FOO const_int)), find the low-order
  722. part of the CONST_INT. Then load FOO plus any high-order part of the
  723. CONST_INT into a register. Our address is (plus reg low-part-const).
  724. This is done to reduce the number of GOT entries. */
  725. if (can_create_pseudo_p ()
  726. && GET_CODE (x) == CONST
  727. && GET_CODE (XEXP (x, 0)) == PLUS
  728. && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
  729. {
  730. addend = INTVAL (XEXP (XEXP (x, 0), 1));
  731. x = force_reg (Pmode, XEXP (XEXP (x, 0), 0));
  732. goto split_addend;
  733. }
  734. /* If we have a (plus reg const), emit the load as in (2), then add
  735. the two registers, and finally generate (plus reg low-part-const) as
  736. our address. */
  737. if (can_create_pseudo_p ()
  738. && GET_CODE (x) == PLUS
  739. && REG_P (XEXP (x, 0))
  740. && GET_CODE (XEXP (x, 1)) == CONST
  741. && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS
  742. && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1)))
  743. {
  744. addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1));
  745. x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0),
  746. XEXP (XEXP (XEXP (x, 1), 0), 0),
  747. NULL_RTX, 1, OPTAB_LIB_WIDEN);
  748. goto split_addend;
  749. }
  750. /* If this is a local symbol, split the address into HIGH/LO_SUM parts.
751. Avoid modes larger than word mode since e.g. $LC0+8($1) can fold
  752. around +/- 32k offset. */
  753. if (TARGET_EXPLICIT_RELOCS
  754. && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
  755. && symbolic_operand (x, Pmode))
  756. {
  757. rtx r0, r16, eqv, tga, tp, insn, dest, seq;
  758. switch (tls_symbolic_operand_type (x))
  759. {
  760. case TLS_MODEL_NONE:
  761. break;
  762. case TLS_MODEL_GLOBAL_DYNAMIC:
  763. start_sequence ();
  764. r0 = gen_rtx_REG (Pmode, 0);
  765. r16 = gen_rtx_REG (Pmode, 16);
  766. tga = get_tls_get_addr ();
  767. dest = gen_reg_rtx (Pmode);
  768. seq = GEN_INT (alpha_next_sequence_number++);
  769. emit_insn (gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq));
  770. insn = gen_call_value_osf_tlsgd (r0, tga, seq);
  771. insn = emit_call_insn (insn);
  772. RTL_CONST_CALL_P (insn) = 1;
  773. use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
  774. insn = get_insns ();
  775. end_sequence ();
  776. emit_libcall_block (insn, dest, r0, x);
  777. return dest;
  778. case TLS_MODEL_LOCAL_DYNAMIC:
  779. start_sequence ();
  780. r0 = gen_rtx_REG (Pmode, 0);
  781. r16 = gen_rtx_REG (Pmode, 16);
  782. tga = get_tls_get_addr ();
  783. scratch = gen_reg_rtx (Pmode);
  784. seq = GEN_INT (alpha_next_sequence_number++);
  785. emit_insn (gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq));
  786. insn = gen_call_value_osf_tlsldm (r0, tga, seq);
  787. insn = emit_call_insn (insn);
  788. RTL_CONST_CALL_P (insn) = 1;
  789. use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16);
  790. insn = get_insns ();
  791. end_sequence ();
  792. eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
  793. UNSPEC_TLSLDM_CALL);
  794. emit_libcall_block (insn, scratch, r0, eqv);
  795. eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL);
  796. eqv = gen_rtx_CONST (Pmode, eqv);
  797. if (alpha_tls_size == 64)
  798. {
  799. dest = gen_reg_rtx (Pmode);
  800. emit_insn (gen_rtx_SET (VOIDmode, dest, eqv));
  801. emit_insn (gen_adddi3 (dest, dest, scratch));
  802. return dest;
  803. }
  804. if (alpha_tls_size == 32)
  805. {
  806. insn = gen_rtx_HIGH (Pmode, eqv);
  807. insn = gen_rtx_PLUS (Pmode, scratch, insn);
  808. scratch = gen_reg_rtx (Pmode);
  809. emit_insn (gen_rtx_SET (VOIDmode, scratch, insn));
  810. }
  811. return gen_rtx_LO_SUM (Pmode, scratch, eqv);
  812. case TLS_MODEL_INITIAL_EXEC:
  813. eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
  814. eqv = gen_rtx_CONST (Pmode, eqv);
  815. tp = gen_reg_rtx (Pmode);
  816. scratch = gen_reg_rtx (Pmode);
  817. dest = gen_reg_rtx (Pmode);
  818. emit_insn (gen_load_tp (tp));
  819. emit_insn (gen_rtx_SET (VOIDmode, scratch, eqv));
  820. emit_insn (gen_adddi3 (dest, tp, scratch));
  821. return dest;
  822. case TLS_MODEL_LOCAL_EXEC:
  823. eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL);
  824. eqv = gen_rtx_CONST (Pmode, eqv);
  825. tp = gen_reg_rtx (Pmode);
  826. emit_insn (gen_load_tp (tp));
  827. if (alpha_tls_size == 32)
  828. {
  829. insn = gen_rtx_HIGH (Pmode, eqv);
  830. insn = gen_rtx_PLUS (Pmode, tp, insn);
  831. tp = gen_reg_rtx (Pmode);
  832. emit_insn (gen_rtx_SET (VOIDmode, tp, insn));
  833. }
  834. return gen_rtx_LO_SUM (Pmode, tp, eqv);
  835. default:
  836. gcc_unreachable ();
  837. }
  838. if (local_symbolic_operand (x, Pmode))
  839. {
  840. if (small_symbolic_operand (x, Pmode))
  841. return x;
  842. else
  843. {
  844. if (can_create_pseudo_p ())
  845. scratch = gen_reg_rtx (Pmode);
  846. emit_insn (gen_rtx_SET (VOIDmode, scratch,
  847. gen_rtx_HIGH (Pmode, x)));
  848. return gen_rtx_LO_SUM (Pmode, scratch, x);
  849. }
  850. }
  851. }
  852. return NULL;
  853. split_addend:
  854. {
  855. HOST_WIDE_INT low, high;
  856. low = ((addend & 0xffff) ^ 0x8000) - 0x8000;
  857. addend -= low;
  858. high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000;
  859. addend -= high;
  860. if (addend)
  861. x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend),
  862. (!can_create_pseudo_p () ? scratch : NULL_RTX),
  863. 1, OPTAB_LIB_WIDEN);
  864. if (high)
  865. x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high),
  866. (!can_create_pseudo_p () ? scratch : NULL_RTX),
  867. 1, OPTAB_LIB_WIDEN);
  868. return plus_constant (Pmode, x, low);
  869. }
  870. }
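/* Worked example (editorial, not from the original source) of the
   split_addend arithmetic above: for an addend of 0x1234abcd,
     low  = ((0xabcd ^ 0x8000) - 0x8000) = -0x5433
     high = 0x1234abcd - low              =  0x12350000
   so the address is rebuilt as ((reg + 0x12350000) + -0x5433), letting an
   ldah supply the high part and the 16-bit memory displacement the rest.  */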
  871. /* Try machine-dependent ways of modifying an illegitimate address
  872. to be legitimate. Return X or the new, valid address. */
  873. static rtx
  874. alpha_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
  875. enum machine_mode mode)
  876. {
  877. rtx new_x = alpha_legitimize_address_1 (x, NULL_RTX, mode);
  878. return new_x ? new_x : x;
  879. }
  880. /* Primarily this is required for TLS symbols, but given that our move
  881. patterns *ought* to be able to handle any symbol at any time, we
  882. should never be spilling symbolic operands to the constant pool, ever. */
  883. static bool
  884. alpha_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
  885. {
  886. enum rtx_code code = GET_CODE (x);
  887. return code == SYMBOL_REF || code == LABEL_REF || code == CONST;
  888. }
  889. /* We do not allow indirect calls to be optimized into sibling calls, nor
  890. can we allow a call to a function with a different GP to be optimized
  891. into a sibcall. */
  892. static bool
  893. alpha_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
  894. {
  895. /* Can't do indirect tail calls, since we don't know if the target
  896. uses the same GP. */
  897. if (!decl)
  898. return false;
  899. /* Otherwise, we can make a tail call if the target function shares
  900. the same GP. */
  901. return decl_has_samegp (decl);
  902. }
  903. int
  904. some_small_symbolic_operand_int (rtx *px, void *data ATTRIBUTE_UNUSED)
  905. {
  906. rtx x = *px;
  907. /* Don't re-split. */
  908. if (GET_CODE (x) == LO_SUM)
  909. return -1;
  910. return small_symbolic_operand (x, Pmode) != 0;
  911. }
  912. static int
  913. split_small_symbolic_operand_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
  914. {
  915. rtx x = *px;
  916. /* Don't re-split. */
  917. if (GET_CODE (x) == LO_SUM)
  918. return -1;
  919. if (small_symbolic_operand (x, Pmode))
  920. {
  921. x = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x);
  922. *px = x;
  923. return -1;
  924. }
  925. return 0;
  926. }
  927. rtx
  928. split_small_symbolic_operand (rtx x)
  929. {
  930. x = copy_insn (x);
  931. for_each_rtx (&x, split_small_symbolic_operand_1, NULL);
  932. return x;
  933. }
  934. /* Indicate that INSN cannot be duplicated. This is true for any insn
  935. that we've marked with gpdisp relocs, since those have to stay in
  936. 1-1 correspondence with one another.
  937. Technically we could copy them if we could set up a mapping from one
  938. sequence number to another, across the set of insns to be duplicated.
  939. This seems overly complicated and error-prone since interblock motion
  940. from sched-ebb could move one of the pair of insns to a different block.
  941. Also cannot allow jsr insns to be duplicated. If they throw exceptions,
  942. then they'll be in a different block from their ldgp. Which could lead
  943. the bb reorder code to think that it would be ok to copy just the block
  944. containing the call and branch to the block containing the ldgp. */
  945. static bool
  946. alpha_cannot_copy_insn_p (rtx insn)
  947. {
  948. if (!reload_completed || !TARGET_EXPLICIT_RELOCS)
  949. return false;
  950. if (recog_memoized (insn) >= 0)
  951. return get_attr_cannot_copy (insn);
  952. else
  953. return false;
  954. }
  955. /* Try a machine-dependent way of reloading an illegitimate address
  956. operand. If we find one, push the reload and return the new rtx. */
  957. rtx
  958. alpha_legitimize_reload_address (rtx x,
  959. enum machine_mode mode ATTRIBUTE_UNUSED,
  960. int opnum, int type,
  961. int ind_levels ATTRIBUTE_UNUSED)
  962. {
  963. /* We must recognize output that we have already generated ourselves. */
  964. if (GET_CODE (x) == PLUS
  965. && GET_CODE (XEXP (x, 0)) == PLUS
  966. && REG_P (XEXP (XEXP (x, 0), 0))
  967. && CONST_INT_P (XEXP (XEXP (x, 0), 1))
  968. && CONST_INT_P (XEXP (x, 1)))
  969. {
  970. push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
  971. BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
  972. opnum, (enum reload_type) type);
  973. return x;
  974. }
  975. /* We wish to handle large displacements off a base register by
  976. splitting the addend across an ldah and the mem insn. This
977. cuts the number of extra insns needed from 3 to 1. */
  978. if (GET_CODE (x) == PLUS
  979. && REG_P (XEXP (x, 0))
  980. && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
  981. && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0)))
  982. && GET_CODE (XEXP (x, 1)) == CONST_INT)
  983. {
  984. HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
  985. HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  986. HOST_WIDE_INT high
  987. = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  988. /* Check for 32-bit overflow. */
  989. if (high + low != val)
  990. return NULL_RTX;
  991. /* Reload the high part into a base reg; leave the low part
  992. in the mem directly. */
  993. x = gen_rtx_PLUS (GET_MODE (x),
  994. gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
  995. GEN_INT (high)),
  996. GEN_INT (low));
  997. push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
  998. BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
  999. opnum, (enum reload_type) type);
  1000. return x;
  1001. }
  1002. return NULL_RTX;
  1003. }
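/* Worked example (editorial, not from the original source): for a
   displacement of 0x8000 the code above reloads (reg + 0x10000) into a
   base register and leaves -0x8000 as the memory displacement, since
   low = -0x8000 and high = 0x10000.  For 0x80007000 the "high + low != val"
   check fails (high saturates to -0x80000000), so NULL_RTX is returned and
   reload falls back to its generic handling.  */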
  1004. /* Compute a (partial) cost for rtx X. Return true if the complete
  1005. cost has been computed, and false if subexpressions should be
  1006. scanned. In either case, *TOTAL contains the cost result. */
  1007. static bool
  1008. alpha_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
  1009. bool speed)
  1010. {
  1011. enum machine_mode mode = GET_MODE (x);
  1012. bool float_mode_p = FLOAT_MODE_P (mode);
  1013. const struct alpha_rtx_cost_data *cost_data;
  1014. if (!speed)
  1015. cost_data = &alpha_rtx_cost_size;
  1016. else
  1017. cost_data = &alpha_rtx_cost_data[alpha_tune];
  1018. switch (code)
  1019. {
  1020. case CONST_INT:
  1021. /* If this is an 8-bit constant, return zero since it can be used
  1022. nearly anywhere with no cost. If it is a valid operand for an
  1023. ADD or AND, likewise return 0 if we know it will be used in that
  1024. context. Otherwise, return 2 since it might be used there later.
  1025. All other constants take at least two insns. */
  1026. if (INTVAL (x) >= 0 && INTVAL (x) < 256)
  1027. {
  1028. *total = 0;
  1029. return true;
  1030. }
  1031. /* FALLTHRU */
  1032. case CONST_DOUBLE:
  1033. if (x == CONST0_RTX (mode))
  1034. *total = 0;
  1035. else if ((outer_code == PLUS && add_operand (x, VOIDmode))
  1036. || (outer_code == AND && and_operand (x, VOIDmode)))
  1037. *total = 0;
  1038. else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode))
  1039. *total = 2;
  1040. else
  1041. *total = COSTS_N_INSNS (2);
  1042. return true;
  1043. case CONST:
  1044. case SYMBOL_REF:
  1045. case LABEL_REF:
  1046. if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode))
  1047. *total = COSTS_N_INSNS (outer_code != MEM);
  1048. else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode))
  1049. *total = COSTS_N_INSNS (1 + (outer_code != MEM));
  1050. else if (tls_symbolic_operand_type (x))
  1051. /* Estimate of cost for call_pal rduniq. */
  1052. /* ??? How many insns do we emit here? More than one... */
  1053. *total = COSTS_N_INSNS (15);
  1054. else
  1055. /* Otherwise we do a load from the GOT. */
  1056. *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
  1057. return true;
  1058. case HIGH:
  1059. /* This is effectively an add_operand. */
  1060. *total = 2;
  1061. return true;
  1062. case PLUS:
  1063. case MINUS:
  1064. if (float_mode_p)
  1065. *total = cost_data->fp_add;
  1066. else if (GET_CODE (XEXP (x, 0)) == MULT
  1067. && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode))
  1068. {
  1069. *total = (rtx_cost (XEXP (XEXP (x, 0), 0),
  1070. (enum rtx_code) outer_code, opno, speed)
  1071. + rtx_cost (XEXP (x, 1),
  1072. (enum rtx_code) outer_code, opno, speed)
  1073. + COSTS_N_INSNS (1));
  1074. return true;
  1075. }
  1076. return false;
  1077. case MULT:
  1078. if (float_mode_p)
  1079. *total = cost_data->fp_mult;
  1080. else if (mode == DImode)
  1081. *total = cost_data->int_mult_di;
  1082. else
  1083. *total = cost_data->int_mult_si;
  1084. return false;
  1085. case ASHIFT:
  1086. if (CONST_INT_P (XEXP (x, 1))
  1087. && INTVAL (XEXP (x, 1)) <= 3)
  1088. {
  1089. *total = COSTS_N_INSNS (1);
  1090. return false;
  1091. }
  1092. /* FALLTHRU */
  1093. case ASHIFTRT:
  1094. case LSHIFTRT:
  1095. *total = cost_data->int_shift;
  1096. return false;
  1097. case IF_THEN_ELSE:
  1098. if (float_mode_p)
  1099. *total = cost_data->fp_add;
  1100. else
  1101. *total = cost_data->int_cmov;
  1102. return false;
  1103. case DIV:
  1104. case UDIV:
  1105. case MOD:
  1106. case UMOD:
  1107. if (!float_mode_p)
  1108. *total = cost_data->int_div;
  1109. else if (mode == SFmode)
  1110. *total = cost_data->fp_div_sf;
  1111. else
  1112. *total = cost_data->fp_div_df;
  1113. return false;
  1114. case MEM:
  1115. *total = COSTS_N_INSNS (!speed ? 1 : alpha_memory_latency);
  1116. return true;
  1117. case NEG:
  1118. if (! float_mode_p)
  1119. {
  1120. *total = COSTS_N_INSNS (1);
  1121. return false;
  1122. }
  1123. /* FALLTHRU */
  1124. case ABS:
  1125. if (! float_mode_p)
  1126. {
  1127. *total = COSTS_N_INSNS (1) + cost_data->int_cmov;
  1128. return false;
  1129. }
  1130. /* FALLTHRU */
  1131. case FLOAT:
  1132. case UNSIGNED_FLOAT:
  1133. case FIX:
  1134. case UNSIGNED_FIX:
  1135. case FLOAT_TRUNCATE:
  1136. *total = cost_data->fp_add;
  1137. return false;
  1138. case FLOAT_EXTEND:
  1139. if (MEM_P (XEXP (x, 0)))
  1140. *total = 0;
  1141. else
  1142. *total = cost_data->fp_add;
  1143. return false;
  1144. default:
  1145. return false;
  1146. }
  1147. }
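/* Editorial note, not from the original source: the PLUS/MINUS case above
   charges (plus (mult X 4-or-8) Y) one insn plus its operands because such
   expressions map to the scaled-add instructions (s4addq/s8addq and
   friends), and the !speed path makes every memory reference look like a
   single insn so that -Os decisions are driven by size rather than by
   alpha_memory_latency.  */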
  1148. /* REF is an alignable memory location. Place an aligned SImode
  1149. reference into *PALIGNED_MEM and the number of bits to shift into
1150. *PBITNUM. The memory is accessed as the full aligned SImode word
1151. that contains the location. */
  1152. void
  1153. get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum)
  1154. {
  1155. rtx base;
  1156. HOST_WIDE_INT disp, offset;
  1157. gcc_assert (MEM_P (ref));
  1158. if (reload_in_progress
  1159. && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
  1160. {
  1161. base = find_replacement (&XEXP (ref, 0));
  1162. gcc_assert (memory_address_p (GET_MODE (ref), base));
  1163. }
  1164. else
  1165. base = XEXP (ref, 0);
  1166. if (GET_CODE (base) == PLUS)
  1167. disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  1168. else
  1169. disp = 0;
  1170. /* Find the byte offset within an aligned word. If the memory itself is
  1171. claimed to be aligned, believe it. Otherwise, aligned_memory_operand
  1172. will have examined the base register and determined it is aligned, and
  1173. thus displacements from it are naturally alignable. */
  1174. if (MEM_ALIGN (ref) >= 32)
  1175. offset = 0;
  1176. else
  1177. offset = disp & 3;
1178. /* The location should not cross an aligned word boundary. */
  1179. gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref))
  1180. <= GET_MODE_SIZE (SImode));
  1181. /* Access the entire aligned word. */
  1182. *paligned_mem = widen_memory_access (ref, SImode, -offset);
  1183. /* Convert the byte offset within the word to a bit offset. */
  1184. offset *= BITS_PER_UNIT;
  1185. *pbitnum = GEN_INT (offset);
  1186. }
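/* Worked example (editorial, not from the original source): for an HImode
   reference at (reg + 10) with no alignment promise on the MEM itself,
   offset = 10 & 3 = 2, so *PALIGNED_MEM becomes the SImode word at
   (reg + 8) and *PBITNUM is 16 -- the halfword sits in bits 16..31 of the
   aligned longword and can be pulled out with an extwl-style sequence.  */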
  1187. /* Similar, but just get the address. Handle the two reload cases.
1188. Any constant displacement in REF is folded into the returned address. */
  1189. rtx
  1190. get_unaligned_address (rtx ref)
  1191. {
  1192. rtx base;
  1193. HOST_WIDE_INT offset = 0;
  1194. gcc_assert (MEM_P (ref));
  1195. if (reload_in_progress
  1196. && ! memory_address_p (GET_MODE (ref), XEXP (ref, 0)))
  1197. {
  1198. base = find_replacement (&XEXP (ref, 0));
  1199. gcc_assert (memory_address_p (GET_MODE (ref), base));
  1200. }
  1201. else
  1202. base = XEXP (ref, 0);
  1203. if (GET_CODE (base) == PLUS)
  1204. offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
  1205. return plus_constant (Pmode, base, offset);
  1206. }
  1207. /* Compute a value X, such that X & 7 == (ADDR + OFS) & 7.
  1208. X is always returned in a register. */
  1209. rtx
  1210. get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs)
  1211. {
  1212. if (GET_CODE (addr) == PLUS)
  1213. {
  1214. ofs += INTVAL (XEXP (addr, 1));
  1215. addr = XEXP (addr, 0);
  1216. }
  1217. return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7),
  1218. NULL_RTX, 1, OPTAB_LIB_WIDEN);
  1219. }
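/* Worked example (editorial, not from the original source): for
   ADDR = (plus reg 6) and OFS = 1 the code folds the constant and returns
   a register holding reg + 7; only the low three bits matter to the
   caller, and (reg + 7) & 7 equals (reg + 6 + 1) & 7 as required.  */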
  1220. /* On the Alpha, all (non-symbolic) constants except zero go into
  1221. a floating-point register via memory. Note that we cannot
  1222. return anything that is not a subset of RCLASS, and that some
  1223. symbolic constants cannot be dropped to memory. */
  1224. enum reg_class
  1225. alpha_preferred_reload_class(rtx x, enum reg_class rclass)
  1226. {
  1227. /* Zero is present in any register class. */
  1228. if (x == CONST0_RTX (GET_MODE (x)))
  1229. return rclass;
  1230. /* These sorts of constants we can easily drop to memory. */
  1231. if (CONST_INT_P (x)
  1232. || GET_CODE (x) == CONST_DOUBLE
  1233. || GET_CODE (x) == CONST_VECTOR)
  1234. {
  1235. if (rclass == FLOAT_REGS)
  1236. return NO_REGS;
  1237. if (rclass == ALL_REGS)
  1238. return GENERAL_REGS;
  1239. return rclass;
  1240. }
  1241. /* All other kinds of constants should not (and in the case of HIGH
  1242. cannot) be dropped to memory -- instead we use a GENERAL_REGS
  1243. secondary reload. */
  1244. if (CONSTANT_P (x))
  1245. return (rclass == ALL_REGS ? GENERAL_REGS : rclass);
  1246. return rclass;
  1247. }
  1248. /* Inform reload about cases where moving X with a mode MODE to a register in
  1249. RCLASS requires an extra scratch or immediate register. Return the class
  1250. needed for the immediate register. */
  1251. static reg_class_t
  1252. alpha_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
  1253. enum machine_mode mode, secondary_reload_info *sri)
  1254. {
  1255. enum reg_class rclass = (enum reg_class) rclass_i;
  1256. /* Loading and storing HImode or QImode values to and from memory
  1257. usually requires a scratch register. */
  1258. if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
  1259. {
  1260. if (any_memory_operand (x, mode))
  1261. {
  1262. if (in_p)
  1263. {
  1264. if (!aligned_memory_operand (x, mode))
  1265. sri->icode = direct_optab_handler (reload_in_optab, mode);
  1266. }
  1267. else
  1268. sri->icode = direct_optab_handler (reload_out_optab, mode);
  1269. return NO_REGS;
  1270. }
  1271. }
  1272. /* We also cannot do integral arithmetic into FP regs, as might result
  1273. from register elimination into a DImode fp register. */
  1274. if (rclass == FLOAT_REGS)
  1275. {
  1276. if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
  1277. return GENERAL_REGS;
  1278. if (in_p && INTEGRAL_MODE_P (mode)
  1279. && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
  1280. return GENERAL_REGS;
  1281. }
  1282. return NO_REGS;
  1283. }
  1284. /* Subfunction of the following function. Update the flags of any MEM
  1285. found in part of X. */
  1286. static int
  1287. alpha_set_memflags_1 (rtx *xp, void *data)
  1288. {
  1289. rtx x = *xp, orig = (rtx) data;
  1290. if (!MEM_P (x))
  1291. return 0;
  1292. MEM_VOLATILE_P (x) = MEM_VOLATILE_P (orig);
  1293. MEM_NOTRAP_P (x) = MEM_NOTRAP_P (orig);
  1294. MEM_READONLY_P (x) = MEM_READONLY_P (orig);
  1295. /* Sadly, we cannot use alias sets because the extra aliasing
  1296. produced by the AND interferes. Given that two-byte quantities
  1297. are the only thing we would be able to differentiate anyway,
  1298. there does not seem to be any point in convoluting the early
  1299. out of the alias check. */
  1300. return -1;
  1301. }
  1302. /* Given SEQ, which is an INSN list, look for any MEMs in either
1303. a SET_DEST or a SET_SRC and copy the volatile, notrap, and readonly
1304. flags from REF into each of the MEMs found. If REF is not
  1305. a MEM, don't do anything. */
  1306. void
  1307. alpha_set_memflags (rtx seq, rtx ref)
  1308. {
  1309. rtx insn;
  1310. if (!MEM_P (ref))
  1311. return;
  1312. /* This is only called from alpha.md, after having had something
  1313. generated from one of the insn patterns. So if everything is
  1314. zero, the pattern is already up-to-date. */
  1315. if (!MEM_VOLATILE_P (ref)
  1316. && !MEM_NOTRAP_P (ref)
  1317. && !MEM_READONLY_P (ref))
  1318. return;
  1319. for (insn = seq; insn; insn = NEXT_INSN (insn))
  1320. if (INSN_P (insn))
  1321. for_each_rtx (&PATTERN (insn), alpha_set_memflags_1, (void *) ref);
  1322. else
  1323. gcc_unreachable ();
  1324. }
  1325. static rtx alpha_emit_set_const (rtx, enum machine_mode, HOST_WIDE_INT,
  1326. int, bool);
  1327. /* Internal routine for alpha_emit_set_const to check for N or below insns.
  1328. If NO_OUTPUT is true, then we only check to see if N insns are possible,
  1329. and return pc_rtx if successful. */
  1330. static rtx
  1331. alpha_emit_set_const_1 (rtx target, enum machine_mode mode,
  1332. HOST_WIDE_INT c, int n, bool no_output)
  1333. {
  1334. HOST_WIDE_INT new_const;
  1335. int i, bits;
  1336. /* Use a pseudo if highly optimizing and still generating RTL. */
  1337. rtx subtarget
  1338. = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target);
  1339. rtx temp, insn;
  1340. /* If this is a sign-extended 32-bit constant, we can do this in at most
  1341. three insns, so do it if we have enough insns left. We always have
  1342. a sign-extended 32-bit constant when compiling on a narrow machine. */
  1343. if (HOST_BITS_PER_WIDE_INT != 64
  1344. || c >> 31 == -1 || c >> 31 == 0)
  1345. {
  1346. HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000;
  1347. HOST_WIDE_INT tmp1 = c - low;
  1348. HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000;
  1349. HOST_WIDE_INT extra = 0;
  1350. /* If HIGH will be interpreted as negative but the constant is
  1351. positive, we must adjust it to do two ldha insns. */
  1352. if ((high & 0x8000) != 0 && c >= 0)
  1353. {
  1354. extra = 0x4000;
  1355. tmp1 -= 0x40000000;
  1356. high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
  1357. }
  1358. if (c == low || (low == 0 && extra == 0))
  1359. {
  1360. /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
  1361. but that meant that we can't handle INT_MIN on 32-bit machines
  1362. (like NT/Alpha), because we recurse indefinitely through
  1363. emit_move_insn to gen_movdi. So instead, since we know exactly
  1364. what we want, create it explicitly. */
  1365. if (no_output)
  1366. return pc_rtx;
  1367. if (target == NULL)
  1368. target = gen_reg_rtx (mode);
  1369. emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
  1370. return target;
  1371. }
  1372. else if (n >= 2 + (extra != 0))
  1373. {
  1374. if (no_output)
  1375. return pc_rtx;
  1376. if (!can_create_pseudo_p ())
  1377. {
  1378. emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (high << 16)));
  1379. temp = target;
  1380. }
  1381. else
  1382. temp = copy_to_suggested_reg (GEN_INT (high << 16),
  1383. subtarget, mode);
  1384. /* As of 2002-02-23, addsi3 is only available when not optimizing.
  1385. This means that if we go through expand_binop, we'll try to
  1386. generate extensions, etc, which will require new pseudos, which
  1387. will fail during some split phases. The SImode add patterns
  1388. still exist, but are not named. So build the insns by hand. */
  1389. if (extra != 0)
  1390. {
  1391. if (! subtarget)
  1392. subtarget = gen_reg_rtx (mode);
  1393. insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16));
  1394. insn = gen_rtx_SET (VOIDmode, subtarget, insn);
  1395. emit_insn (insn);
  1396. temp = subtarget;
  1397. }
  1398. if (target == NULL)
  1399. target = gen_reg_rtx (mode);
  1400. insn = gen_rtx_PLUS (mode, temp, GEN_INT (low));
  1401. insn = gen_rtx_SET (VOIDmode, target, insn);
  1402. emit_insn (insn);
  1403. return target;
  1404. }
  1405. }
  1406. /* If we couldn't do it that way, try some other methods. But if we have
  1407. no instructions left, don't bother. Likewise, if this is SImode and
  1408. we can't make pseudos, we can't do anything since the expand_binop
  1409. and expand_unop calls will widen and try to make pseudos. */
  1410. if (n == 1 || (mode == SImode && !can_create_pseudo_p ()))
  1411. return 0;
  1412. /* Next, see if we can load a related constant and then shift and possibly
1413. negate it to get the constant we want. Try this once for each
1414. increasing number of insns.
  1415. for (i = 1; i < n; i++)
  1416. {
1417. /* First, see if subtracting some low bits leaves high bits that
1418. are easy to load. */
  1419. new_const = ((c & 0xffff) ^ 0x8000) - 0x8000;
  1420. if (new_const != 0)
  1421. {
  1422. temp = alpha_emit_set_const (subtarget, mode, c - new_const, i, no_output);
  1423. if (temp)
  1424. {
  1425. if (no_output)
  1426. return temp;
  1427. return expand_binop (mode, add_optab, temp, GEN_INT (new_const),
  1428. target, 0, OPTAB_WIDEN);
  1429. }
  1430. }
  1431. /* Next try complementing. */
  1432. temp = alpha_emit_set_const (subtarget, mode, ~c, i, no_output);
  1433. if (temp)
  1434. {
  1435. if (no_output)
  1436. return temp;
  1437. return expand_unop (mode, one_cmpl_optab, temp, target, 0);
  1438. }
  1439. /* Next try to form a constant and do a left shift. We can do this
  1440. if some low-order bits are zero; the exact_log2 call below tells
  1441. us that information. The bits we are shifting out could be any
  1442. value, but here we'll just try the 0- and sign-extended forms of
  1443. the constant. To try to increase the chance of having the same
  1444. constant in more than one insn, start at the highest number of
  1445. bits to shift, but try all possibilities in case a ZAPNOT will
  1446. be useful. */
  1447. bits = exact_log2 (c & -c);
  1448. if (bits > 0)
  1449. for (; bits > 0; bits--)
  1450. {
  1451. new_const = c >> bits;
  1452. temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
  1453. if (!temp && c < 0)
  1454. {
  1455. new_const = (unsigned HOST_WIDE_INT)c >> bits;
  1456. temp = alpha_emit_set_const (subtarget, mode, new_const,
  1457. i, no_output);
  1458. }
  1459. if (temp)
  1460. {
  1461. if (no_output)
  1462. return temp;
  1463. return expand_binop (mode, ashl_optab, temp, GEN_INT (bits),
  1464. target, 0, OPTAB_WIDEN);
  1465. }
  1466. }
  1467. /* Now try high-order zero bits. Here we try the shifted-in bits as
  1468. all zero and all ones. Be careful to avoid shifting outside the
  1469. mode and to avoid shifting outside the host wide int size. */
  1470. /* On narrow hosts, don't shift a 1 into the high bit, since we'll
  1471. confuse the recursive call and set all of the high 32 bits. */
  1472. bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
  1473. - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64));
  1474. if (bits > 0)
  1475. for (; bits > 0; bits--)
  1476. {
  1477. new_const = c << bits;
  1478. temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
  1479. if (!temp)
  1480. {
  1481. new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
  1482. temp = alpha_emit_set_const (subtarget, mode, new_const,
  1483. i, no_output);
  1484. }
  1485. if (temp)
  1486. {
  1487. if (no_output)
  1488. return temp;
  1489. return expand_binop (mode, lshr_optab, temp, GEN_INT (bits),
  1490. target, 1, OPTAB_WIDEN);
  1491. }
  1492. }
  1493. /* Now try high-order 1 bits. We get that with a sign-extension.
  1494. But one bit isn't enough here. Be careful to avoid shifting outside
  1495. the mode and to avoid shifting outside the host wide int size. */
  1496. bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
  1497. - floor_log2 (~ c) - 2);
  1498. if (bits > 0)
  1499. for (; bits > 0; bits--)
  1500. {
  1501. new_const = c << bits;
  1502. temp = alpha_emit_set_const (subtarget, mode, new_const, i, no_output);
  1503. if (!temp)
  1504. {
  1505. new_const = (c << bits) | (((HOST_WIDE_INT) 1 << bits) - 1);
  1506. temp = alpha_emit_set_const (subtarget, mode, new_const,
  1507. i, no_output);
  1508. }
  1509. if (temp)
  1510. {
  1511. if (no_output)
  1512. return temp;
  1513. return expand_binop (mode, ashr_optab, temp, GEN_INT (bits),
  1514. target, 0, OPTAB_WIDEN);
  1515. }
  1516. }
  1517. }
  1518. #if HOST_BITS_PER_WIDE_INT == 64
1519. /* Finally, see if we can load a value into the target that is the same as the
  1520. constant except that all bytes that are 0 are changed to be 0xff. If we
  1521. can, then we can do a ZAPNOT to obtain the desired constant. */
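/* An illustrative example (constant chosen here): for c = 0x000000ff000000ff
   every zero byte becomes 0xff, giving new_const = -1, which loads in one
   insn; the AND mask c | ~new_const = 0x000000ff000000ff selects whole
   bytes, so the and_optab expansion collapses to a single zapnot. */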
  1522. new_const = c;
  1523. for (i = 0; i < 64; i += 8)
  1524. if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0)
  1525. new_const |= (HOST_WIDE_INT) 0xff << i;
  1526. /* We are only called for SImode and DImode. If this is SImode, ensure that
  1527. we are sign extended to a full word. */
  1528. if (mode == SImode)
  1529. new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000;
  1530. if (new_const != c)
  1531. {
  1532. temp = alpha_emit_set_const (subtarget, mode, new_const, n - 1, no_output);
  1533. if (temp)
  1534. {
  1535. if (no_output)
  1536. return temp;
  1537. return expand_binop (mode, and_optab, temp, GEN_INT (c | ~ new_const),
  1538. target, 0, OPTAB_WIDEN);
  1539. }
  1540. }
  1541. #endif
  1542. return 0;
  1543. }
  1544. /* Try to output insns to set TARGET equal to the constant C if it can be
1545. done in at most N insns. Do all computations in MODE. Returns the place
  1546. where the output has been placed if it can be done and the insns have been
  1547. emitted. If it would take more than N insns, zero is returned and no
1548. insns are emitted. */
  1549. static rtx
  1550. alpha_emit_set_const (rtx target, enum machine_mode mode,
  1551. HOST_WIDE_INT c, int n, bool no_output)
  1552. {
  1553. enum machine_mode orig_mode = mode;
  1554. rtx orig_target = target;
  1555. rtx result = 0;
  1556. int i;
1557. /* If we can't make any pseudos, TARGET is an SImode hard register, and we
  1558. can't load this constant in one insn, do this in DImode. */
  1559. if (!can_create_pseudo_p () && mode == SImode
  1560. && REG_P (target) && REGNO (target) < FIRST_PSEUDO_REGISTER)
  1561. {
  1562. result = alpha_emit_set_const_1 (target, mode, c, 1, no_output);
  1563. if (result)
  1564. return result;
  1565. target = no_output ? NULL : gen_lowpart (DImode, target);
  1566. mode = DImode;
  1567. }
  1568. else if (mode == V8QImode || mode == V4HImode || mode == V2SImode)
  1569. {
  1570. target = no_output ? NULL : gen_lowpart (DImode, target);
  1571. mode = DImode;
  1572. }
  1573. /* Try 1 insn, then 2, then up to N. */
  1574. for (i = 1; i <= n; i++)
  1575. {
  1576. result = alpha_emit_set_const_1 (target, mode, c, i, no_output);
  1577. if (result)
  1578. {
  1579. rtx insn, set;
  1580. if (no_output)
  1581. return result;
  1582. insn = get_last_insn ();
  1583. set = single_set (insn);
  1584. if (! CONSTANT_P (SET_SRC (set)))
  1585. set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c));
  1586. break;
  1587. }
  1588. }
  1589. /* Allow for the case where we changed the mode of TARGET. */
  1590. if (result)
  1591. {
  1592. if (result == target)
  1593. result = orig_target;
  1594. else if (mode != orig_mode)
  1595. result = gen_lowpart (orig_mode, result);
  1596. }
  1597. return result;
  1598. }
  1599. /* Having failed to find a 3 insn sequence in alpha_emit_set_const,
1600. fall back to a straightforward decomposition. We do this to avoid
  1601. exponential run times encountered when looking for longer sequences
  1602. with alpha_emit_set_const. */
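/* An illustrative example (constant chosen here): decomposing
   c = 0x0001000200030004 gives d1 = 0x4, d2 = 0x30000, d3 = 0x2 and
   d4 = 0x10000, so the emitted sequence is: load 0x10000, add 2, shift
   left by 32, add 0x30000, add 4. */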
  1603. static rtx
  1604. alpha_emit_set_long_const (rtx target, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
  1605. {
  1606. HOST_WIDE_INT d1, d2, d3, d4;
  1607. /* Decompose the entire word */
  1608. #if HOST_BITS_PER_WIDE_INT >= 64
  1609. gcc_assert (c2 == -(c1 < 0));
  1610. d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  1611. c1 -= d1;
  1612. d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  1613. c1 = (c1 - d2) >> 32;
  1614. d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  1615. c1 -= d3;
  1616. d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  1617. gcc_assert (c1 == d4);
  1618. #else
  1619. d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000;
  1620. c1 -= d1;
  1621. d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  1622. gcc_assert (c1 == d2);
  1623. c2 += (d2 < 0);
  1624. d3 = ((c2 & 0xffff) ^ 0x8000) - 0x8000;
  1625. c2 -= d3;
  1626. d4 = ((c2 & 0xffffffff) ^ 0x80000000) - 0x80000000;
  1627. gcc_assert (c2 == d4);
  1628. #endif
  1629. /* Construct the high word */
  1630. if (d4)
  1631. {
  1632. emit_move_insn (target, GEN_INT (d4));
  1633. if (d3)
  1634. emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3)));
  1635. }
  1636. else
  1637. emit_move_insn (target, GEN_INT (d3));
  1638. /* Shift it into place */
  1639. emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32)));
  1640. /* Add in the low bits. */
  1641. if (d2)
  1642. emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2)));
  1643. if (d1)
  1644. emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1)));
  1645. return target;
  1646. }
  1647. /* Given an integral CONST_INT, CONST_DOUBLE, or CONST_VECTOR, return
  1648. the low 64 bits. */
  1649. static void
  1650. alpha_extract_integer (rtx x, HOST_WIDE_INT *p0, HOST_WIDE_INT *p1)
  1651. {
  1652. HOST_WIDE_INT i0, i1;
  1653. if (GET_CODE (x) == CONST_VECTOR)
  1654. x = simplify_subreg (DImode, x, GET_MODE (x), 0);
  1655. if (CONST_INT_P (x))
  1656. {
  1657. i0 = INTVAL (x);
  1658. i1 = -(i0 < 0);
  1659. }
  1660. else if (HOST_BITS_PER_WIDE_INT >= 64)
  1661. {
  1662. i0 = CONST_DOUBLE_LOW (x);
  1663. i1 = -(i0 < 0);
  1664. }
  1665. else
  1666. {
  1667. i0 = CONST_DOUBLE_LOW (x);
  1668. i1 = CONST_DOUBLE_HIGH (x);
  1669. }
  1670. *p0 = i0;
  1671. *p1 = i1;
  1672. }
  1673. /* Implement TARGET_LEGITIMATE_CONSTANT_P. This is all constants for which
  1674. we are willing to load the value into a register via a move pattern.
  1675. Normally this is all symbolic constants, integral constants that
  1676. take three or fewer instructions, and floating-point zero. */
  1677. bool
  1678. alpha_legitimate_constant_p (enum machine_mode mode, rtx x)
  1679. {
  1680. HOST_WIDE_INT i0, i1;
  1681. switch (GET_CODE (x))
  1682. {
  1683. case LABEL_REF:
  1684. case HIGH:
  1685. return true;
  1686. case CONST:
  1687. if (GET_CODE (XEXP (x, 0)) == PLUS
  1688. && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
  1689. x = XEXP (XEXP (x, 0), 0);
  1690. else
  1691. return true;
  1692. if (GET_CODE (x) != SYMBOL_REF)
  1693. return true;
  1694. /* FALLTHRU */
  1695. case SYMBOL_REF:
  1696. /* TLS symbols are never valid. */
  1697. return SYMBOL_REF_TLS_MODEL (x) == 0;
  1698. case CONST_DOUBLE:
  1699. if (x == CONST0_RTX (mode))
  1700. return true;
  1701. if (FLOAT_MODE_P (mode))
  1702. return false;
  1703. goto do_integer;
  1704. case CONST_VECTOR:
  1705. if (x == CONST0_RTX (mode))
  1706. return true;
  1707. if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
  1708. return false;
  1709. if (GET_MODE_SIZE (mode) != 8)
  1710. return false;
  1711. goto do_integer;
  1712. case CONST_INT:
  1713. do_integer:
  1714. if (TARGET_BUILD_CONSTANTS)
  1715. return true;
  1716. alpha_extract_integer (x, &i0, &i1);
1717. if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
  1718. return alpha_emit_set_const_1 (x, mode, i0, 3, true) != NULL;
  1719. return false;
  1720. default:
  1721. return false;
  1722. }
  1723. }
  1724. /* Operand 1 is known to be a constant, and should require more than one
  1725. instruction to load. Emit that multi-part load. */
  1726. bool
  1727. alpha_split_const_mov (enum machine_mode mode, rtx *operands)
  1728. {
  1729. HOST_WIDE_INT i0, i1;
  1730. rtx temp = NULL_RTX;
  1731. alpha_extract_integer (operands[1], &i0, &i1);
  1732. if (HOST_BITS_PER_WIDE_INT >= 64 || i1 == -(i0 < 0))
  1733. temp = alpha_emit_set_const (operands[0], mode, i0, 3, false);
  1734. if (!temp && TARGET_BUILD_CONSTANTS)
  1735. temp = alpha_emit_set_long_const (operands[0], i0, i1);
  1736. if (temp)
  1737. {
  1738. if (!rtx_equal_p (operands[0], temp))
  1739. emit_move_insn (operands[0], temp);
  1740. return true;
  1741. }
  1742. return false;
  1743. }
  1744. /* Expand a move instruction; return true if all work is done.
  1745. We don't handle non-bwx subword loads here. */
  1746. bool
  1747. alpha_expand_mov (enum machine_mode mode, rtx *operands)
  1748. {
  1749. rtx tmp;
  1750. /* If the output is not a register, the input must be. */
  1751. if (MEM_P (operands[0])
  1752. && ! reg_or_0_operand (operands[1], mode))
  1753. operands[1] = force_reg (mode, operands[1]);
  1754. /* Allow legitimize_address to perform some simplifications. */
  1755. if (mode == Pmode && symbolic_operand (operands[1], mode))
  1756. {
  1757. tmp = alpha_legitimize_address_1 (operands[1], operands[0], mode);
  1758. if (tmp)
  1759. {
  1760. if (tmp == operands[0])
  1761. return true;
  1762. operands[1] = tmp;
  1763. return false;
  1764. }
  1765. }
  1766. /* Early out for non-constants and valid constants. */
  1767. if (! CONSTANT_P (operands[1]) || input_operand (operands[1], mode))
  1768. return false;
  1769. /* Split large integers. */
  1770. if (CONST_INT_P (operands[1])
  1771. || GET_CODE (operands[1]) == CONST_DOUBLE
  1772. || GET_CODE (operands[1]) == CONST_VECTOR)
  1773. {
  1774. if (alpha_split_const_mov (mode, operands))
  1775. return true;
  1776. }
  1777. /* Otherwise we've nothing left but to drop the thing to memory. */
  1778. tmp = force_const_mem (mode, operands[1]);
  1779. if (tmp == NULL_RTX)
  1780. return false;
  1781. if (reload_in_progress)
  1782. {
  1783. emit_move_insn (operands[0], XEXP (tmp, 0));
  1784. operands[1] = replace_equiv_address (tmp, operands[0]);
  1785. }
  1786. else
  1787. operands[1] = validize_mem (tmp);
  1788. return false;
  1789. }
  1790. /* Expand a non-bwx QImode or HImode move instruction;
  1791. return true if all work is done. */
  1792. bool
  1793. alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
  1794. {
  1795. rtx seq;
  1796. /* If the output is not a register, the input must be. */
  1797. if (MEM_P (operands[0]))
  1798. operands[1] = force_reg (mode, operands[1]);
  1799. /* Handle four memory cases, unaligned and aligned for either the input
  1800. or the output. The only case where we can be called during reload is
  1801. for aligned loads; all other cases require temporaries. */
  1802. if (any_memory_operand (operands[1], mode))
  1803. {
  1804. if (aligned_memory_operand (operands[1], mode))
  1805. {
  1806. if (reload_in_progress)
  1807. {
  1808. if (mode == QImode)
  1809. seq = gen_reload_inqi_aligned (operands[0], operands[1]);
  1810. else
  1811. seq = gen_reload_inhi_aligned (operands[0], operands[1]);
  1812. emit_insn (seq);
  1813. }
  1814. else
  1815. {
  1816. rtx aligned_mem, bitnum;
  1817. rtx scratch = gen_reg_rtx (SImode);
  1818. rtx subtarget;
  1819. bool copyout;
  1820. get_aligned_mem (operands[1], &aligned_mem, &bitnum);
  1821. subtarget = operands[0];
  1822. if (REG_P (subtarget))
  1823. subtarget = gen_lowpart (DImode, subtarget), copyout = false;
  1824. else
  1825. subtarget = gen_reg_rtx (DImode), copyout = true;
  1826. if (mode == QImode)
  1827. seq = gen_aligned_loadqi (subtarget, aligned_mem,
  1828. bitnum, scratch);
  1829. else
  1830. seq = gen_aligned_loadhi (subtarget, aligned_mem,
  1831. bitnum, scratch);
  1832. emit_insn (seq);
  1833. if (copyout)
  1834. emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
  1835. }
  1836. }
  1837. else
  1838. {
  1839. /* Don't pass these as parameters since that makes the generated
  1840. code depend on parameter evaluation order which will cause
  1841. bootstrap failures. */
  1842. rtx temp1, temp2, subtarget, ua;
  1843. bool copyout;
  1844. temp1 = gen_reg_rtx (DImode);
  1845. temp2 = gen_reg_rtx (DImode);
  1846. subtarget = operands[0];
  1847. if (REG_P (subtarget))
  1848. subtarget = gen_lowpart (DImode, subtarget), copyout = false;
  1849. else
  1850. subtarget = gen_reg_rtx (DImode), copyout = true;
  1851. ua = get_unaligned_address (operands[1]);
  1852. if (mode == QImode)
  1853. seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
  1854. else
  1855. seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
  1856. alpha_set_memflags (seq, operands[1]);
  1857. emit_insn (seq);
  1858. if (copyout)
  1859. emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
  1860. }
  1861. return true;
  1862. }
  1863. if (any_memory_operand (operands[0], mode))
  1864. {
  1865. if (aligned_memory_operand (operands[0], mode))
  1866. {
  1867. rtx aligned_mem, bitnum;
  1868. rtx temp1 = gen_reg_rtx (SImode);
  1869. rtx temp2 = gen_reg_rtx (SImode);
  1870. get_aligned_mem (operands[0], &aligned_mem, &bitnum);
  1871. emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
  1872. temp1, temp2));
  1873. }
  1874. else
  1875. {
  1876. rtx temp1 = gen_reg_rtx (DImode);
  1877. rtx temp2 = gen_reg_rtx (DImode);
  1878. rtx temp3 = gen_reg_rtx (DImode);
  1879. rtx ua = get_unaligned_address (operands[0]);
  1880. if (mode == QImode)
  1881. seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
  1882. else
  1883. seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
  1884. alpha_set_memflags (seq, operands[0]);
  1885. emit_insn (seq);
  1886. }
  1887. return true;
  1888. }
  1889. return false;
  1890. }
  1891. /* Implement the movmisalign patterns. One of the operands is a memory
1892. that is not naturally aligned. Emit instructions to load or store it. */
  1893. void
  1894. alpha_expand_movmisalign (enum machine_mode mode, rtx *operands)
  1895. {
1896. /* Honor misaligned loads for the cases we promised to handle. */
  1897. if (MEM_P (operands[1]))
  1898. {
  1899. rtx tmp;
  1900. if (register_operand (operands[0], mode))
  1901. tmp = operands[0];
  1902. else
  1903. tmp = gen_reg_rtx (mode);
  1904. alpha_expand_unaligned_load (tmp, operands[1], 8, 0, 0);
  1905. if (tmp != operands[0])
  1906. emit_move_insn (operands[0], tmp);
  1907. }
  1908. else if (MEM_P (operands[0]))
  1909. {
  1910. if (!reg_or_0_operand (operands[1], mode))
  1911. operands[1] = force_reg (mode, operands[1]);
  1912. alpha_expand_unaligned_store (operands[0], operands[1], 8, 0);
  1913. }
  1914. else
  1915. gcc_unreachable ();
  1916. }
  1917. /* Generate an unsigned DImode to FP conversion. This is the same code
  1918. optabs would emit if we didn't have TFmode patterns.
  1919. For SFmode, this is the only construction I've found that can pass
  1920. gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode
  1921. intermediates will work, because you'll get intermediate rounding
  1922. that ruins the end result. Some of this could be fixed by turning
  1923. on round-to-positive-infinity, but that requires diddling the fpsr,
  1924. which kills performance. I tried turning this around and converting
  1925. to a negative number, so that I could turn on /m, but either I did
1926. it wrong or there's something else, because I wound up with the exact
  1927. same single-bit error. There is a branch-less form of this same code:
  1928. srl $16,1,$1
  1929. and $16,1,$2
  1930. cmplt $16,0,$3
  1931. or $1,$2,$2
  1932. cmovge $16,$16,$2
  1933. itoft $3,$f10
  1934. itoft $2,$f11
  1935. cvtqs $f11,$f11
  1936. adds $f11,$f11,$f0
  1937. fcmoveq $f10,$f11,$f0
  1938. I'm not using it because it's the same number of instructions as
  1939. this branch-full form, and it has more serialized long latency
  1940. instructions on the critical path.
  1941. For DFmode, we can avoid rounding errors by breaking up the word
  1942. into two pieces, converting them separately, and adding them back:
  1943. LC0: .long 0,0x5f800000
  1944. itoft $16,$f11
  1945. lda $2,LC0
  1946. cmplt $16,0,$1
  1947. cpyse $f11,$f31,$f10
  1948. cpyse $f31,$f11,$f11
  1949. s4addq $1,$2,$1
  1950. lds $f12,0($1)
  1951. cvtqt $f10,$f10
  1952. cvtqt $f11,$f11
  1953. addt $f12,$f10,$f0
  1954. addt $f0,$f11,$f0
  1955. This doesn't seem to be a clear-cut win over the optabs form.
  1956. It probably all depends on the distribution of numbers being
  1957. converted -- in the optabs form, all but high-bit-set has a
  1958. much lower minimum execution time. */
  1959. void
  1960. alpha_emit_floatuns (rtx operands[2])
  1961. {
  1962. rtx neglab, donelab, i0, i1, f0, in, out;
  1963. enum machine_mode mode;
  1964. out = operands[0];
  1965. in = force_reg (DImode, operands[1]);
  1966. mode = GET_MODE (out);
  1967. neglab = gen_label_rtx ();
  1968. donelab = gen_label_rtx ();
  1969. i0 = gen_reg_rtx (DImode);
  1970. i1 = gen_reg_rtx (DImode);
  1971. f0 = gen_reg_rtx (mode);
  1972. emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
  1973. emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  1974. emit_jump_insn (gen_jump (donelab));
  1975. emit_barrier ();
  1976. emit_label (neglab);
  1977. emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  1978. emit_insn (gen_anddi3 (i1, in, const1_rtx));
  1979. emit_insn (gen_iordi3 (i0, i0, i1));
  1980. emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
  1981. emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));
  1982. emit_label (donelab);
  1983. }
  1984. /* Generate the comparison for a conditional branch. */
  1985. void
  1986. alpha_emit_conditional_branch (rtx operands[], enum machine_mode cmp_mode)
  1987. {
  1988. enum rtx_code cmp_code, branch_code;
  1989. enum machine_mode branch_mode = VOIDmode;
  1990. enum rtx_code code = GET_CODE (operands[0]);
  1991. rtx op0 = operands[1], op1 = operands[2];
  1992. rtx tem;
  1993. if (cmp_mode == TFmode)
  1994. {
  1995. op0 = alpha_emit_xfloating_compare (&code, op0, op1);
  1996. op1 = const0_rtx;
  1997. cmp_mode = DImode;
  1998. }
  1999. /* The general case: fold the comparison code to the types of compares
  2000. that we have, choosing the branch as necessary. */
  2001. switch (code)
  2002. {
  2003. case EQ: case LE: case LT: case LEU: case LTU:
  2004. case UNORDERED:
  2005. /* We have these compares. */
  2006. cmp_code = code, branch_code = NE;
  2007. break;
  2008. case NE:
  2009. case ORDERED:
  2010. /* These must be reversed. */
  2011. cmp_code = reverse_condition (code), branch_code = EQ;
  2012. break;
  2013. case GE: case GT: case GEU: case GTU:
  2014. /* For FP, we swap them, for INT, we reverse them. */
  2015. if (cmp_mode == DFmode)
  2016. {
  2017. cmp_code = swap_condition (code);
  2018. branch_code = NE;
  2019. tem = op0, op0 = op1, op1 = tem;
  2020. }
  2021. else
  2022. {
  2023. cmp_code = reverse_condition (code);
  2024. branch_code = EQ;
  2025. }
  2026. break;
  2027. default:
  2028. gcc_unreachable ();
  2029. }
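/* An illustrative example (not from the original comments): a signed
   integer "a > b" is reversed to "a <= b" with an EQ branch, i.e.
   cmple a,b,t; beq t,label, while the DFmode case swaps the operands
   instead so that the compare stays within the set the FPU provides. */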
  2030. if (cmp_mode == DFmode)
  2031. {
  2032. if (flag_unsafe_math_optimizations && cmp_code != UNORDERED)
  2033. {
  2034. /* When we are not as concerned about non-finite values, and we
  2035. are comparing against zero, we can branch directly. */
  2036. if (op1 == CONST0_RTX (DFmode))
  2037. cmp_code = UNKNOWN, branch_code = code;
  2038. else if (op0 == CONST0_RTX (DFmode))
  2039. {
  2040. /* Undo the swap we probably did just above. */
  2041. tem = op0, op0 = op1, op1 = tem;
  2042. branch_code = swap_condition (cmp_code);
  2043. cmp_code = UNKNOWN;
  2044. }
  2045. }
  2046. else
  2047. {
  2048. /* ??? We mark the branch mode to be CCmode to prevent the
  2049. compare and branch from being combined, since the compare
  2050. insn follows IEEE rules that the branch does not. */
  2051. branch_mode = CCmode;
  2052. }
  2053. }
  2054. else
  2055. {
  2056. /* The following optimizations are only for signed compares. */
  2057. if (code != LEU && code != LTU && code != GEU && code != GTU)
  2058. {
  2059. /* Whee. Compare and branch against 0 directly. */
  2060. if (op1 == const0_rtx)
  2061. cmp_code = UNKNOWN, branch_code = code;
2062. /* If the constant doesn't fit into an immediate, but can
  2063. be generated by lda/ldah, we adjust the argument and
  2064. compare against zero, so we can use beq/bne directly. */
  2065. /* ??? Don't do this when comparing against symbols, otherwise
  2066. we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will
  2067. be declared false out of hand (at least for non-weak). */
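/* An illustrative example (constants chosen here): for "x == 1000" the
   constant does not fit the 8-bit literal, but -1000 fits lda, so this
   emits lda t,-1000(x); beq t,label instead of first loading 1000 into a
   register. */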
  2068. else if (CONST_INT_P (op1)
  2069. && (code == EQ || code == NE)
  2070. && !(symbolic_operand (op0, VOIDmode)
  2071. || (REG_P (op0) && REG_POINTER (op0))))
  2072. {
  2073. rtx n_op1 = GEN_INT (-INTVAL (op1));
  2074. if (! satisfies_constraint_I (op1)
  2075. && (satisfies_constraint_K (n_op1)
  2076. || satisfies_constraint_L (n_op1)))
  2077. cmp_code = PLUS, branch_code = code, op1 = n_op1;
  2078. }
  2079. }
  2080. if (!reg_or_0_operand (op0, DImode))
  2081. op0 = force_reg (DImode, op0);
  2082. if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode))
  2083. op1 = force_reg (DImode, op1);
  2084. }
  2085. /* Emit an initial compare instruction, if necessary. */
  2086. tem = op0;
  2087. if (cmp_code != UNKNOWN)
  2088. {
  2089. tem = gen_reg_rtx (cmp_mode);
  2090. emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
  2091. }
  2092. /* Emit the branch instruction. */
  2093. tem = gen_rtx_SET (VOIDmode, pc_rtx,
  2094. gen_rtx_IF_THEN_ELSE (VOIDmode,
  2095. gen_rtx_fmt_ee (branch_code,
  2096. branch_mode, tem,
  2097. CONST0_RTX (cmp_mode)),
  2098. gen_rtx_LABEL_REF (VOIDmode,
  2099. operands[3]),
  2100. pc_rtx));
  2101. emit_jump_insn (tem);
  2102. }
  2103. /* Certain simplifications can be done to make invalid setcc operations
2104. valid. Return true if we emitted the setcc, or false if the comparison cannot be handled. */
  2105. bool
  2106. alpha_emit_setcc (rtx operands[], enum machine_mode cmp_mode)
  2107. {
  2108. enum rtx_code cmp_code;
  2109. enum rtx_code code = GET_CODE (operands[1]);
  2110. rtx op0 = operands[2], op1 = operands[3];
  2111. rtx tmp;
  2112. if (cmp_mode == TFmode)
  2113. {
  2114. op0 = alpha_emit_xfloating_compare (&code, op0, op1);
  2115. op1 = const0_rtx;
  2116. cmp_mode = DImode;
  2117. }
  2118. if (cmp_mode == DFmode && !TARGET_FIX)
  2119. return 0;
  2120. /* The general case: fold the comparison code to the types of compares
  2121. that we have, choosing the branch as necessary. */
  2122. cmp_code = UNKNOWN;
  2123. switch (code)
  2124. {
  2125. case EQ: case LE: case LT: case LEU: case LTU:
  2126. case UNORDERED:
  2127. /* We have these compares. */
  2128. if (cmp_mode == DFmode)
  2129. cmp_code = code, code = NE;
  2130. break;
  2131. case NE:
  2132. if (cmp_mode == DImode && op1 == const0_rtx)
  2133. break;
  2134. /* FALLTHRU */
  2135. case ORDERED:
  2136. cmp_code = reverse_condition (code);
  2137. code = EQ;
  2138. break;
  2139. case GE: case GT: case GEU: case GTU:
  2140. /* These normally need swapping, but for integer zero we have
  2141. special patterns that recognize swapped operands. */
  2142. if (cmp_mode == DImode && op1 == const0_rtx)
  2143. break;
  2144. code = swap_condition (code);
  2145. if (cmp_mode == DFmode)
  2146. cmp_code = code, code = NE;
  2147. tmp = op0, op0 = op1, op1 = tmp;
  2148. break;
  2149. default:
  2150. gcc_unreachable ();
  2151. }
  2152. if (cmp_mode == DImode)
  2153. {
  2154. if (!register_operand (op0, DImode))
  2155. op0 = force_reg (DImode, op0);
  2156. if (!reg_or_8bit_operand (op1, DImode))
  2157. op1 = force_reg (DImode, op1);
  2158. }
  2159. /* Emit an initial compare instruction, if necessary. */
  2160. if (cmp_code != UNKNOWN)
  2161. {
  2162. tmp = gen_reg_rtx (cmp_mode);
  2163. emit_insn (gen_rtx_SET (VOIDmode, tmp,
  2164. gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)));
  2165. op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp;
  2166. op1 = const0_rtx;
  2167. }
  2168. /* Emit the setcc instruction. */
  2169. emit_insn (gen_rtx_SET (VOIDmode, operands[0],
  2170. gen_rtx_fmt_ee (code, DImode, op0, op1)));
  2171. return true;
  2172. }
  2173. /* Rewrite a comparison against zero CMP of the form
  2174. (CODE (cc0) (const_int 0)) so it can be written validly in
  2175. a conditional move (if_then_else CMP ...).
  2176. If both of the operands that set cc0 are nonzero we must emit
  2177. an insn to perform the compare (it can't be done within
  2178. the conditional move). */
  2179. rtx
  2180. alpha_emit_conditional_move (rtx cmp, enum machine_mode mode)
  2181. {
  2182. enum rtx_code code = GET_CODE (cmp);
  2183. enum rtx_code cmov_code = NE;
  2184. rtx op0 = XEXP (cmp, 0);
  2185. rtx op1 = XEXP (cmp, 1);
  2186. enum machine_mode cmp_mode
  2187. = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
  2188. enum machine_mode cmov_mode = VOIDmode;
  2189. int local_fast_math = flag_unsafe_math_optimizations;
  2190. rtx tem;
  2191. if (cmp_mode == TFmode)
  2192. {
  2193. op0 = alpha_emit_xfloating_compare (&code, op0, op1);
  2194. op1 = const0_rtx;
  2195. cmp_mode = DImode;
  2196. }
  2197. gcc_assert (cmp_mode == DFmode || cmp_mode == DImode);
  2198. if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode))
  2199. {
  2200. enum rtx_code cmp_code;
  2201. if (! TARGET_FIX)
  2202. return 0;
  2203. /* If we have fp<->int register move instructions, do a cmov by
  2204. performing the comparison in fp registers, and move the
  2205. zero/nonzero value to integer registers, where we can then
  2206. use a normal cmov, or vice-versa. */
  2207. switch (code)
  2208. {
  2209. case EQ: case LE: case LT: case LEU: case LTU:
  2210. case UNORDERED:
  2211. /* We have these compares. */
  2212. cmp_code = code, code = NE;
  2213. break;
  2214. case NE:
  2215. case ORDERED:
  2216. /* These must be reversed. */
  2217. cmp_code = reverse_condition (code), code = EQ;
  2218. break;
  2219. case GE: case GT: case GEU: case GTU:
  2220. /* These normally need swapping, but for integer zero we have
  2221. special patterns that recognize swapped operands. */
  2222. if (cmp_mode == DImode && op1 == const0_rtx)
  2223. cmp_code = code, code = NE;
  2224. else
  2225. {
  2226. cmp_code = swap_condition (code);
  2227. code = NE;
  2228. tem = op0, op0 = op1, op1 = tem;
  2229. }
  2230. break;
  2231. default:
  2232. gcc_unreachable ();
  2233. }
  2234. if (cmp_mode == DImode)
  2235. {
  2236. if (!reg_or_0_operand (op0, DImode))
  2237. op0 = force_reg (DImode, op0);
  2238. if (!reg_or_8bit_operand (op1, DImode))
  2239. op1 = force_reg (DImode, op1);
  2240. }
  2241. tem = gen_reg_rtx (cmp_mode);
  2242. emit_insn (gen_rtx_SET (VOIDmode, tem,
  2243. gen_rtx_fmt_ee (cmp_code, cmp_mode,
  2244. op0, op1)));
  2245. cmp_mode = cmp_mode == DImode ? DFmode : DImode;
  2246. op0 = gen_lowpart (cmp_mode, tem);
  2247. op1 = CONST0_RTX (cmp_mode);
  2248. local_fast_math = 1;
  2249. }
  2250. if (cmp_mode == DImode)
  2251. {
  2252. if (!reg_or_0_operand (op0, DImode))
  2253. op0 = force_reg (DImode, op0);
  2254. if (!reg_or_8bit_operand (op1, DImode))
  2255. op1 = force_reg (DImode, op1);
  2256. }
  2257. /* We may be able to use a conditional move directly.
  2258. This avoids emitting spurious compares. */
  2259. if (signed_comparison_operator (cmp, VOIDmode)
  2260. && (cmp_mode == DImode || local_fast_math)
  2261. && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
  2262. return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
  2263. /* We can't put the comparison inside the conditional move;
  2264. emit a compare instruction and put that inside the
  2265. conditional move. Make sure we emit only comparisons we have;
  2266. swap or reverse as necessary. */
  2267. if (!can_create_pseudo_p ())
  2268. return NULL_RTX;
  2269. switch (code)
  2270. {
  2271. case EQ: case LE: case LT: case LEU: case LTU:
  2272. case UNORDERED:
  2273. /* We have these compares: */
  2274. break;
  2275. case NE:
  2276. case ORDERED:
  2277. /* These must be reversed. */
  2278. code = reverse_condition (code);
  2279. cmov_code = EQ;
  2280. break;
  2281. case GE: case GT: case GEU: case GTU:
  2282. /* These must be swapped. */
  2283. if (op1 != CONST0_RTX (cmp_mode))
  2284. {
  2285. code = swap_condition (code);
  2286. tem = op0, op0 = op1, op1 = tem;
  2287. }
  2288. break;
  2289. default:
  2290. gcc_unreachable ();
  2291. }
  2292. if (cmp_mode == DImode)
  2293. {
  2294. if (!reg_or_0_operand (op0, DImode))
  2295. op0 = force_reg (DImode, op0);
  2296. if (!reg_or_8bit_operand (op1, DImode))
  2297. op1 = force_reg (DImode, op1);
  2298. }
  2299. /* ??? We mark the branch mode to be CCmode to prevent the compare
  2300. and cmov from being combined, since the compare insn follows IEEE
  2301. rules that the cmov does not. */
  2302. if (cmp_mode == DFmode && !local_fast_math)
  2303. cmov_mode = CCmode;
  2304. tem = gen_reg_rtx (cmp_mode);
  2305. emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1));
  2306. return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode));
  2307. }
  2308. /* Simplify a conditional move of two constants into a setcc with
  2309. arithmetic. This is done with a splitter since combine would
  2310. just undo the work if done during code generation. It also catches
  2311. cases we wouldn't have before cse. */
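/* Illustrative examples (constants chosen here): "x = cond ? 8 : 0"
   becomes a setcc of cond followed by a left shift by 3, and
   "x = cond ? 12 : 8" becomes a setcc followed by an s4addq-style
   multiply-and-add, avoiding the cmov entirely. */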
  2312. int
  2313. alpha_split_conditional_move (enum rtx_code code, rtx dest, rtx cond,
  2314. rtx t_rtx, rtx f_rtx)
  2315. {
  2316. HOST_WIDE_INT t, f, diff;
  2317. enum machine_mode mode;
  2318. rtx target, subtarget, tmp;
  2319. mode = GET_MODE (dest);
  2320. t = INTVAL (t_rtx);
  2321. f = INTVAL (f_rtx);
  2322. diff = t - f;
  2323. if (((code == NE || code == EQ) && diff < 0)
  2324. || (code == GE || code == GT))
  2325. {
  2326. code = reverse_condition (code);
  2327. diff = t, t = f, f = diff;
  2328. diff = t - f;
  2329. }
  2330. subtarget = target = dest;
  2331. if (mode != DImode)
  2332. {
  2333. target = gen_lowpart (DImode, dest);
  2334. if (can_create_pseudo_p ())
  2335. subtarget = gen_reg_rtx (DImode);
  2336. else
  2337. subtarget = target;
  2338. }
  2339. /* Below, we must be careful to use copy_rtx on target and subtarget
  2340. in intermediate insns, as they may be a subreg rtx, which may not
  2341. be shared. */
  2342. if (f == 0 && exact_log2 (diff) > 0
  2343. /* On EV6, we've got enough shifters to make non-arithmetic shifts
  2344. viable over a longer latency cmove. On EV5, the E0 slot is a
  2345. scarce resource, and on EV4 shift has the same latency as a cmove. */
  2346. && (diff <= 8 || alpha_tune == PROCESSOR_EV6))
  2347. {
  2348. tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
  2349. emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
  2350. tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget),
  2351. GEN_INT (exact_log2 (t)));
  2352. emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
  2353. }
  2354. else if (f == 0 && t == -1)
  2355. {
  2356. tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
  2357. emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
  2358. emit_insn (gen_negdi2 (target, copy_rtx (subtarget)));
  2359. }
  2360. else if (diff == 1 || diff == 4 || diff == 8)
  2361. {
  2362. rtx add_op;
  2363. tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx);
  2364. emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (subtarget), tmp));
  2365. if (diff == 1)
  2366. emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f)));
  2367. else
  2368. {
  2369. add_op = GEN_INT (f);
  2370. if (sext_add_operand (add_op, mode))
  2371. {
  2372. tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget),
  2373. GEN_INT (diff));
  2374. tmp = gen_rtx_PLUS (DImode, tmp, add_op);
  2375. emit_insn (gen_rtx_SET (VOIDmode, target, tmp));
  2376. }
  2377. else
  2378. return 0;
  2379. }
  2380. }
  2381. else
  2382. return 0;
  2383. return 1;
  2384. }
2385. /* Look up the X_floating library function name for the
  2386. given operation. */
  2387. struct GTY(()) xfloating_op
  2388. {
  2389. const enum rtx_code code;
  2390. const char *const GTY((skip)) osf_func;
  2391. const char *const GTY((skip)) vms_func;
  2392. rtx libcall;
  2393. };
  2394. static GTY(()) struct xfloating_op xfloating_ops[] =
  2395. {
  2396. { PLUS, "_OtsAddX", "OTS$ADD_X", 0 },
  2397. { MINUS, "_OtsSubX", "OTS$SUB_X", 0 },
  2398. { MULT, "_OtsMulX", "OTS$MUL_X", 0 },
  2399. { DIV, "_OtsDivX", "OTS$DIV_X", 0 },
  2400. { EQ, "_OtsEqlX", "OTS$EQL_X", 0 },
  2401. { NE, "_OtsNeqX", "OTS$NEQ_X", 0 },
  2402. { LT, "_OtsLssX", "OTS$LSS_X", 0 },
  2403. { LE, "_OtsLeqX", "OTS$LEQ_X", 0 },
  2404. { GT, "_OtsGtrX", "OTS$GTR_X", 0 },
  2405. { GE, "_OtsGeqX", "OTS$GEQ_X", 0 },
  2406. { FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0 },
  2407. { FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0 },
  2408. { UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0 },
  2409. { FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0 },
  2410. { FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0 }
  2411. };
  2412. static GTY(()) struct xfloating_op vax_cvt_ops[] =
  2413. {
  2414. { FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0 },
  2415. { FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0 }
  2416. };
  2417. static rtx
  2418. alpha_lookup_xfloating_lib_func (enum rtx_code code)
  2419. {
  2420. struct xfloating_op *ops = xfloating_ops;
  2421. long n = ARRAY_SIZE (xfloating_ops);
  2422. long i;
  2423. gcc_assert (TARGET_HAS_XFLOATING_LIBS);
  2424. /* How irritating. Nothing to key off for the main table. */
  2425. if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE))
  2426. {
  2427. ops = vax_cvt_ops;
  2428. n = ARRAY_SIZE (vax_cvt_ops);
  2429. }
  2430. for (i = 0; i < n; ++i, ++ops)
  2431. if (ops->code == code)
  2432. {
  2433. rtx func = ops->libcall;
  2434. if (!func)
  2435. {
  2436. func = init_one_libfunc (TARGET_ABI_OPEN_VMS
  2437. ? ops->vms_func : ops->osf_func);
  2438. ops->libcall = func;
  2439. }
  2440. return func;
  2441. }
  2442. gcc_unreachable ();
  2443. }
  2444. /* Most X_floating operations take the rounding mode as an argument.
  2445. Compute that here. */
  2446. static int
  2447. alpha_compute_xfloating_mode_arg (enum rtx_code code,
  2448. enum alpha_fp_rounding_mode round)
  2449. {
  2450. int mode;
  2451. switch (round)
  2452. {
  2453. case ALPHA_FPRM_NORM:
  2454. mode = 2;
  2455. break;
  2456. case ALPHA_FPRM_MINF:
  2457. mode = 1;
  2458. break;
  2459. case ALPHA_FPRM_CHOP:
  2460. mode = 0;
  2461. break;
  2462. case ALPHA_FPRM_DYN:
  2463. mode = 4;
  2464. break;
  2465. default:
  2466. gcc_unreachable ();
  2467. /* XXX For reference, round to +inf is mode = 3. */
  2468. }
  2469. if (code == FLOAT_TRUNCATE && alpha_fptm == ALPHA_FPTM_N)
  2470. mode |= 0x10000;
  2471. return mode;
  2472. }
  2473. /* Emit an X_floating library function call.
  2474. Note that these functions do not follow normal calling conventions:
  2475. TFmode arguments are passed in two integer registers (as opposed to
  2476. indirect); TFmode return values appear in R16+R17.
  2477. FUNC is the function to call.
  2478. TARGET is where the output belongs.
  2479. OPERANDS are the inputs.
  2480. NOPERANDS is the count of inputs.
  2481. EQUIV is the expression equivalent for the function.
  2482. */
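/* An illustrative example (register assignments read off the code below):
   for the TFmode add, the two TFmode inputs land in $16/$17 and $18/$19,
   the DImode rounding-mode argument in $20, and the TFmode result comes
   back in $16/$17 as noted above. */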
  2483. static void
  2484. alpha_emit_xfloating_libcall (rtx func, rtx target, rtx operands[],
  2485. int noperands, rtx equiv)
  2486. {
  2487. rtx usage = NULL_RTX, tmp, reg;
  2488. int regno = 16, i;
  2489. start_sequence ();
  2490. for (i = 0; i < noperands; ++i)
  2491. {
  2492. switch (GET_MODE (operands[i]))
  2493. {
  2494. case TFmode:
  2495. reg = gen_rtx_REG (TFmode, regno);
  2496. regno += 2;
  2497. break;
  2498. case DFmode:
  2499. reg = gen_rtx_REG (DFmode, regno + 32);
  2500. regno += 1;
  2501. break;
  2502. case VOIDmode:
  2503. gcc_assert (CONST_INT_P (operands[i]));
  2504. /* FALLTHRU */
  2505. case DImode:
  2506. reg = gen_rtx_REG (DImode, regno);
  2507. regno += 1;
  2508. break;
  2509. default:
  2510. gcc_unreachable ();
  2511. }
  2512. emit_move_insn (reg, operands[i]);
  2513. use_reg (&usage, reg);
  2514. }
  2515. switch (GET_MODE (target))
  2516. {
  2517. case TFmode:
  2518. reg = gen_rtx_REG (TFmode, 16);
  2519. break;
  2520. case DFmode:
  2521. reg = gen_rtx_REG (DFmode, 32);
  2522. break;
  2523. case DImode:
  2524. reg = gen_rtx_REG (DImode, 0);
  2525. break;
  2526. default:
  2527. gcc_unreachable ();
  2528. }
  2529. tmp = gen_rtx_MEM (QImode, func);
  2530. tmp = emit_call_insn (GEN_CALL_VALUE (reg, tmp, const0_rtx,
  2531. const0_rtx, const0_rtx));
  2532. CALL_INSN_FUNCTION_USAGE (tmp) = usage;
  2533. RTL_CONST_CALL_P (tmp) = 1;
  2534. tmp = get_insns ();
  2535. end_sequence ();
  2536. emit_libcall_block (tmp, target, reg, equiv);
  2537. }
  2538. /* Emit an X_floating library function call for arithmetic (+,-,*,/). */
  2539. void
  2540. alpha_emit_xfloating_arith (enum rtx_code code, rtx operands[])
  2541. {
  2542. rtx func;
  2543. int mode;
  2544. rtx out_operands[3];
  2545. func = alpha_lookup_xfloating_lib_func (code);
  2546. mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
  2547. out_operands[0] = operands[1];
  2548. out_operands[1] = operands[2];
  2549. out_operands[2] = GEN_INT (mode);
  2550. alpha_emit_xfloating_libcall (func, operands[0], out_operands, 3,
  2551. gen_rtx_fmt_ee (code, TFmode, operands[1],
  2552. operands[2]));
  2553. }
  2554. /* Emit an X_floating library function call for a comparison. */
  2555. static rtx
  2556. alpha_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1)
  2557. {
  2558. enum rtx_code cmp_code, res_code;
  2559. rtx func, out, operands[2], note;
  2560. /* X_floating library comparison functions return
  2561. -1 unordered
  2562. 0 false
  2563. 1 true
  2564. Convert the compare against the raw return value. */
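/* An illustrative example (read off the mapping below): LT calls the
   "less than" routine and tests its result with GT, i.e. "result > 0"
   means true, while UNORDERED calls the EQ routine and tests
   "result < 0", since only an unordered pair makes it return -1. */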
  2565. cmp_code = *pcode;
  2566. switch (cmp_code)
  2567. {
  2568. case UNORDERED:
  2569. cmp_code = EQ;
  2570. res_code = LT;
  2571. break;
  2572. case ORDERED:
  2573. cmp_code = EQ;
  2574. res_code = GE;
  2575. break;
  2576. case NE:
  2577. res_code = NE;
  2578. break;
  2579. case EQ:
  2580. case LT:
  2581. case GT:
  2582. case LE:
  2583. case GE:
  2584. res_code = GT;
  2585. break;
  2586. default:
  2587. gcc_unreachable ();
  2588. }
  2589. *pcode = res_code;
  2590. func = alpha_lookup_xfloating_lib_func (cmp_code);
  2591. operands[0] = op0;
  2592. operands[1] = op1;
  2593. out = gen_reg_rtx (DImode);
  2594. /* What's actually returned is -1,0,1, not a proper boolean value,
  2595. so use an EXPR_LIST as with a generic libcall instead of a
  2596. comparison type expression. */
  2597. note = gen_rtx_EXPR_LIST (VOIDmode, op1, NULL_RTX);
  2598. note = gen_rtx_EXPR_LIST (VOIDmode, op0, note);
  2599. note = gen_rtx_EXPR_LIST (VOIDmode, func, note);
  2600. alpha_emit_xfloating_libcall (func, out, operands, 2, note);
  2601. return out;
  2602. }
  2603. /* Emit an X_floating library function call for a conversion. */
  2604. void
  2605. alpha_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[])
  2606. {
  2607. int noperands = 1, mode;
  2608. rtx out_operands[2];
  2609. rtx func;
  2610. enum rtx_code code = orig_code;
  2611. if (code == UNSIGNED_FIX)
  2612. code = FIX;
  2613. func = alpha_lookup_xfloating_lib_func (code);
  2614. out_operands[0] = operands[1];
  2615. switch (code)
  2616. {
  2617. case FIX:
  2618. mode = alpha_compute_xfloating_mode_arg (code, ALPHA_FPRM_CHOP);
  2619. out_operands[1] = GEN_INT (mode);
  2620. noperands = 2;
  2621. break;
  2622. case FLOAT_TRUNCATE:
  2623. mode = alpha_compute_xfloating_mode_arg (code, alpha_fprm);
  2624. out_operands[1] = GEN_INT (mode);
  2625. noperands = 2;
  2626. break;
  2627. default:
  2628. break;
  2629. }
  2630. alpha_emit_xfloating_libcall (func, operands[0], out_operands, noperands,
  2631. gen_rtx_fmt_e (orig_code,
  2632. GET_MODE (operands[0]),
  2633. operands[1]));
  2634. }
  2635. /* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of
  2636. DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true,
  2637. guarantee that the sequence
  2638. set (OP[0] OP[2])
  2639. set (OP[1] OP[3])
  2640. is valid. Naturally, output operand ordering is little-endian.
  2641. This is used by *movtf_internal and *movti_internal. */
  2642. void
  2643. alpha_split_tmode_pair (rtx operands[4], enum machine_mode mode,
  2644. bool fixup_overlap)
  2645. {
  2646. switch (GET_CODE (operands[1]))
  2647. {
  2648. case REG:
  2649. operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
  2650. operands[2] = gen_rtx_REG (DImode, REGNO (operands[1]));
  2651. break;
  2652. case MEM:
  2653. operands[3] = adjust_address (operands[1], DImode, 8);
  2654. operands[2] = adjust_address (operands[1], DImode, 0);
  2655. break;
  2656. case CONST_INT:
  2657. case CONST_DOUBLE:
  2658. gcc_assert (operands[1] == CONST0_RTX (mode));
  2659. operands[2] = operands[3] = const0_rtx;
  2660. break;
  2661. default:
  2662. gcc_unreachable ();
  2663. }
  2664. switch (GET_CODE (operands[0]))
  2665. {
  2666. case REG:
  2667. operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1);
  2668. operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
  2669. break;
  2670. case MEM:
  2671. operands[1] = adjust_address (operands[0], DImode, 8);
  2672. operands[0] = adjust_address (operands[0], DImode, 0);
  2673. break;
  2674. default:
  2675. gcc_unreachable ();
  2676. }
  2677. if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3]))
  2678. {
  2679. rtx tmp;
  2680. tmp = operands[0], operands[0] = operands[1], operands[1] = tmp;
  2681. tmp = operands[2], operands[2] = operands[3], operands[3] = tmp;
  2682. }
  2683. }
  2684. /* Implement negtf2 or abstf2. Op0 is destination, op1 is source,
  2685. op2 is a register containing the sign bit, operation is the
  2686. logical operation to be performed. */
  2687. void
  2688. alpha_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx))
  2689. {
  2690. rtx high_bit = operands[2];
  2691. rtx scratch;
  2692. int move;
  2693. alpha_split_tmode_pair (operands, TFmode, false);
  2694. /* Detect three flavors of operand overlap. */
  2695. move = 1;
  2696. if (rtx_equal_p (operands[0], operands[2]))
  2697. move = 0;
  2698. else if (rtx_equal_p (operands[1], operands[2]))
  2699. {
  2700. if (rtx_equal_p (operands[0], high_bit))
  2701. move = 2;
  2702. else
  2703. move = -1;
  2704. }
  2705. if (move < 0)
  2706. emit_move_insn (operands[0], operands[2]);
  2707. /* ??? If the destination overlaps both source tf and high_bit, then
  2708. assume source tf is dead in its entirety and use the other half
  2709. for a scratch register. Otherwise "scratch" is just the proper
  2710. destination register. */
  2711. scratch = operands[move < 2 ? 1 : 3];
  2712. emit_insn ((*operation) (scratch, high_bit, operands[3]));
  2713. if (move > 0)
  2714. {
  2715. emit_move_insn (operands[0], operands[2]);
  2716. if (move > 1)
  2717. emit_move_insn (operands[1], scratch);
  2718. }
  2719. }
  2720. /* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
  2721. unaligned data:
  2722. unsigned: signed:
  2723. word: ldq_u r1,X(r11) ldq_u r1,X(r11)
  2724. ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
  2725. lda r3,X(r11) lda r3,X+2(r11)
  2726. extwl r1,r3,r1 extql r1,r3,r1
  2727. extwh r2,r3,r2 extqh r2,r3,r2
2728. or r1,r2,r1 or r1,r2,r1
  2729. sra r1,48,r1
  2730. long: ldq_u r1,X(r11) ldq_u r1,X(r11)
  2731. ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
  2732. lda r3,X(r11) lda r3,X(r11)
  2733. extll r1,r3,r1 extll r1,r3,r1
  2734. extlh r2,r3,r2 extlh r2,r3,r2
2735. or r1,r2,r1 addl r1,r2,r1
  2736. quad: ldq_u r1,X(r11)
  2737. ldq_u r2,X+7(r11)
  2738. lda r3,X(r11)
  2739. extql r1,r3,r1
  2740. extqh r2,r3,r2
2741. or r1,r2,r1
  2742. */
  2743. void
  2744. alpha_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size,
  2745. HOST_WIDE_INT ofs, int sign)
  2746. {
  2747. rtx meml, memh, addr, extl, exth, tmp, mema;
  2748. enum machine_mode mode;
  2749. if (TARGET_BWX && size == 2)
  2750. {
  2751. meml = adjust_address (mem, QImode, ofs);
  2752. memh = adjust_address (mem, QImode, ofs+1);
  2753. extl = gen_reg_rtx (DImode);
  2754. exth = gen_reg_rtx (DImode);
  2755. emit_insn (gen_zero_extendqidi2 (extl, meml));
  2756. emit_insn (gen_zero_extendqidi2 (exth, memh));
  2757. exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8),
  2758. NULL, 1, OPTAB_LIB_WIDEN);
  2759. addr = expand_simple_binop (DImode, IOR, extl, exth,
  2760. NULL, 1, OPTAB_LIB_WIDEN);
  2761. if (sign && GET_MODE (tgt) != HImode)
  2762. {
  2763. addr = gen_lowpart (HImode, addr);
  2764. emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0));
  2765. }
  2766. else
  2767. {
  2768. if (GET_MODE (tgt) != DImode)
  2769. addr = gen_lowpart (GET_MODE (tgt), addr);
  2770. emit_move_insn (tgt, addr);
  2771. }
  2772. return;
  2773. }
  2774. meml = gen_reg_rtx (DImode);
  2775. memh = gen_reg_rtx (DImode);
  2776. addr = gen_reg_rtx (DImode);
  2777. extl = gen_reg_rtx (DImode);
  2778. exth = gen_reg_rtx (DImode);
  2779. mema = XEXP (mem, 0);
  2780. if (GET_CODE (mema) == LO_SUM)
  2781. mema = force_reg (Pmode, mema);
  2782. /* AND addresses cannot be in any alias set, since they may implicitly
  2783. alias surrounding code. Ideally we'd have some alias set that
  2784. covered all types except those with alignment 8 or higher. */
  2785. tmp = change_address (mem, DImode,
  2786. gen_rtx_AND (DImode,
  2787. plus_constant (DImode, mema, ofs),
  2788. GEN_INT (-8)));
  2789. set_mem_alias_set (tmp, 0);
  2790. emit_move_insn (meml, tmp);
  2791. tmp = change_address (mem, DImode,
  2792. gen_rtx_AND (DImode,
  2793. plus_constant (DImode, mema,
  2794. ofs + size - 1),
  2795. GEN_INT (-8)));
  2796. set_mem_alias_set (tmp, 0);
  2797. emit_move_insn (memh, tmp);
  2798. if (sign && size == 2)
  2799. {
  2800. emit_move_insn (addr, plus_constant (Pmode, mema, ofs+2));
  2801. emit_insn (gen_extql (extl, meml, addr));
  2802. emit_insn (gen_extqh (exth, memh, addr));
  2803. /* We must use tgt here for the target. Alpha-vms port fails if we use
  2804. addr for the target, because addr is marked as a pointer and combine
  2805. knows that pointers are always sign-extended 32-bit values. */
  2806. addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
  2807. addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
  2808. addr, 1, OPTAB_WIDEN);
  2809. }
  2810. else
  2811. {
  2812. emit_move_insn (addr, plus_constant (Pmode, mema, ofs));
  2813. emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
  2814. switch ((int) size)
  2815. {
  2816. case 2:
  2817. emit_insn (gen_extwh (exth, memh, addr));
  2818. mode = HImode;
  2819. break;
  2820. case 4:
  2821. emit_insn (gen_extlh (exth, memh, addr));
  2822. mode = SImode;
  2823. break;
  2824. case 8:
  2825. emit_insn (gen_extqh (exth, memh, addr));
  2826. mode = DImode;
  2827. break;
  2828. default:
  2829. gcc_unreachable ();
  2830. }
  2831. addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
  2832. gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
  2833. sign, OPTAB_WIDEN);
  2834. }
  2835. if (addr != tgt)
  2836. emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr));
  2837. }
  2838. /* Similarly, use ins and msk instructions to perform unaligned stores. */
  2839. void
  2840. alpha_expand_unaligned_store (rtx dst, rtx src,
  2841. HOST_WIDE_INT size, HOST_WIDE_INT ofs)
  2842. {
  2843. rtx dstl, dsth, addr, insl, insh, meml, memh, dsta;
  2844. if (TARGET_BWX && size == 2)
  2845. {
  2846. if (src != const0_rtx)
  2847. {
  2848. dstl = gen_lowpart (QImode, src);
  2849. dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8),
  2850. NULL, 1, OPTAB_LIB_WIDEN);
  2851. dsth = gen_lowpart (QImode, dsth);
  2852. }
  2853. else
  2854. dstl = dsth = const0_rtx;
  2855. meml = adjust_address (dst, QImode, ofs);
  2856. memh = adjust_address (dst, QImode, ofs+1);
  2857. emit_move_insn (meml, dstl);
  2858. emit_move_insn (memh, dsth);
  2859. return;
  2860. }
  2861. dstl = gen_reg_rtx (DImode);
  2862. dsth = gen_reg_rtx (DImode);
  2863. insl = gen_reg_rtx (DImode);
  2864. insh = gen_reg_rtx (DImode);
  2865. dsta = XEXP (dst, 0);
  2866. if (GET_CODE (dsta) == LO_SUM)
  2867. dsta = force_reg (Pmode, dsta);
  2868. /* AND addresses cannot be in any alias set, since they may implicitly
  2869. alias surrounding code. Ideally we'd have some alias set that
  2870. covered all types except those with alignment 8 or higher. */
  2871. meml = change_address (dst, DImode,
  2872. gen_rtx_AND (DImode,
  2873. plus_constant (DImode, dsta, ofs),
  2874. GEN_INT (-8)));
  2875. set_mem_alias_set (meml, 0);
  2876. memh = change_address (dst, DImode,
  2877. gen_rtx_AND (DImode,
  2878. plus_constant (DImode, dsta,
  2879. ofs + size - 1),
  2880. GEN_INT (-8)));
  2881. set_mem_alias_set (memh, 0);
  2882. emit_move_insn (dsth, memh);
  2883. emit_move_insn (dstl, meml);
  2884. addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs));
  2885. if (src != CONST0_RTX (GET_MODE (src)))
  2886. {
  2887. emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
  2888. GEN_INT (size*8), addr));
  2889. switch ((int) size)
  2890. {
  2891. case 2:
  2892. emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
  2893. break;
  2894. case 4:
  2895. emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
  2896. break;
  2897. case 8:
  2898. emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr));
  2899. break;
  2900. default:
  2901. gcc_unreachable ();
  2902. }
  2903. }
  2904. emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
  2905. switch ((int) size)
  2906. {
  2907. case 2:
  2908. emit_insn (gen_mskwl (dstl, dstl, addr));
  2909. break;
  2910. case 4:
  2911. emit_insn (gen_mskll (dstl, dstl, addr));
  2912. break;
  2913. case 8:
  2914. emit_insn (gen_mskql (dstl, dstl, addr));
  2915. break;
  2916. default:
  2917. gcc_unreachable ();
  2918. }
  2919. if (src != CONST0_RTX (GET_MODE (src)))
  2920. {
  2921. dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
  2922. dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
  2923. }
  2924. /* Must store high before low for degenerate case of aligned. */
  2925. emit_move_insn (memh, dsth);
  2926. emit_move_insn (meml, dstl);
  2927. }
  2928. /* The block move code tries to maximize speed by separating loads and
  2929. stores at the expense of register pressure: we load all of the data
  2930. before we store it back out. There are two secondary effects worth
2931. mentioning: it speeds copying to/from aligned and unaligned
2932. buffers, and it makes the code significantly easier to write. */
  2933. #define MAX_MOVE_WORDS 8
  2934. /* Load an integral number of consecutive unaligned quadwords. */
  2935. static void
  2936. alpha_expand_unaligned_load_words (rtx *out_regs, rtx smem,
  2937. HOST_WIDE_INT words, HOST_WIDE_INT ofs)
  2938. {
  2939. rtx const im8 = GEN_INT (-8);
  2940. rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
  2941. rtx sreg, areg, tmp, smema;
  2942. HOST_WIDE_INT i;
  2943. smema = XEXP (smem, 0);
  2944. if (GET_CODE (smema) == LO_SUM)
  2945. smema = force_reg (Pmode, smema);
  2946. /* Generate all the tmp registers we need. */
  2947. for (i = 0; i < words; ++i)
  2948. {
  2949. data_regs[i] = out_regs[i];
  2950. ext_tmps[i] = gen_reg_rtx (DImode);
  2951. }
  2952. data_regs[words] = gen_reg_rtx (DImode);
  2953. if (ofs != 0)
  2954. smem = adjust_address (smem, GET_MODE (smem), ofs);
  2955. /* Load up all of the source data. */
  2956. for (i = 0; i < words; ++i)
  2957. {
  2958. tmp = change_address (smem, DImode,
  2959. gen_rtx_AND (DImode,
  2960. plus_constant (DImode, smema, 8*i),
  2961. im8));
  2962. set_mem_alias_set (tmp, 0);
  2963. emit_move_insn (data_regs[i], tmp);
  2964. }
  2965. tmp = change_address (smem, DImode,
  2966. gen_rtx_AND (DImode,
  2967. plus_constant (DImode, smema,
  2968. 8*words - 1),
  2969. im8));
  2970. set_mem_alias_set (tmp, 0);
  2971. emit_move_insn (data_regs[words], tmp);
  2972. /* Extract the half-word fragments. Unfortunately DEC decided to make
  2973. extxh with offset zero a noop instead of zeroing the register, so
  2974. we must take care of that edge condition ourselves with cmov. */
  2975. sreg = copy_addr_to_reg (smema);
  2976. areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
  2977. 1, OPTAB_WIDEN);
  2978. for (i = 0; i < words; ++i)
  2979. {
  2980. emit_insn (gen_extql (data_regs[i], data_regs[i], sreg));
  2981. emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
  2982. emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
  2983. gen_rtx_IF_THEN_ELSE (DImode,
  2984. gen_rtx_EQ (DImode, areg,
  2985. const0_rtx),
  2986. const0_rtx, ext_tmps[i])));
  2987. }
  2988. /* Merge the half-words into whole words. */
  2989. for (i = 0; i < words; ++i)
  2990. {
  2991. out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
  2992. ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
  2993. }
  2994. }
  2995. /* Store an integral number of consecutive unaligned quadwords. DATA_REGS
  2996. may be NULL to store zeros. */
  2997. static void
  2998. alpha_expand_unaligned_store_words (rtx *data_regs, rtx dmem,
  2999. HOST_WIDE_INT words, HOST_WIDE_INT ofs)
  3000. {
  3001. rtx const im8 = GEN_INT (-8);
  3002. rtx ins_tmps[MAX_MOVE_WORDS];
  3003. rtx st_tmp_1, st_tmp_2, dreg;
  3004. rtx st_addr_1, st_addr_2, dmema;
  3005. HOST_WIDE_INT i;
  3006. dmema = XEXP (dmem, 0);
  3007. if (GET_CODE (dmema) == LO_SUM)
  3008. dmema = force_reg (Pmode, dmema);
  3009. /* Generate all the tmp registers we need. */
  3010. if (data_regs != NULL)
  3011. for (i = 0; i < words; ++i)
  3012. ins_tmps[i] = gen_reg_rtx(DImode);
  3013. st_tmp_1 = gen_reg_rtx(DImode);
  3014. st_tmp_2 = gen_reg_rtx(DImode);
  3015. if (ofs != 0)
  3016. dmem = adjust_address (dmem, GET_MODE (dmem), ofs);
  3017. st_addr_2 = change_address (dmem, DImode,
  3018. gen_rtx_AND (DImode,
  3019. plus_constant (DImode, dmema,
  3020. words*8 - 1),
  3021. im8));
  3022. set_mem_alias_set (st_addr_2, 0);
  3023. st_addr_1 = change_address (dmem, DImode,
  3024. gen_rtx_AND (DImode, dmema, im8));
  3025. set_mem_alias_set (st_addr_1, 0);
  3026. /* Load up the destination end bits. */
  3027. emit_move_insn (st_tmp_2, st_addr_2);
  3028. emit_move_insn (st_tmp_1, st_addr_1);
  3029. /* Shift the input data into place. */
  3030. dreg = copy_addr_to_reg (dmema);
  3031. if (data_regs != NULL)
  3032. {
  3033. for (i = words-1; i >= 0; --i)
  3034. {
  3035. emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg));
  3036. emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
  3037. }
  3038. for (i = words-1; i > 0; --i)
  3039. {
  3040. ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
  3041. ins_tmps[i-1], ins_tmps[i-1], 1,
  3042. OPTAB_WIDEN);
  3043. }
  3044. }
  3045. /* Split and merge the ends with the destination data. */
  3046. emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg));
  3047. emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg));
  3048. if (data_regs != NULL)
  3049. {
  3050. st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
  3051. st_tmp_2, 1, OPTAB_WIDEN);
  3052. st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
  3053. st_tmp_1, 1, OPTAB_WIDEN);
  3054. }
  3055. /* Store it all. */
  3056. emit_move_insn (st_addr_2, st_tmp_2);
  3057. for (i = words-1; i > 0; --i)
  3058. {
  3059. rtx tmp = change_address (dmem, DImode,
  3060. gen_rtx_AND (DImode,
  3061. plus_constant (DImode,
  3062. dmema, i*8),
  3063. im8));
  3064. set_mem_alias_set (tmp, 0);
  3065. emit_move_insn (tmp, data_regs ? ins_tmps[i-1] : const0_rtx);
  3066. }
  3067. emit_move_insn (st_addr_1, st_tmp_1);
  3068. }
  3069. /* Expand string/block move operations.
  3070. operands[0] is the pointer to the destination.
  3071. operands[1] is the pointer to the source.
  3072. operands[2] is the number of bytes to move.
  3073. operands[3] is the alignment. */
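/* The strategy is to read the source into registers using the widest
   accesses the source alignment allows (falling back to the unaligned
   load helpers above), then write those registers out using the widest
   stores the destination alignment allows.  Returns 1 if the move was
   expanded, 0 if the block is too large to handle inline.  */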
  3074. int
  3075. alpha_expand_block_move (rtx operands[])
  3076. {
  3077. rtx bytes_rtx = operands[2];
  3078. rtx align_rtx = operands[3];
  3079. HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  3080. HOST_WIDE_INT bytes = orig_bytes;
  3081. HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT;
  3082. HOST_WIDE_INT dst_align = src_align;
  3083. rtx orig_src = operands[1];
  3084. rtx orig_dst = operands[0];
  3085. rtx data_regs[2 * MAX_MOVE_WORDS + 16];
  3086. rtx tmp;
  3087. unsigned int i, words, ofs, nregs = 0;
  3088. if (orig_bytes <= 0)
  3089. return 1;
  3090. else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
  3091. return 0;
  3092. /* Look for additional alignment information from recorded register info. */
  3093. tmp = XEXP (orig_src, 0);
  3094. if (REG_P (tmp))
  3095. src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  3096. else if (GET_CODE (tmp) == PLUS
  3097. && REG_P (XEXP (tmp, 0))
  3098. && CONST_INT_P (XEXP (tmp, 1)))
  3099. {
  3100. unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
  3101. unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
  3102. if (a > src_align)
  3103. {
  3104. if (a >= 64 && c % 8 == 0)
  3105. src_align = 64;
  3106. else if (a >= 32 && c % 4 == 0)
  3107. src_align = 32;
  3108. else if (a >= 16 && c % 2 == 0)
  3109. src_align = 16;
  3110. }
  3111. }
  3112. tmp = XEXP (orig_dst, 0);
  3113. if (REG_P (tmp))
  3114. dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  3115. else if (GET_CODE (tmp) == PLUS
  3116. && REG_P (XEXP (tmp, 0))
  3117. && CONST_INT_P (XEXP (tmp, 1)))
  3118. {
  3119. unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
  3120. unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
  3121. if (a > dst_align)
  3122. {
  3123. if (a >= 64 && c % 8 == 0)
  3124. dst_align = 64;
  3125. else if (a >= 32 && c % 4 == 0)
  3126. dst_align = 32;
  3127. else if (a >= 16 && c % 2 == 0)
  3128. dst_align = 16;
  3129. }
  3130. }
  3131. ofs = 0;
  3132. if (src_align >= 64 && bytes >= 8)
  3133. {
  3134. words = bytes / 8;
  3135. for (i = 0; i < words; ++i)
  3136. data_regs[nregs + i] = gen_reg_rtx (DImode);
  3137. for (i = 0; i < words; ++i)
  3138. emit_move_insn (data_regs[nregs + i],
  3139. adjust_address (orig_src, DImode, ofs + i * 8));
  3140. nregs += words;
  3141. bytes -= words * 8;
  3142. ofs += words * 8;
  3143. }
  3144. if (src_align >= 32 && bytes >= 4)
  3145. {
  3146. words = bytes / 4;
  3147. for (i = 0; i < words; ++i)
  3148. data_regs[nregs + i] = gen_reg_rtx (SImode);
  3149. for (i = 0; i < words; ++i)
  3150. emit_move_insn (data_regs[nregs + i],
  3151. adjust_address (orig_src, SImode, ofs + i * 4));
  3152. nregs += words;
  3153. bytes -= words * 4;
  3154. ofs += words * 4;
  3155. }
  3156. if (bytes >= 8)
  3157. {
  3158. words = bytes / 8;
  3159. for (i = 0; i < words+1; ++i)
  3160. data_regs[nregs + i] = gen_reg_rtx (DImode);
  3161. alpha_expand_unaligned_load_words (data_regs + nregs, orig_src,
  3162. words, ofs);
  3163. nregs += words;
  3164. bytes -= words * 8;
  3165. ofs += words * 8;
  3166. }
  3167. if (! TARGET_BWX && bytes >= 4)
  3168. {
  3169. data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
  3170. alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
  3171. bytes -= 4;
  3172. ofs += 4;
  3173. }
  3174. if (bytes >= 2)
  3175. {
  3176. if (src_align >= 16)
  3177. {
  3178. do {
  3179. data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
  3180. emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs));
  3181. bytes -= 2;
  3182. ofs += 2;
  3183. } while (bytes >= 2);
  3184. }
  3185. else if (! TARGET_BWX)
  3186. {
  3187. data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
  3188. alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
  3189. bytes -= 2;
  3190. ofs += 2;
  3191. }
  3192. }
  3193. while (bytes > 0)
  3194. {
  3195. data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
  3196. emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs));
  3197. bytes -= 1;
  3198. ofs += 1;
  3199. }
  3200. gcc_assert (nregs <= ARRAY_SIZE (data_regs));
  3201. /* Now save it back out again. */
  3202. i = 0, ofs = 0;
  3203. /* Write out the data in whatever chunks reading the source allowed. */
  3204. if (dst_align >= 64)
  3205. {
  3206. while (i < nregs && GET_MODE (data_regs[i]) == DImode)
  3207. {
  3208. emit_move_insn (adjust_address (orig_dst, DImode, ofs),
  3209. data_regs[i]);
  3210. ofs += 8;
  3211. i++;
  3212. }
  3213. }
  3214. if (dst_align >= 32)
  3215. {
  3216. /* If the source has remaining DImode regs, write them out in
  3217. two pieces. */
  3218. while (i < nregs && GET_MODE (data_regs[i]) == DImode)
  3219. {
  3220. tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
  3221. NULL_RTX, 1, OPTAB_WIDEN);
  3222. emit_move_insn (adjust_address (orig_dst, SImode, ofs),
  3223. gen_lowpart (SImode, data_regs[i]));
  3224. emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4),
  3225. gen_lowpart (SImode, tmp));
  3226. ofs += 8;
  3227. i++;
  3228. }
  3229. while (i < nregs && GET_MODE (data_regs[i]) == SImode)
  3230. {
  3231. emit_move_insn (adjust_address (orig_dst, SImode, ofs),
  3232. data_regs[i]);
  3233. ofs += 4;
  3234. i++;
  3235. }
  3236. }
  3237. if (i < nregs && GET_MODE (data_regs[i]) == DImode)
  3238. {
  3239. /* Write out a remaining block of words using unaligned methods. */
  3240. for (words = 1; i + words < nregs; words++)
  3241. if (GET_MODE (data_regs[i + words]) != DImode)
  3242. break;
  3243. if (words == 1)
  3244. alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
  3245. else
  3246. alpha_expand_unaligned_store_words (data_regs + i, orig_dst,
  3247. words, ofs);
  3248. i += words;
  3249. ofs += words * 8;
  3250. }
  3251. /* Due to the above, this won't be aligned. */
  3252. /* ??? If we have more than one of these, consider constructing full
  3253. words in registers and using alpha_expand_unaligned_store_words. */
  3254. while (i < nregs && GET_MODE (data_regs[i]) == SImode)
  3255. {
  3256. alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
  3257. ofs += 4;
  3258. i++;
  3259. }
  3260. if (dst_align >= 16)
  3261. while (i < nregs && GET_MODE (data_regs[i]) == HImode)
  3262. {
  3263. emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]);
  3264. i++;
  3265. ofs += 2;
  3266. }
  3267. else
  3268. while (i < nregs && GET_MODE (data_regs[i]) == HImode)
  3269. {
  3270. alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
  3271. i++;
  3272. ofs += 2;
  3273. }
  3274. /* The remainder must be byte copies. */
  3275. while (i < nregs)
  3276. {
  3277. gcc_assert (GET_MODE (data_regs[i]) == QImode);
  3278. emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]);
  3279. i++;
  3280. ofs += 1;
  3281. }
  3282. return 1;
  3283. }
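/* Expand string/block clear operations.
   operands[0] is the pointer to the destination.
   operands[1] is the number of bytes to clear.
   operands[3] is the alignment.  */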
  3284. int
  3285. alpha_expand_block_clear (rtx operands[])
  3286. {
  3287. rtx bytes_rtx = operands[1];
  3288. rtx align_rtx = operands[3];
  3289. HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx);
  3290. HOST_WIDE_INT bytes = orig_bytes;
  3291. HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT;
  3292. HOST_WIDE_INT alignofs = 0;
  3293. rtx orig_dst = operands[0];
  3294. rtx tmp;
  3295. int i, words, ofs = 0;
  3296. if (orig_bytes <= 0)
  3297. return 1;
  3298. if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD)
  3299. return 0;
  3300. /* Look for stricter alignment. */
  3301. tmp = XEXP (orig_dst, 0);
  3302. if (REG_P (tmp))
  3303. align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp)));
  3304. else if (GET_CODE (tmp) == PLUS
  3305. && REG_P (XEXP (tmp, 0))
  3306. && CONST_INT_P (XEXP (tmp, 1)))
  3307. {
  3308. HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
  3309. int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
  3310. if (a > align)
  3311. {
  3312. if (a >= 64)
  3313. align = a, alignofs = 8 - c % 8;
  3314. else if (a >= 32)
  3315. align = a, alignofs = 4 - c % 4;
  3316. else if (a >= 16)
  3317. align = a, alignofs = 2 - c % 2;
  3318. }
  3319. }
  3320. /* Handle an unaligned prefix first. */
  3321. if (alignofs > 0)
  3322. {
  3323. #if HOST_BITS_PER_WIDE_INT >= 64
  3324. /* Given that alignofs is bounded by align, the only time BWX could
  3325. generate three stores is for a 7 byte fill. Prefer two individual
  3326. stores over a load/mask/store sequence. */
  3327. if ((!TARGET_BWX || alignofs == 7)
  3328. && align >= 32
  3329. && !(alignofs == 4 && bytes >= 4))
  3330. {
  3331. enum machine_mode mode = (align >= 64 ? DImode : SImode);
  3332. int inv_alignofs = (align >= 64 ? 8 : 4) - alignofs;
  3333. rtx mem, tmp;
  3334. HOST_WIDE_INT mask;
  3335. mem = adjust_address (orig_dst, mode, ofs - inv_alignofs);
  3336. set_mem_alias_set (mem, 0);
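/* Build a mask with 1s in the low INV_ALIGNOFS bytes, i.e. the bytes of
   this aligned word that precede the region being cleared; ANDing the
   word with it zeroes the ALIGNOFS prefix bytes.  E.g. for align == 64
   and alignofs == 3, inv_alignofs == 5 and the mask keeps the low five
   bytes while clearing the top three.  If the region ends inside this
   word, the trailing bytes are added back into the mask below.  */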
  3337. mask = ~(~(HOST_WIDE_INT)0 << (inv_alignofs * 8));
  3338. if (bytes < alignofs)
  3339. {
  3340. mask |= ~(HOST_WIDE_INT)0 << ((inv_alignofs + bytes) * 8);
  3341. ofs += bytes;
  3342. bytes = 0;
  3343. }
  3344. else
  3345. {
  3346. bytes -= alignofs;
  3347. ofs += alignofs;
  3348. }
  3349. alignofs = 0;
  3350. tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask),
  3351. NULL_RTX, 1, OPTAB_WIDEN);
  3352. emit_move_insn (mem, tmp);
  3353. }
  3354. #endif
  3355. if (TARGET_BWX && (alignofs & 1) && bytes >= 1)
  3356. {
  3357. emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
  3358. bytes -= 1;
  3359. ofs += 1;
  3360. alignofs -= 1;
  3361. }
  3362. if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2)
  3363. {
  3364. emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx);
  3365. bytes -= 2;
  3366. ofs += 2;
  3367. alignofs -= 2;
  3368. }
  3369. if (alignofs == 4 && bytes >= 4)
  3370. {
  3371. emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
  3372. bytes -= 4;
  3373. ofs += 4;
  3374. alignofs = 0;
  3375. }
  3376. /* If we've not used the extra lead alignment information by now,
  3377. we won't be able to. Downgrade align to match what's left over. */
  3378. if (alignofs > 0)
  3379. {
  3380. alignofs = alignofs & -alignofs;
  3381. align = MIN (align, alignofs * BITS_PER_UNIT);
  3382. }
  3383. }
  3384. /* Handle a block of contiguous long-words. */
  3385. if (align >= 64 && bytes >= 8)
  3386. {
  3387. words = bytes / 8;
  3388. for (i = 0; i < words; ++i)
  3389. emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8),
  3390. const0_rtx);
  3391. bytes -= words * 8;
  3392. ofs += words * 8;
  3393. }
  3394. /* If the block is large and appropriately aligned, emit a single
  3395. store followed by a sequence of stq_u insns. */
  3396. if (align >= 32 && bytes > 16)
  3397. {
  3398. rtx orig_dsta;
  3399. emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx);
  3400. bytes -= 4;
  3401. ofs += 4;
  3402. orig_dsta = XEXP (orig_dst, 0);
  3403. if (GET_CODE (orig_dsta) == LO_SUM)
  3404. orig_dsta = force_reg (Pmode, orig_dsta);
  3405. words = bytes / 8;
  3406. for (i = 0; i < words; ++i)
  3407. {
  3408. rtx mem
  3409. = change_address (orig_dst, DImode,
  3410. gen_rtx_AND (DImode,
  3411. plus_constant (DImode, orig_dsta,
  3412. ofs + i*8),
  3413. GEN_INT (-8)));
  3414. set_mem_alias_set (mem, 0);
  3415. emit_move_insn (mem, const0_rtx);
  3416. }
  3417. /* Depending on the alignment, the first stq_u may have overlapped
  3418. with the initial stl, which means that the last stq_u didn't
  3419. write as much as it would appear. Leave those questionable bytes
  3420. unaccounted for. */
  3421. bytes -= words * 8 - 4;
  3422. ofs += words * 8 - 4;
  3423. }
  3424. /* Handle a smaller block of aligned words. */
  3425. if ((align >= 64 && bytes == 4)
  3426. || (align == 32 && bytes >= 4))
  3427. {
  3428. words = bytes / 4;
  3429. for (i = 0; i < words; ++i)
  3430. emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4),
  3431. const0_rtx);
  3432. bytes -= words * 4;
  3433. ofs += words * 4;
  3434. }
  3435. /* An unaligned block uses stq_u stores for as many as possible. */
  3436. if (bytes >= 8)
  3437. {
  3438. words = bytes / 8;
  3439. alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
  3440. bytes -= words * 8;
  3441. ofs += words * 8;
  3442. }
  3443. /* Next clean up any trailing pieces. */
  3444. #if HOST_BITS_PER_WIDE_INT >= 64
  3445. /* Count the number of bits in BYTES for which aligned stores could
  3446. be emitted. */
  3447. words = 0;
  3448. for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align ; i <<= 1)
  3449. if (bytes & i)
  3450. words += 1;
  3451. /* If we have appropriate alignment (and it wouldn't take too many
  3452. instructions otherwise), mask out the bytes we need. */
  3453. if (TARGET_BWX ? words > 2 : bytes > 0)
  3454. {
  3455. if (align >= 64)
  3456. {
  3457. rtx mem, tmp;
  3458. HOST_WIDE_INT mask;
  3459. mem = adjust_address (orig_dst, DImode, ofs);
  3460. set_mem_alias_set (mem, 0);
  3461. mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
  3462. tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask),
  3463. NULL_RTX, 1, OPTAB_WIDEN);
  3464. emit_move_insn (mem, tmp);
  3465. return 1;
  3466. }
  3467. else if (align >= 32 && bytes < 4)
  3468. {
  3469. rtx mem, tmp;
  3470. HOST_WIDE_INT mask;
  3471. mem = adjust_address (orig_dst, SImode, ofs);
  3472. set_mem_alias_set (mem, 0);
  3473. mask = ~(HOST_WIDE_INT)0 << (bytes * 8);
  3474. tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask),
  3475. NULL_RTX, 1, OPTAB_WIDEN);
  3476. emit_move_insn (mem, tmp);
  3477. return 1;
  3478. }
  3479. }
  3480. #endif
  3481. if (!TARGET_BWX && bytes >= 4)
  3482. {
  3483. alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
  3484. bytes -= 4;
  3485. ofs += 4;
  3486. }
  3487. if (bytes >= 2)
  3488. {
  3489. if (align >= 16)
  3490. {
  3491. do {
  3492. emit_move_insn (adjust_address (orig_dst, HImode, ofs),
  3493. const0_rtx);
  3494. bytes -= 2;
  3495. ofs += 2;
  3496. } while (bytes >= 2);
  3497. }
  3498. else if (! TARGET_BWX)
  3499. {
  3500. alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
  3501. bytes -= 2;
  3502. ofs += 2;
  3503. }
  3504. }
  3505. while (bytes > 0)
  3506. {
  3507. emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx);
  3508. bytes -= 1;
  3509. ofs += 1;
  3510. }
  3511. return 1;
  3512. }
  3513. /* Returns a mask so that zap(x, value) == x & mask. */
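/* Bit I of VALUE selects byte I of the operand; the returned mask has
   0xff in exactly the bytes *not* selected.  For example, VALUE == 0x0f
   yields the mask 0xffffffff00000000, so zap(x, 0x0f) clears the low
   four bytes of x.  */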
  3514. rtx
  3515. alpha_expand_zap_mask (HOST_WIDE_INT value)
  3516. {
  3517. rtx result;
  3518. int i;
  3519. if (HOST_BITS_PER_WIDE_INT >= 64)
  3520. {
  3521. HOST_WIDE_INT mask = 0;
  3522. for (i = 7; i >= 0; --i)
  3523. {
  3524. mask <<= 8;
  3525. if (!((value >> i) & 1))
  3526. mask |= 0xff;
  3527. }
  3528. result = gen_int_mode (mask, DImode);
  3529. }
  3530. else
  3531. {
  3532. HOST_WIDE_INT mask_lo = 0, mask_hi = 0;
  3533. gcc_assert (HOST_BITS_PER_WIDE_INT == 32);
  3534. for (i = 7; i >= 4; --i)
  3535. {
  3536. mask_hi <<= 8;
  3537. if (!((value >> i) & 1))
  3538. mask_hi |= 0xff;
  3539. }
  3540. for (i = 3; i >= 0; --i)
  3541. {
  3542. mask_lo <<= 8;
  3543. if (!((value >> i) & 1))
  3544. mask_lo |= 0xff;
  3545. }
  3546. result = immed_double_const (mask_lo, mask_hi, DImode);
  3547. }
  3548. return result;
  3549. }
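/* Helper for the vector builtins: emit GEN on OP0, OP1 and OP2 after
   converting them to the vector MODE.  Integer zero is replaced by the
   vector zero constant, since a plain const0_rtx carries no machine
   mode and cannot simply be narrowed with gen_lowpart.  */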
  3550. void
  3551. alpha_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx),
  3552. enum machine_mode mode,
  3553. rtx op0, rtx op1, rtx op2)
  3554. {
  3555. op0 = gen_lowpart (mode, op0);
  3556. if (op1 == const0_rtx)
  3557. op1 = CONST0_RTX (mode);
  3558. else
  3559. op1 = gen_lowpart (mode, op1);
  3560. if (op2 == const0_rtx)
  3561. op2 = CONST0_RTX (mode);
  3562. else
  3563. op2 = gen_lowpart (mode, op2);
  3564. emit_insn ((*gen) (op0, op1, op2));
  3565. }
  3566. /* A subroutine of the atomic operation splitters. Jump to LABEL if
  3567. COND is true. Mark the jump as unlikely to be taken. */
  3568. static void
  3569. emit_unlikely_jump (rtx cond, rtx label)
  3570. {
  3571. rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
  3572. rtx x;
  3573. x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  3574. x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
  3575. add_reg_note (x, REG_BR_PROB, very_unlikely);
  3576. }
  3577. /* A subroutine of the atomic operation splitters. Emit a load-locked
  3578. instruction in MODE. */
  3579. static void
  3580. emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
  3581. {
  3582. rtx (*fn) (rtx, rtx) = NULL;
  3583. if (mode == SImode)
  3584. fn = gen_load_locked_si;
  3585. else if (mode == DImode)
  3586. fn = gen_load_locked_di;
  3587. emit_insn (fn (reg, mem));
  3588. }
  3589. /* A subroutine of the atomic operation splitters. Emit a store-conditional
  3590. instruction in MODE. */
  3591. static void
  3592. emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
  3593. {
  3594. rtx (*fn) (rtx, rtx, rtx) = NULL;
  3595. if (mode == SImode)
  3596. fn = gen_store_conditional_si;
  3597. else if (mode == DImode)
  3598. fn = gen_store_conditional_di;
  3599. emit_insn (fn (res, mem, val));
  3600. }
  3601. /* Subroutines of the atomic operation splitters. Emit barriers
  3602. as needed for the memory MODEL. */
  3603. static void
  3604. alpha_pre_atomic_barrier (enum memmodel model)
  3605. {
  3606. if (need_atomic_barrier_p (model, true))
  3607. emit_insn (gen_memory_barrier ());
  3608. }
  3609. static void
  3610. alpha_post_atomic_barrier (enum memmodel model)
  3611. {
  3612. if (need_atomic_barrier_p (model, false))
  3613. emit_insn (gen_memory_barrier ());
  3614. }
  3615. /* A subroutine of the atomic operation splitters. Emit an insxl
  3616. instruction in MODE. */
  3617. static rtx
  3618. emit_insxl (enum machine_mode mode, rtx op1, rtx op2)
  3619. {
  3620. rtx ret = gen_reg_rtx (DImode);
  3621. rtx (*fn) (rtx, rtx, rtx);
  3622. switch (mode)
  3623. {
  3624. case QImode:
  3625. fn = gen_insbl;
  3626. break;
  3627. case HImode:
  3628. fn = gen_inswl;
  3629. break;
  3630. case SImode:
  3631. fn = gen_insll;
  3632. break;
  3633. case DImode:
  3634. fn = gen_insql;
  3635. break;
  3636. default:
  3637. gcc_unreachable ();
  3638. }
  3639. op1 = force_reg (mode, op1);
  3640. emit_insn (fn (ret, op1, op2));
  3641. return ret;
  3642. }
  3643. /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
  3644. to perform. MEM is the memory on which to operate. VAL is the second
  3645. operand of the binary operator. BEFORE and AFTER are optional locations to
3646. return the value of MEM either before or after the operation. SCRATCH is
  3647. a scratch register. */
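/* The generated sequence is the usual Alpha load-locked/store-conditional
   loop, roughly:

	retry:	ld<x>_l	before, mem
		<op>	scratch, before, val
		st<x>_c	scratch, mem
		beq	scratch, retry		; retry on failure, unlikely

   with memory barriers emitted before and/or after as the memory model
   requires.  */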
  3648. void
  3649. alpha_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before,
  3650. rtx after, rtx scratch, enum memmodel model)
  3651. {
  3652. enum machine_mode mode = GET_MODE (mem);
  3653. rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch));
  3654. alpha_pre_atomic_barrier (model);
  3655. label = gen_label_rtx ();
  3656. emit_label (label);
  3657. label = gen_rtx_LABEL_REF (DImode, label);
  3658. if (before == NULL)
  3659. before = scratch;
  3660. emit_load_locked (mode, before, mem);
  3661. if (code == NOT)
  3662. {
  3663. x = gen_rtx_AND (mode, before, val);
  3664. emit_insn (gen_rtx_SET (VOIDmode, val, x));
  3665. x = gen_rtx_NOT (mode, val);
  3666. }
  3667. else
  3668. x = gen_rtx_fmt_ee (code, mode, before, val);
  3669. if (after)
  3670. emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
  3671. emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
  3672. emit_store_conditional (mode, cond, mem, scratch);
  3673. x = gen_rtx_EQ (DImode, cond, const0_rtx);
  3674. emit_unlikely_jump (x, label);
  3675. alpha_post_atomic_barrier (model);
  3676. }
  3677. /* Expand a compare and swap operation. */
  3678. void
  3679. alpha_split_compare_and_swap (rtx operands[])
  3680. {
  3681. rtx cond, retval, mem, oldval, newval;
  3682. bool is_weak;
  3683. enum memmodel mod_s, mod_f;
  3684. enum machine_mode mode;
  3685. rtx label1, label2, x;
  3686. cond = operands[0];
  3687. retval = operands[1];
  3688. mem = operands[2];
  3689. oldval = operands[3];
  3690. newval = operands[4];
  3691. is_weak = (operands[5] != const0_rtx);
  3692. mod_s = (enum memmodel) INTVAL (operands[6]);
  3693. mod_f = (enum memmodel) INTVAL (operands[7]);
  3694. mode = GET_MODE (mem);
  3695. alpha_pre_atomic_barrier (mod_s);
  3696. label1 = NULL_RTX;
  3697. if (!is_weak)
  3698. {
  3699. label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  3700. emit_label (XEXP (label1, 0));
  3701. }
  3702. label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  3703. emit_load_locked (mode, retval, mem);
  3704. x = gen_lowpart (DImode, retval);
  3705. if (oldval == const0_rtx)
  3706. {
  3707. emit_move_insn (cond, const0_rtx);
  3708. x = gen_rtx_NE (DImode, x, const0_rtx);
  3709. }
  3710. else
  3711. {
  3712. x = gen_rtx_EQ (DImode, x, oldval);
  3713. emit_insn (gen_rtx_SET (VOIDmode, cond, x));
  3714. x = gen_rtx_EQ (DImode, cond, const0_rtx);
  3715. }
  3716. emit_unlikely_jump (x, label2);
  3717. emit_move_insn (cond, newval);
  3718. emit_store_conditional (mode, cond, mem, gen_lowpart (mode, cond));
  3719. if (!is_weak)
  3720. {
  3721. x = gen_rtx_EQ (DImode, cond, const0_rtx);
  3722. emit_unlikely_jump (x, label1);
  3723. }
  3724. if (mod_f != MEMMODEL_RELAXED)
  3725. emit_label (XEXP (label2, 0));
  3726. alpha_post_atomic_barrier (mod_s);
  3727. if (mod_f == MEMMODEL_RELAXED)
  3728. emit_label (XEXP (label2, 0));
  3729. }
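/* Expand a compare-and-swap on a one- or two-byte value.  Alpha has no
   byte or halfword load-locked/store-conditional instructions, so the
   operation is done on the aligned quadword containing the value: the
   address is masked to an 8-byte boundary, OLDVAL is zero-extended,
   NEWVAL is shifted into position with ins<x>l, and the quadword-wide
   pattern emitted here is split by alpha_split_compare_and_swap_12
   below into an ext/msk/ior load-locked loop.  */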
  3730. void
  3731. alpha_expand_compare_and_swap_12 (rtx operands[])
  3732. {
  3733. rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f;
  3734. enum machine_mode mode;
  3735. rtx addr, align, wdst;
  3736. rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
  3737. cond = operands[0];
  3738. dst = operands[1];
  3739. mem = operands[2];
  3740. oldval = operands[3];
  3741. newval = operands[4];
  3742. is_weak = operands[5];
  3743. mod_s = operands[6];
  3744. mod_f = operands[7];
  3745. mode = GET_MODE (mem);
  3746. /* We forced the address into a register via mem_noofs_operand. */
  3747. addr = XEXP (mem, 0);
  3748. gcc_assert (register_operand (addr, DImode));
  3749. align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
  3750. NULL_RTX, 1, OPTAB_DIRECT);
  3751. oldval = convert_modes (DImode, mode, oldval, 1);
  3752. if (newval != const0_rtx)
  3753. newval = emit_insxl (mode, newval, addr);
  3754. wdst = gen_reg_rtx (DImode);
  3755. if (mode == QImode)
  3756. gen = gen_atomic_compare_and_swapqi_1;
  3757. else
  3758. gen = gen_atomic_compare_and_swaphi_1;
  3759. emit_insn (gen (cond, wdst, mem, oldval, newval, align,
  3760. is_weak, mod_s, mod_f));
  3761. emit_move_insn (dst, gen_lowpart (mode, wdst));
  3762. }
  3763. void
  3764. alpha_split_compare_and_swap_12 (rtx operands[])
  3765. {
  3766. rtx cond, dest, orig_mem, oldval, newval, align, scratch;
  3767. enum machine_mode mode;
  3768. bool is_weak;
  3769. enum memmodel mod_s, mod_f;
  3770. rtx label1, label2, mem, addr, width, mask, x;
  3771. cond = operands[0];
  3772. dest = operands[1];
  3773. orig_mem = operands[2];
  3774. oldval = operands[3];
  3775. newval = operands[4];
  3776. align = operands[5];
  3777. is_weak = (operands[6] != const0_rtx);
  3778. mod_s = (enum memmodel) INTVAL (operands[7]);
  3779. mod_f = (enum memmodel) INTVAL (operands[8]);
  3780. scratch = operands[9];
  3781. mode = GET_MODE (orig_mem);
  3782. addr = XEXP (orig_mem, 0);
  3783. mem = gen_rtx_MEM (DImode, align);
  3784. MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  3785. if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
  3786. set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
  3787. alpha_pre_atomic_barrier (mod_s);
  3788. label1 = NULL_RTX;
  3789. if (!is_weak)
  3790. {
  3791. label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  3792. emit_label (XEXP (label1, 0));
  3793. }
  3794. label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  3795. emit_load_locked (DImode, scratch, mem);
  3796. width = GEN_INT (GET_MODE_BITSIZE (mode));
  3797. mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  3798. emit_insn (gen_extxl (dest, scratch, width, addr));
  3799. if (oldval == const0_rtx)
  3800. {
  3801. emit_move_insn (cond, const0_rtx);
  3802. x = gen_rtx_NE (DImode, dest, const0_rtx);
  3803. }
  3804. else
  3805. {
  3806. x = gen_rtx_EQ (DImode, dest, oldval);
  3807. emit_insn (gen_rtx_SET (VOIDmode, cond, x));
  3808. x = gen_rtx_EQ (DImode, cond, const0_rtx);
  3809. }
  3810. emit_unlikely_jump (x, label2);
  3811. emit_insn (gen_mskxl (cond, scratch, mask, addr));
  3812. if (newval != const0_rtx)
  3813. emit_insn (gen_iordi3 (cond, cond, newval));
  3814. emit_store_conditional (DImode, cond, mem, cond);
  3815. if (!is_weak)
  3816. {
  3817. x = gen_rtx_EQ (DImode, cond, const0_rtx);
  3818. emit_unlikely_jump (x, label1);
  3819. }
  3820. if (mod_f != MEMMODEL_RELAXED)
  3821. emit_label (XEXP (label2, 0));
  3822. alpha_post_atomic_barrier (mod_s);
  3823. if (mod_f == MEMMODEL_RELAXED)
  3824. emit_label (XEXP (label2, 0));
  3825. }
  3826. /* Expand an atomic exchange operation. */
  3827. void
  3828. alpha_split_atomic_exchange (rtx operands[])
  3829. {
  3830. rtx retval, mem, val, scratch;
  3831. enum memmodel model;
  3832. enum machine_mode mode;
  3833. rtx label, x, cond;
  3834. retval = operands[0];
  3835. mem = operands[1];
  3836. val = operands[2];
  3837. model = (enum memmodel) INTVAL (operands[3]);
  3838. scratch = operands[4];
  3839. mode = GET_MODE (mem);
  3840. cond = gen_lowpart (DImode, scratch);
  3841. alpha_pre_atomic_barrier (model);
  3842. label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  3843. emit_label (XEXP (label, 0));
  3844. emit_load_locked (mode, retval, mem);
  3845. emit_move_insn (scratch, val);
  3846. emit_store_conditional (mode, cond, mem, scratch);
  3847. x = gen_rtx_EQ (DImode, cond, const0_rtx);
  3848. emit_unlikely_jump (x, label);
  3849. alpha_post_atomic_barrier (model);
  3850. }
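/* Likewise for a one- or two-byte atomic exchange: operate on the
   containing aligned quadword, shifting VAL into place with ins<x>l
   before emitting the quadword-wide exchange pattern.  */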
  3851. void
  3852. alpha_expand_atomic_exchange_12 (rtx operands[])
  3853. {
  3854. rtx dst, mem, val, model;
  3855. enum machine_mode mode;
  3856. rtx addr, align, wdst;
  3857. rtx (*gen) (rtx, rtx, rtx, rtx, rtx);
  3858. dst = operands[0];
  3859. mem = operands[1];
  3860. val = operands[2];
  3861. model = operands[3];
  3862. mode = GET_MODE (mem);
  3863. /* We forced the address into a register via mem_noofs_operand. */
  3864. addr = XEXP (mem, 0);
  3865. gcc_assert (register_operand (addr, DImode));
  3866. align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8),
  3867. NULL_RTX, 1, OPTAB_DIRECT);
  3868. /* Insert val into the correct byte location within the word. */
  3869. if (val != const0_rtx)
  3870. val = emit_insxl (mode, val, addr);
  3871. wdst = gen_reg_rtx (DImode);
  3872. if (mode == QImode)
  3873. gen = gen_atomic_exchangeqi_1;
  3874. else
  3875. gen = gen_atomic_exchangehi_1;
  3876. emit_insn (gen (wdst, mem, val, align, model));
  3877. emit_move_insn (dst, gen_lowpart (mode, wdst));
  3878. }
  3879. void
  3880. alpha_split_atomic_exchange_12 (rtx operands[])
  3881. {
  3882. rtx dest, orig_mem, addr, val, align, scratch;
  3883. rtx label, mem, width, mask, x;
  3884. enum machine_mode mode;
  3885. enum memmodel model;
  3886. dest = operands[0];
  3887. orig_mem = operands[1];
  3888. val = operands[2];
  3889. align = operands[3];
  3890. model = (enum memmodel) INTVAL (operands[4]);
  3891. scratch = operands[5];
  3892. mode = GET_MODE (orig_mem);
  3893. addr = XEXP (orig_mem, 0);
  3894. mem = gen_rtx_MEM (DImode, align);
  3895. MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  3896. if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
  3897. set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
  3898. alpha_pre_atomic_barrier (model);
  3899. label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ());
  3900. emit_label (XEXP (label, 0));
  3901. emit_load_locked (DImode, scratch, mem);
  3902. width = GEN_INT (GET_MODE_BITSIZE (mode));
  3903. mask = GEN_INT (mode == QImode ? 0xff : 0xffff);
  3904. emit_insn (gen_extxl (dest, scratch, width, addr));
  3905. emit_insn (gen_mskxl (scratch, scratch, mask, addr));
  3906. if (val != const0_rtx)
  3907. emit_insn (gen_iordi3 (scratch, scratch, val));
  3908. emit_store_conditional (DImode, scratch, mem, scratch);
  3909. x = gen_rtx_EQ (DImode, scratch, const0_rtx);
  3910. emit_unlikely_jump (x, label);
  3911. alpha_post_atomic_barrier (model);
  3912. }
  3913. /* Adjust the cost of a scheduling dependency. Return the new cost of
  3914. a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
  3915. static int
  3916. alpha_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
  3917. {
  3918. enum attr_type dep_insn_type;
  3919. /* If the dependence is an anti-dependence, there is no cost. For an
  3920. output dependence, there is sometimes a cost, but it doesn't seem
  3921. worth handling those few cases. */
  3922. if (REG_NOTE_KIND (link) != 0)
  3923. return cost;
  3924. /* If we can't recognize the insns, we can't really do anything. */
  3925. if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
  3926. return cost;
  3927. dep_insn_type = get_attr_type (dep_insn);
  3928. /* Bring in the user-defined memory latency. */
  3929. if (dep_insn_type == TYPE_ILD
  3930. || dep_insn_type == TYPE_FLD
  3931. || dep_insn_type == TYPE_LDSYM)
  3932. cost += alpha_memory_latency-1;
  3933. /* Everything else handled in DFA bypasses now. */
  3934. return cost;
  3935. }
  3936. /* The number of instructions that can be issued per cycle. */
  3937. static int
  3938. alpha_issue_rate (void)
  3939. {
  3940. return (alpha_tune == PROCESSOR_EV4 ? 2 : 4);
  3941. }
  3942. /* How many alternative schedules to try. This should be as wide as the
  3943. scheduling freedom in the DFA, but no wider. Making this value too
3944. large results in extra work for the scheduler.
  3945. For EV4, loads can be issued to either IB0 or IB1, thus we have 2
  3946. alternative schedules. For EV5, we can choose between E0/E1 and
  3947. FA/FM. For EV6, an arithmetic insn can be issued to U0/U1/L0/L1. */
  3948. static int
  3949. alpha_multipass_dfa_lookahead (void)
  3950. {
  3951. return (alpha_tune == PROCESSOR_EV6 ? 4 : 2);
  3952. }
  3953. /* Machine-specific function data. */
  3954. struct GTY(()) alpha_links;
  3955. struct GTY(()) machine_function
  3956. {
  3957. /* For OSF. */
  3958. const char *some_ld_name;
  3959. /* For flag_reorder_blocks_and_partition. */
  3960. rtx gp_save_rtx;
  3961. /* For VMS condition handlers. */
  3962. bool uses_condition_handler;
  3963. /* Linkage entries. */
  3964. splay_tree GTY ((param1_is (char *), param2_is (struct alpha_links *)))
  3965. links;
  3966. };
  3967. /* How to allocate a 'struct machine_function'. */
  3968. static struct machine_function *
  3969. alpha_init_machine_status (void)
  3970. {
  3971. return ggc_alloc_cleared_machine_function ();
  3972. }
  3973. /* Support for frame based VMS condition handlers. */
  3974. /* A VMS condition handler may be established for a function with a call to
  3975. __builtin_establish_vms_condition_handler, and cancelled with a call to
  3976. __builtin_revert_vms_condition_handler.
  3977. The VMS Condition Handling Facility knows about the existence of a handler
3978. from the procedure descriptor .handler field. Like the VMS native compilers,
3979. we store the user-specified handler's address at a fixed location in the
  3980. stack frame and point the procedure descriptor at a common wrapper which
  3981. fetches the real handler's address and issues an indirect call.
  3982. The indirection wrapper is "__gcc_shell_handler", provided by libgcc.
  3983. We force the procedure kind to PT_STACK, and the fixed frame location is
  3984. fp+8, just before the register save area. We use the handler_data field in
  3985. the procedure descriptor to state the fp offset at which the installed
  3986. handler address can be found. */
  3987. #define VMS_COND_HANDLER_FP_OFFSET 8
  3988. /* Expand code to store the currently installed user VMS condition handler
  3989. into TARGET and install HANDLER as the new condition handler. */
  3990. void
  3991. alpha_expand_builtin_establish_vms_condition_handler (rtx target, rtx handler)
  3992. {
  3993. rtx handler_slot_address = plus_constant (Pmode, hard_frame_pointer_rtx,
  3994. VMS_COND_HANDLER_FP_OFFSET);
  3995. rtx handler_slot
  3996. = gen_rtx_MEM (DImode, handler_slot_address);
  3997. emit_move_insn (target, handler_slot);
  3998. emit_move_insn (handler_slot, handler);
  3999. /* Notify the start/prologue/epilogue emitters that the condition handler
  4000. slot is needed. In addition to reserving the slot space, this will force
4001. the procedure kind to PT_STACK, ensuring that the hard_frame_pointer_rtx
  4002. use above is correct. */
  4003. cfun->machine->uses_condition_handler = true;
  4004. }
  4005. /* Expand code to store the current VMS condition handler into TARGET and
  4006. nullify it. */
  4007. void
  4008. alpha_expand_builtin_revert_vms_condition_handler (rtx target)
  4009. {
  4010. /* We implement this by establishing a null condition handler, with the tiny
  4011. side effect of setting uses_condition_handler. This is a little bit
  4012. pessimistic if no actual builtin_establish call is ever issued, which is
  4013. not a real problem and expected never to happen anyway. */
  4014. alpha_expand_builtin_establish_vms_condition_handler (target, const0_rtx);
  4015. }
  4016. /* Functions to save and restore alpha_return_addr_rtx. */
  4017. /* Start the ball rolling with RETURN_ADDR_RTX. */
  4018. rtx
  4019. alpha_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
  4020. {
  4021. if (count != 0)
  4022. return const0_rtx;
  4023. return get_hard_reg_initial_val (Pmode, REG_RA);
  4024. }
  4025. /* Return or create a memory slot containing the gp value for the current
  4026. function. Needed only if TARGET_LD_BUGGY_LDGP. */
  4027. rtx
  4028. alpha_gp_save_rtx (void)
  4029. {
  4030. rtx seq, m = cfun->machine->gp_save_rtx;
  4031. if (m == NULL)
  4032. {
  4033. start_sequence ();
  4034. m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD);
  4035. m = validize_mem (m);
  4036. emit_move_insn (m, pic_offset_table_rtx);
  4037. seq = get_insns ();
  4038. end_sequence ();
  4039. /* We used to simply emit the sequence after entry_of_function.
  4040. However this breaks the CFG if the first instruction in the
  4041. first block is not the NOTE_INSN_BASIC_BLOCK, for example a
  4042. label. Emit the sequence properly on the edge. We are only
  4043. invoked from dw2_build_landing_pads and finish_eh_generation
  4044. will call commit_edge_insertions thanks to a kludge. */
  4045. insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
  4046. cfun->machine->gp_save_rtx = m;
  4047. }
  4048. return m;
  4049. }
  4050. static void
  4051. alpha_instantiate_decls (void)
  4052. {
  4053. if (cfun->machine->gp_save_rtx != NULL_RTX)
  4054. instantiate_decl_rtl (cfun->machine->gp_save_rtx);
  4055. }
  4056. static int
  4057. alpha_ra_ever_killed (void)
  4058. {
  4059. rtx top;
  4060. if (!has_hard_reg_initial_val (Pmode, REG_RA))
  4061. return (int)df_regs_ever_live_p (REG_RA);
  4062. push_topmost_sequence ();
  4063. top = get_insns ();
  4064. pop_topmost_sequence ();
  4065. return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
  4066. }
  4067. /* Return the trap mode suffix applicable to the current
  4068. instruction, or NULL. */
  4069. static const char *
  4070. get_trap_mode_suffix (void)
  4071. {
  4072. enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn);
  4073. switch (s)
  4074. {
  4075. case TRAP_SUFFIX_NONE:
  4076. return NULL;
  4077. case TRAP_SUFFIX_SU:
  4078. if (alpha_fptm >= ALPHA_FPTM_SU)
  4079. return "su";
  4080. return NULL;
  4081. case TRAP_SUFFIX_SUI:
  4082. if (alpha_fptm >= ALPHA_FPTM_SUI)
  4083. return "sui";
  4084. return NULL;
  4085. case TRAP_SUFFIX_V_SV:
  4086. switch (alpha_fptm)
  4087. {
  4088. case ALPHA_FPTM_N:
  4089. return NULL;
  4090. case ALPHA_FPTM_U:
  4091. return "v";
  4092. case ALPHA_FPTM_SU:
  4093. case ALPHA_FPTM_SUI:
  4094. return "sv";
  4095. default:
  4096. gcc_unreachable ();
  4097. }
  4098. case TRAP_SUFFIX_V_SV_SVI:
  4099. switch (alpha_fptm)
  4100. {
  4101. case ALPHA_FPTM_N:
  4102. return NULL;
  4103. case ALPHA_FPTM_U:
  4104. return "v";
  4105. case ALPHA_FPTM_SU:
  4106. return "sv";
  4107. case ALPHA_FPTM_SUI:
  4108. return "svi";
  4109. default:
  4110. gcc_unreachable ();
  4111. }
  4112. break;
  4113. case TRAP_SUFFIX_U_SU_SUI:
  4114. switch (alpha_fptm)
  4115. {
  4116. case ALPHA_FPTM_N:
  4117. return NULL;
  4118. case ALPHA_FPTM_U:
  4119. return "u";
  4120. case ALPHA_FPTM_SU:
  4121. return "su";
  4122. case ALPHA_FPTM_SUI:
  4123. return "sui";
  4124. default:
  4125. gcc_unreachable ();
  4126. }
  4127. break;
  4128. default:
  4129. gcc_unreachable ();
  4130. }
  4131. gcc_unreachable ();
  4132. }
  4133. /* Return the rounding mode suffix applicable to the current
  4134. instruction, or NULL. */
  4135. static const char *
  4136. get_round_mode_suffix (void)
  4137. {
  4138. enum attr_round_suffix s = get_attr_round_suffix (current_output_insn);
  4139. switch (s)
  4140. {
  4141. case ROUND_SUFFIX_NONE:
  4142. return NULL;
  4143. case ROUND_SUFFIX_NORMAL:
  4144. switch (alpha_fprm)
  4145. {
  4146. case ALPHA_FPRM_NORM:
  4147. return NULL;
  4148. case ALPHA_FPRM_MINF:
  4149. return "m";
  4150. case ALPHA_FPRM_CHOP:
  4151. return "c";
  4152. case ALPHA_FPRM_DYN:
  4153. return "d";
  4154. default:
  4155. gcc_unreachable ();
  4156. }
  4157. break;
  4158. case ROUND_SUFFIX_C:
  4159. return "c";
  4160. default:
  4161. gcc_unreachable ();
  4162. }
  4163. gcc_unreachable ();
  4164. }
  4165. /* Locate some local-dynamic symbol still in use by this function
  4166. so that we can print its name in some movdi_er_tlsldm pattern. */
  4167. static int
  4168. get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
  4169. {
  4170. rtx x = *px;
  4171. if (GET_CODE (x) == SYMBOL_REF
  4172. && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
  4173. {
  4174. cfun->machine->some_ld_name = XSTR (x, 0);
  4175. return 1;
  4176. }
  4177. return 0;
  4178. }
  4179. static const char *
  4180. get_some_local_dynamic_name (void)
  4181. {
  4182. rtx insn;
  4183. if (cfun->machine->some_ld_name)
  4184. return cfun->machine->some_ld_name;
  4185. for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
  4186. if (INSN_P (insn)
  4187. && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
  4188. return cfun->machine->some_ld_name;
  4189. gcc_unreachable ();
  4190. }
  4191. /* Print an operand. Recognize special options, documented below. */
  4192. void
  4193. print_operand (FILE *file, rtx x, int code)
  4194. {
  4195. int i;
  4196. switch (code)
  4197. {
  4198. case '~':
  4199. /* Print the assembler name of the current function. */
  4200. assemble_name (file, alpha_fnname);
  4201. break;
  4202. case '&':
  4203. assemble_name (file, get_some_local_dynamic_name ());
  4204. break;
  4205. case '/':
  4206. {
  4207. const char *trap = get_trap_mode_suffix ();
  4208. const char *round = get_round_mode_suffix ();
  4209. if (trap || round)
  4210. fprintf (file, "/%s%s", (trap ? trap : ""), (round ? round : ""));
  4211. break;
  4212. }
  4213. case ',':
  4214. /* Generates single precision instruction suffix. */
  4215. fputc ((TARGET_FLOAT_VAX ? 'f' : 's'), file);
  4216. break;
  4217. case '-':
  4218. /* Generates double precision instruction suffix. */
  4219. fputc ((TARGET_FLOAT_VAX ? 'g' : 't'), file);
  4220. break;
  4221. case '#':
  4222. if (alpha_this_literal_sequence_number == 0)
  4223. alpha_this_literal_sequence_number = alpha_next_sequence_number++;
  4224. fprintf (file, "%d", alpha_this_literal_sequence_number);
  4225. break;
  4226. case '*':
  4227. if (alpha_this_gpdisp_sequence_number == 0)
  4228. alpha_this_gpdisp_sequence_number = alpha_next_sequence_number++;
  4229. fprintf (file, "%d", alpha_this_gpdisp_sequence_number);
  4230. break;
  4231. case 'H':
  4232. if (GET_CODE (x) == HIGH)
  4233. output_addr_const (file, XEXP (x, 0));
  4234. else
  4235. output_operand_lossage ("invalid %%H value");
  4236. break;
  4237. case 'J':
  4238. {
  4239. const char *lituse;
  4240. if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL)
  4241. {
  4242. x = XVECEXP (x, 0, 0);
  4243. lituse = "lituse_tlsgd";
  4244. }
  4245. else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL)
  4246. {
  4247. x = XVECEXP (x, 0, 0);
  4248. lituse = "lituse_tlsldm";
  4249. }
  4250. else if (CONST_INT_P (x))
  4251. lituse = "lituse_jsr";
  4252. else
  4253. {
  4254. output_operand_lossage ("invalid %%J value");
  4255. break;
  4256. }
  4257. if (x != const0_rtx)
  4258. fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
  4259. }
  4260. break;
  4261. case 'j':
  4262. {
  4263. const char *lituse;
  4264. #ifdef HAVE_AS_JSRDIRECT_RELOCS
  4265. lituse = "lituse_jsrdirect";
  4266. #else
  4267. lituse = "lituse_jsr";
  4268. #endif
  4269. gcc_assert (INTVAL (x) != 0);
  4270. fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x));
  4271. }
  4272. break;
  4273. case 'r':
  4274. /* If this operand is the constant zero, write it as "$31". */
  4275. if (REG_P (x))
  4276. fprintf (file, "%s", reg_names[REGNO (x)]);
  4277. else if (x == CONST0_RTX (GET_MODE (x)))
  4278. fprintf (file, "$31");
  4279. else
  4280. output_operand_lossage ("invalid %%r value");
  4281. break;
  4282. case 'R':
  4283. /* Similar, but for floating-point. */
  4284. if (REG_P (x))
  4285. fprintf (file, "%s", reg_names[REGNO (x)]);
  4286. else if (x == CONST0_RTX (GET_MODE (x)))
  4287. fprintf (file, "$f31");
  4288. else
  4289. output_operand_lossage ("invalid %%R value");
  4290. break;
  4291. case 'N':
  4292. /* Write the 1's complement of a constant. */
  4293. if (!CONST_INT_P (x))
  4294. output_operand_lossage ("invalid %%N value");
  4295. fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
  4296. break;
  4297. case 'P':
  4298. /* Write 1 << C, for a constant C. */
  4299. if (!CONST_INT_P (x))
  4300. output_operand_lossage ("invalid %%P value");
  4301. fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
  4302. break;
  4303. case 'h':
  4304. /* Write the high-order 16 bits of a constant, sign-extended. */
  4305. if (!CONST_INT_P (x))
  4306. output_operand_lossage ("invalid %%h value");
  4307. fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
  4308. break;
  4309. case 'L':
  4310. /* Write the low-order 16 bits of a constant, sign-extended. */
  4311. if (!CONST_INT_P (x))
  4312. output_operand_lossage ("invalid %%L value");
  4313. fprintf (file, HOST_WIDE_INT_PRINT_DEC,
  4314. (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
  4315. break;
  4316. case 'm':
  4317. /* Write mask for ZAP insn. */
  4318. if (GET_CODE (x) == CONST_DOUBLE)
  4319. {
  4320. HOST_WIDE_INT mask = 0;
  4321. HOST_WIDE_INT value;
  4322. value = CONST_DOUBLE_LOW (x);
  4323. for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
  4324. i++, value >>= 8)
  4325. if (value & 0xff)
  4326. mask |= (1 << i);
  4327. value = CONST_DOUBLE_HIGH (x);
  4328. for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
  4329. i++, value >>= 8)
  4330. if (value & 0xff)
  4331. mask |= (1 << (i + sizeof (int)));
  4332. fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
  4333. }
  4334. else if (CONST_INT_P (x))
  4335. {
  4336. HOST_WIDE_INT mask = 0, value = INTVAL (x);
  4337. for (i = 0; i < 8; i++, value >>= 8)
  4338. if (value & 0xff)
  4339. mask |= (1 << i);
  4340. fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
  4341. }
  4342. else
  4343. output_operand_lossage ("invalid %%m value");
  4344. break;
  4345. case 'M':
  4346. /* 'b', 'w', 'l', or 'q' as the value of the constant. */
  4347. if (!CONST_INT_P (x)
  4348. || (INTVAL (x) != 8 && INTVAL (x) != 16
  4349. && INTVAL (x) != 32 && INTVAL (x) != 64))
  4350. output_operand_lossage ("invalid %%M value");
  4351. fprintf (file, "%s",
  4352. (INTVAL (x) == 8 ? "b"
  4353. : INTVAL (x) == 16 ? "w"
  4354. : INTVAL (x) == 32 ? "l"
  4355. : "q"));
  4356. break;
  4357. case 'U':
  4358. /* Similar, except do it from the mask. */
  4359. if (CONST_INT_P (x))
  4360. {
  4361. HOST_WIDE_INT value = INTVAL (x);
  4362. if (value == 0xff)
  4363. {
  4364. fputc ('b', file);
  4365. break;
  4366. }
  4367. if (value == 0xffff)
  4368. {
  4369. fputc ('w', file);
  4370. break;
  4371. }
  4372. if (value == 0xffffffff)
  4373. {
  4374. fputc ('l', file);
  4375. break;
  4376. }
  4377. if (value == -1)
  4378. {
  4379. fputc ('q', file);
  4380. break;
  4381. }
  4382. }
  4383. else if (HOST_BITS_PER_WIDE_INT == 32
  4384. && GET_CODE (x) == CONST_DOUBLE
  4385. && CONST_DOUBLE_LOW (x) == 0xffffffff
  4386. && CONST_DOUBLE_HIGH (x) == 0)
  4387. {
  4388. fputc ('l', file);
  4389. break;
  4390. }
  4391. output_operand_lossage ("invalid %%U value");
  4392. break;
  4393. case 's':
  4394. /* Write the constant value divided by 8. */
  4395. if (!CONST_INT_P (x)
  4396. || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
  4397. || (INTVAL (x) & 7) != 0)
  4398. output_operand_lossage ("invalid %%s value");
  4399. fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
  4400. break;
  4401. case 'S':
4402. /* Same, except compute (64 - c) / 8. */
4403. if (!CONST_INT_P (x)
4404. || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64
4405. || (INTVAL (x) & 7) != 0)
4406. output_operand_lossage ("invalid %%S value");
  4407. fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
  4408. break;
  4409. case 'C': case 'D': case 'c': case 'd':
  4410. /* Write out comparison name. */
  4411. {
  4412. enum rtx_code c = GET_CODE (x);
  4413. if (!COMPARISON_P (x))
  4414. output_operand_lossage ("invalid %%C value");
  4415. else if (code == 'D')
  4416. c = reverse_condition (c);
  4417. else if (code == 'c')
  4418. c = swap_condition (c);
  4419. else if (code == 'd')
  4420. c = swap_condition (reverse_condition (c));
  4421. if (c == LEU)
  4422. fprintf (file, "ule");
  4423. else if (c == LTU)
  4424. fprintf (file, "ult");
  4425. else if (c == UNORDERED)
  4426. fprintf (file, "un");
  4427. else
  4428. fprintf (file, "%s", GET_RTX_NAME (c));
  4429. }
  4430. break;
  4431. case 'E':
  4432. /* Write the divide or modulus operator. */
  4433. switch (GET_CODE (x))
  4434. {
  4435. case DIV:
  4436. fprintf (file, "div%s", GET_MODE (x) == SImode ? "l" : "q");
  4437. break;
  4438. case UDIV:
  4439. fprintf (file, "div%su", GET_MODE (x) == SImode ? "l" : "q");
  4440. break;
  4441. case MOD:
  4442. fprintf (file, "rem%s", GET_MODE (x) == SImode ? "l" : "q");
  4443. break;
  4444. case UMOD:
  4445. fprintf (file, "rem%su", GET_MODE (x) == SImode ? "l" : "q");
  4446. break;
  4447. default:
  4448. output_operand_lossage ("invalid %%E value");
  4449. break;
  4450. }
  4451. break;
  4452. case 'A':
  4453. /* Write "_u" for unaligned access. */
  4454. if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
  4455. fprintf (file, "_u");
  4456. break;
  4457. case 0:
  4458. if (REG_P (x))
  4459. fprintf (file, "%s", reg_names[REGNO (x)]);
  4460. else if (MEM_P (x))
  4461. output_address (XEXP (x, 0));
  4462. else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC)
  4463. {
  4464. switch (XINT (XEXP (x, 0), 1))
  4465. {
  4466. case UNSPEC_DTPREL:
  4467. case UNSPEC_TPREL:
  4468. output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0));
  4469. break;
  4470. default:
  4471. output_operand_lossage ("unknown relocation unspec");
  4472. break;
  4473. }
  4474. }
  4475. else
  4476. output_addr_const (file, x);
  4477. break;
  4478. default:
  4479. output_operand_lossage ("invalid %%xn code");
  4480. }
  4481. }
  4482. void
  4483. print_operand_address (FILE *file, rtx addr)
  4484. {
  4485. int basereg = 31;
  4486. HOST_WIDE_INT offset = 0;
  4487. if (GET_CODE (addr) == AND)
  4488. addr = XEXP (addr, 0);
  4489. if (GET_CODE (addr) == PLUS
  4490. && CONST_INT_P (XEXP (addr, 1)))
  4491. {
  4492. offset = INTVAL (XEXP (addr, 1));
  4493. addr = XEXP (addr, 0);
  4494. }
  4495. if (GET_CODE (addr) == LO_SUM)
  4496. {
  4497. const char *reloc16, *reloclo;
  4498. rtx op1 = XEXP (addr, 1);
  4499. if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC)
  4500. {
  4501. op1 = XEXP (op1, 0);
  4502. switch (XINT (op1, 1))
  4503. {
  4504. case UNSPEC_DTPREL:
  4505. reloc16 = NULL;
  4506. reloclo = (alpha_tls_size == 16 ? "dtprel" : "dtprello");
  4507. break;
  4508. case UNSPEC_TPREL:
  4509. reloc16 = NULL;
  4510. reloclo = (alpha_tls_size == 16 ? "tprel" : "tprello");
  4511. break;
  4512. default:
  4513. output_operand_lossage ("unknown relocation unspec");
  4514. return;
  4515. }
  4516. output_addr_const (file, XVECEXP (op1, 0, 0));
  4517. }
  4518. else
  4519. {
  4520. reloc16 = "gprel";
  4521. reloclo = "gprellow";
  4522. output_addr_const (file, op1);
  4523. }
  4524. if (offset)
  4525. fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
  4526. addr = XEXP (addr, 0);
  4527. switch (GET_CODE (addr))
  4528. {
  4529. case REG:
  4530. basereg = REGNO (addr);
  4531. break;
  4532. case SUBREG:
  4533. basereg = subreg_regno (addr);
  4534. break;
  4535. default:
  4536. gcc_unreachable ();
  4537. }
  4538. fprintf (file, "($%d)\t\t!%s", basereg,
  4539. (basereg == 29 ? reloc16 : reloclo));
  4540. return;
  4541. }
  4542. switch (GET_CODE (addr))
  4543. {
  4544. case REG:
  4545. basereg = REGNO (addr);
  4546. break;
  4547. case SUBREG:
  4548. basereg = subreg_regno (addr);
  4549. break;
  4550. case CONST_INT:
  4551. offset = INTVAL (addr);
  4552. break;
  4553. #if TARGET_ABI_OPEN_VMS
  4554. case SYMBOL_REF:
  4555. fprintf (file, "%s", XSTR (addr, 0));
  4556. return;
  4557. case CONST:
  4558. gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS
  4559. && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF);
  4560. fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC,
  4561. XSTR (XEXP (XEXP (addr, 0), 0), 0),
  4562. INTVAL (XEXP (XEXP (addr, 0), 1)));
  4563. return;
  4564. #endif
  4565. default:
  4566. gcc_unreachable ();
  4567. }
  4568. fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg);
  4569. }
  4570. /* Emit RTL insns to initialize the variable parts of a trampoline at
  4571. M_TRAMP. FNDECL is target function's decl. CHAIN_VALUE is an rtx
  4572. for the static chain value for the function. */
  4573. static void
  4574. alpha_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
  4575. {
  4576. rtx fnaddr, mem, word1, word2;
  4577. fnaddr = XEXP (DECL_RTL (fndecl), 0);
  4578. #ifdef POINTERS_EXTEND_UNSIGNED
  4579. fnaddr = convert_memory_address (Pmode, fnaddr);
  4580. chain_value = convert_memory_address (Pmode, chain_value);
  4581. #endif
  4582. if (TARGET_ABI_OPEN_VMS)
  4583. {
  4584. const char *fnname;
  4585. char *trname;
  4586. /* Construct the name of the trampoline entry point. */
  4587. fnname = XSTR (fnaddr, 0);
  4588. trname = (char *) alloca (strlen (fnname) + 5);
  4589. strcpy (trname, fnname);
  4590. strcat (trname, "..tr");
  4591. fnname = ggc_alloc_string (trname, strlen (trname) + 1);
  4592. word2 = gen_rtx_SYMBOL_REF (Pmode, fnname);
  4593. /* Trampoline (or "bounded") procedure descriptor is constructed from
4594. the function's procedure descriptor with certain fields zeroed in accordance with
  4595. the VMS calling standard. This is stored in the first quadword. */
  4596. word1 = force_reg (DImode, gen_const_mem (DImode, fnaddr));
  4597. word1 = expand_and (DImode, word1,
  4598. GEN_INT (HOST_WIDE_INT_C (0xffff0fff0000fff0)),
  4599. NULL);
  4600. }
  4601. else
  4602. {
  4603. /* These 4 instructions are:
  4604. ldq $1,24($27)
  4605. ldq $27,16($27)
  4606. jmp $31,($27),0
  4607. nop
  4608. We don't bother setting the HINT field of the jump; the nop
  4609. is merely there for padding. */
  4610. word1 = GEN_INT (HOST_WIDE_INT_C (0xa77b0010a43b0018));
  4611. word2 = GEN_INT (HOST_WIDE_INT_C (0x47ff041f6bfb0000));
  4612. }
  4613. /* Store the first two words, as computed above. */
  4614. mem = adjust_address (m_tramp, DImode, 0);
  4615. emit_move_insn (mem, word1);
  4616. mem = adjust_address (m_tramp, DImode, 8);
  4617. emit_move_insn (mem, word2);
  4618. /* Store function address and static chain value. */
  4619. mem = adjust_address (m_tramp, Pmode, 16);
  4620. emit_move_insn (mem, fnaddr);
  4621. mem = adjust_address (m_tramp, Pmode, 24);
  4622. emit_move_insn (mem, chain_value);
  4623. if (TARGET_ABI_OSF)
  4624. {
  4625. emit_insn (gen_imb ());
  4626. #ifdef HAVE_ENABLE_EXECUTE_STACK
  4627. emit_library_call (init_one_libfunc ("__enable_execute_stack"),
  4628. LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
  4629. #endif
  4630. }
  4631. }
  4632. /* Determine where to put an argument to a function.
  4633. Value is zero to push the argument on the stack,
  4634. or a hard register in which to store the argument.
  4635. MODE is the argument's machine mode.
  4636. TYPE is the data type of the argument (as a tree).
  4637. This is null for libcalls where that information may
  4638. not be available.
  4639. CUM is a variable of type CUMULATIVE_ARGS which gives info about
  4640. the preceding args and about the function being called.
  4641. NAMED is nonzero if this argument is a named parameter
  4642. (otherwise it is an extra parameter matching an ellipsis).
  4643. On Alpha the first 6 words of args are normally in registers
  4644. and the rest are pushed. */
  4645. static rtx
  4646. alpha_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
  4647. const_tree type, bool named ATTRIBUTE_UNUSED)
  4648. {
  4649. CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  4650. int basereg;
  4651. int num_args;
  4652. /* Don't get confused and pass small structures in FP registers. */
  4653. if (type && AGGREGATE_TYPE_P (type))
  4654. basereg = 16;
  4655. else
  4656. {
  4657. #ifdef ENABLE_CHECKING
  4658. /* With alpha_split_complex_arg, we shouldn't see any raw complex
  4659. values here. */
  4660. gcc_assert (!COMPLEX_MODE_P (mode));
  4661. #endif
  4662. /* Set up defaults for FP operands passed in FP registers, and
  4663. integral operands passed in integer registers. */
  4664. if (TARGET_FPREGS && GET_MODE_CLASS (mode) == MODE_FLOAT)
  4665. basereg = 32 + 16;
  4666. else
  4667. basereg = 16;
  4668. }
  4669. /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for
  4670. the two platforms, so we can't avoid conditional compilation. */
  4671. #if TARGET_ABI_OPEN_VMS
  4672. {
  4673. if (mode == VOIDmode)
  4674. return alpha_arg_info_reg_val (*cum);
  4675. num_args = cum->num_args;
  4676. if (num_args >= 6
  4677. || targetm.calls.must_pass_in_stack (mode, type))
  4678. return NULL_RTX;
  4679. }
  4680. #elif TARGET_ABI_OSF
  4681. {
  4682. if (*cum >= 6)
  4683. return NULL_RTX;
  4684. num_args = *cum;
  4685. /* VOID is passed as a special flag for "last argument". */
  4686. if (type == void_type_node)
  4687. basereg = 16;
  4688. else if (targetm.calls.must_pass_in_stack (mode, type))
  4689. return NULL_RTX;
  4690. }
  4691. #else
  4692. #error Unhandled ABI
  4693. #endif
  4694. return gen_rtx_REG (mode, num_args + basereg);
  4695. }
  4696. /* Update the data in CUM to advance over an argument
  4697. of mode MODE and data type TYPE.
  4698. (TYPE is null for libcalls where that information may not be available.) */
  4699. static void
  4700. alpha_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
  4701. const_tree type, bool named ATTRIBUTE_UNUSED)
  4702. {
  4703. CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  4704. bool onstack = targetm.calls.must_pass_in_stack (mode, type);
  4705. int increment = onstack ? 6 : ALPHA_ARG_SIZE (mode, type, named);
  4706. #if TARGET_ABI_OSF
  4707. *cum += increment;
  4708. #else
  4709. if (!onstack && cum->num_args < 6)
  4710. cum->atypes[cum->num_args] = alpha_arg_type (mode);
  4711. cum->num_args += increment;
  4712. #endif
  4713. }
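/* Implement TARGET_ARG_PARTIAL_BYTES.  Return the number of bytes of an
   argument that are passed in registers when the argument straddles the
   boundary between the six argument registers and the stack.  */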
  4714. static int
  4715. alpha_arg_partial_bytes (cumulative_args_t cum_v,
  4716. enum machine_mode mode ATTRIBUTE_UNUSED,
  4717. tree type ATTRIBUTE_UNUSED,
  4718. bool named ATTRIBUTE_UNUSED)
  4719. {
  4720. int words = 0;
  4721. CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v);
  4722. #if TARGET_ABI_OPEN_VMS
  4723. if (cum->num_args < 6
  4724. && 6 < cum->num_args + ALPHA_ARG_SIZE (mode, type, named))
  4725. words = 6 - cum->num_args;
  4726. #elif TARGET_ABI_OSF
  4727. if (*cum < 6 && 6 < *cum + ALPHA_ARG_SIZE (mode, type, named))
  4728. words = 6 - *cum;
  4729. #else
  4730. #error Unhandled ABI
  4731. #endif
  4732. return words * UNITS_PER_WORD;
  4733. }
  4734. /* Return true if TYPE must be returned in memory, instead of in registers. */
  4735. static bool
  4736. alpha_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
  4737. {
  4738. enum machine_mode mode = VOIDmode;
  4739. int size;
  4740. if (type)
  4741. {
  4742. mode = TYPE_MODE (type);
  4743. /* All aggregates are returned in memory, except on OpenVMS where
  4744. records that fit 64 bits should be returned by immediate value
  4745. as required by section 3.8.7.1 of the OpenVMS Calling Standard. */
  4746. if (TARGET_ABI_OPEN_VMS
  4747. && TREE_CODE (type) != ARRAY_TYPE
   4748. && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 8)
  4749. return false;
  4750. if (AGGREGATE_TYPE_P (type))
  4751. return true;
  4752. }
  4753. size = GET_MODE_SIZE (mode);
  4754. switch (GET_MODE_CLASS (mode))
  4755. {
  4756. case MODE_VECTOR_FLOAT:
  4757. /* Pass all float vectors in memory, like an aggregate. */
  4758. return true;
  4759. case MODE_COMPLEX_FLOAT:
  4760. /* We judge complex floats on the size of their element,
  4761. not the size of the whole type. */
  4762. size = GET_MODE_UNIT_SIZE (mode);
  4763. break;
  4764. case MODE_INT:
  4765. case MODE_FLOAT:
  4766. case MODE_COMPLEX_INT:
  4767. case MODE_VECTOR_INT:
  4768. break;
  4769. default:
  4770. /* ??? We get called on all sorts of random stuff from
  4771. aggregate_value_p. We must return something, but it's not
  4772. clear what's safe to return. Pretend it's a struct I
  4773. guess. */
  4774. return true;
  4775. }
  4776. /* Otherwise types must fit in one register. */
  4777. return size > UNITS_PER_WORD;
  4778. }
  4779. /* Return true if TYPE should be passed by invisible reference. */
  4780. static bool
  4781. alpha_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
  4782. enum machine_mode mode,
  4783. const_tree type ATTRIBUTE_UNUSED,
  4784. bool named ATTRIBUTE_UNUSED)
  4785. {
  4786. return mode == TFmode || mode == TCmode;
  4787. }
  4788. /* Define how to find the value returned by a function. VALTYPE is the
  4789. data type of the value (as a tree). If the precise function being
  4790. called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0.
  4791. MODE is set instead of VALTYPE for libcalls.
  4792. On Alpha the value is found in $0 for integer functions and
  4793. $f0 for floating-point functions. */
  4794. rtx
  4795. function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
  4796. enum machine_mode mode)
  4797. {
  4798. unsigned int regnum, dummy ATTRIBUTE_UNUSED;
  4799. enum mode_class mclass;
  4800. gcc_assert (!valtype || !alpha_return_in_memory (valtype, func));
  4801. if (valtype)
  4802. mode = TYPE_MODE (valtype);
  4803. mclass = GET_MODE_CLASS (mode);
  4804. switch (mclass)
  4805. {
  4806. case MODE_INT:
  4807. /* Do the same thing as PROMOTE_MODE except for libcalls on VMS,
  4808. where we have them returning both SImode and DImode. */
  4809. if (!(TARGET_ABI_OPEN_VMS && valtype && AGGREGATE_TYPE_P (valtype)))
  4810. PROMOTE_MODE (mode, dummy, valtype);
  4811. /* FALLTHRU */
  4812. case MODE_COMPLEX_INT:
  4813. case MODE_VECTOR_INT:
  4814. regnum = 0;
  4815. break;
  4816. case MODE_FLOAT:
  4817. regnum = 32;
  4818. break;
  4819. case MODE_COMPLEX_FLOAT:
  4820. {
  4821. enum machine_mode cmode = GET_MODE_INNER (mode);
  4822. return gen_rtx_PARALLEL
  4823. (VOIDmode,
  4824. gen_rtvec (2,
  4825. gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32),
  4826. const0_rtx),
  4827. gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33),
  4828. GEN_INT (GET_MODE_SIZE (cmode)))));
  4829. }
  4830. case MODE_RANDOM:
  4831. /* We should only reach here for BLKmode on VMS. */
  4832. gcc_assert (TARGET_ABI_OPEN_VMS && mode == BLKmode);
  4833. regnum = 0;
  4834. break;
  4835. default:
  4836. gcc_unreachable ();
  4837. }
  4838. return gen_rtx_REG (mode, regnum);
  4839. }
  4840. /* TCmode complex values are passed by invisible reference. We
  4841. should not split these values. */
  4842. static bool
  4843. alpha_split_complex_arg (const_tree type)
  4844. {
  4845. return TYPE_MODE (type) != TCmode;
  4846. }
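/* Implement TARGET_BUILD_BUILTIN_VA_LIST.  On OSF/1 the va_list is a record
   with a __base pointer into the argument save area and an integer __offset;
   on OpenVMS a plain pointer suffices.  */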
  4847. static tree
  4848. alpha_build_builtin_va_list (void)
  4849. {
  4850. tree base, ofs, space, record, type_decl;
  4851. if (TARGET_ABI_OPEN_VMS)
  4852. return ptr_type_node;
  4853. record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  4854. type_decl = build_decl (BUILTINS_LOCATION,
  4855. TYPE_DECL, get_identifier ("__va_list_tag"), record);
  4856. TYPE_STUB_DECL (record) = type_decl;
  4857. TYPE_NAME (record) = type_decl;
  4858. /* C++? SET_IS_AGGR_TYPE (record, 1); */
  4859. /* Dummy field to prevent alignment warnings. */
  4860. space = build_decl (BUILTINS_LOCATION,
  4861. FIELD_DECL, NULL_TREE, integer_type_node);
  4862. DECL_FIELD_CONTEXT (space) = record;
  4863. DECL_ARTIFICIAL (space) = 1;
  4864. DECL_IGNORED_P (space) = 1;
  4865. ofs = build_decl (BUILTINS_LOCATION,
  4866. FIELD_DECL, get_identifier ("__offset"),
  4867. integer_type_node);
  4868. DECL_FIELD_CONTEXT (ofs) = record;
  4869. DECL_CHAIN (ofs) = space;
  4870. /* ??? This is a hack, __offset is marked volatile to prevent
  4871. DCE that confuses stdarg optimization and results in
  4872. gcc.c-torture/execute/stdarg-1.c failure. See PR 41089. */
  4873. TREE_THIS_VOLATILE (ofs) = 1;
  4874. base = build_decl (BUILTINS_LOCATION,
  4875. FIELD_DECL, get_identifier ("__base"),
  4876. ptr_type_node);
  4877. DECL_FIELD_CONTEXT (base) = record;
  4878. DECL_CHAIN (base) = ofs;
  4879. TYPE_FIELDS (record) = base;
  4880. layout_type (record);
  4881. va_list_gpr_counter_field = ofs;
  4882. return record;
  4883. }
  4884. #if TARGET_ABI_OSF
  4885. /* Helper function for alpha_stdarg_optimize_hook. Skip over casts
  4886. and constant additions. */
  4887. static gimple
  4888. va_list_skip_additions (tree lhs)
  4889. {
  4890. gimple stmt;
  4891. for (;;)
  4892. {
  4893. enum tree_code code;
  4894. stmt = SSA_NAME_DEF_STMT (lhs);
  4895. if (gimple_code (stmt) == GIMPLE_PHI)
  4896. return stmt;
  4897. if (!is_gimple_assign (stmt)
  4898. || gimple_assign_lhs (stmt) != lhs)
  4899. return NULL;
  4900. if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
  4901. return stmt;
  4902. code = gimple_assign_rhs_code (stmt);
  4903. if (!CONVERT_EXPR_CODE_P (code)
  4904. && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR)
  4905. || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST
  4906. || !host_integerp (gimple_assign_rhs2 (stmt), 1)))
  4907. return stmt;
  4908. lhs = gimple_assign_rhs1 (stmt);
  4909. }
  4910. }
  4911. /* Check if LHS = RHS statement is
  4912. LHS = *(ap.__base + ap.__offset + cst)
  4913. or
  4914. LHS = *(ap.__base
  4915. + ((ap.__offset + cst <= 47)
  4916. ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2).
  4917. If the former, indicate that GPR registers are needed,
  4918. if the latter, indicate that FPR registers are needed.
  4919. Also look for LHS = (*ptr).field, where ptr is one of the forms
  4920. listed above.
  4921. On alpha, cfun->va_list_gpr_size is used as size of the needed
  4922. regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR
  4923. registers are needed and bit 1 set if FPR registers are needed.
  4924. Return true if va_list references should not be scanned for the
  4925. current statement. */
  4926. static bool
  4927. alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt)
  4928. {
  4929. tree base, offset, rhs;
  4930. int offset_arg = 1;
  4931. gimple base_stmt;
  4932. if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))
  4933. != GIMPLE_SINGLE_RHS)
  4934. return false;
  4935. rhs = gimple_assign_rhs1 (stmt);
  4936. while (handled_component_p (rhs))
  4937. rhs = TREE_OPERAND (rhs, 0);
  4938. if (TREE_CODE (rhs) != MEM_REF
  4939. || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME)
  4940. return false;
  4941. stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0));
  4942. if (stmt == NULL
  4943. || !is_gimple_assign (stmt)
  4944. || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
  4945. return false;
  4946. base = gimple_assign_rhs1 (stmt);
  4947. if (TREE_CODE (base) == SSA_NAME)
  4948. {
  4949. base_stmt = va_list_skip_additions (base);
  4950. if (base_stmt
  4951. && is_gimple_assign (base_stmt)
  4952. && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
  4953. base = gimple_assign_rhs1 (base_stmt);
  4954. }
  4955. if (TREE_CODE (base) != COMPONENT_REF
  4956. || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
  4957. {
  4958. base = gimple_assign_rhs2 (stmt);
  4959. if (TREE_CODE (base) == SSA_NAME)
  4960. {
  4961. base_stmt = va_list_skip_additions (base);
  4962. if (base_stmt
  4963. && is_gimple_assign (base_stmt)
  4964. && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF)
  4965. base = gimple_assign_rhs1 (base_stmt);
  4966. }
  4967. if (TREE_CODE (base) != COMPONENT_REF
  4968. || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node))
  4969. return false;
  4970. offset_arg = 0;
  4971. }
  4972. base = get_base_address (base);
  4973. if (TREE_CODE (base) != VAR_DECL
  4974. || !bitmap_bit_p (si->va_list_vars, DECL_UID (base)))
  4975. return false;
  4976. offset = gimple_op (stmt, 1 + offset_arg);
  4977. if (TREE_CODE (offset) == SSA_NAME)
  4978. {
  4979. gimple offset_stmt = va_list_skip_additions (offset);
  4980. if (offset_stmt
  4981. && gimple_code (offset_stmt) == GIMPLE_PHI)
  4982. {
  4983. HOST_WIDE_INT sub;
  4984. gimple arg1_stmt, arg2_stmt;
  4985. tree arg1, arg2;
  4986. enum tree_code code1, code2;
  4987. if (gimple_phi_num_args (offset_stmt) != 2)
  4988. goto escapes;
  4989. arg1_stmt
  4990. = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0));
  4991. arg2_stmt
  4992. = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1));
  4993. if (arg1_stmt == NULL
  4994. || !is_gimple_assign (arg1_stmt)
  4995. || arg2_stmt == NULL
  4996. || !is_gimple_assign (arg2_stmt))
  4997. goto escapes;
  4998. code1 = gimple_assign_rhs_code (arg1_stmt);
  4999. code2 = gimple_assign_rhs_code (arg2_stmt);
  5000. if (code1 == COMPONENT_REF
  5001. && (code2 == MINUS_EXPR || code2 == PLUS_EXPR))
  5002. /* Do nothing. */;
  5003. else if (code2 == COMPONENT_REF
  5004. && (code1 == MINUS_EXPR || code1 == PLUS_EXPR))
  5005. {
  5006. gimple tem = arg1_stmt;
  5007. code2 = code1;
  5008. arg1_stmt = arg2_stmt;
  5009. arg2_stmt = tem;
  5010. }
  5011. else
  5012. goto escapes;
  5013. if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0))
  5014. goto escapes;
  5015. sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0);
  5016. if (code2 == MINUS_EXPR)
  5017. sub = -sub;
  5018. if (sub < -48 || sub > -32)
  5019. goto escapes;
  5020. arg1 = gimple_assign_rhs1 (arg1_stmt);
  5021. arg2 = gimple_assign_rhs1 (arg2_stmt);
  5022. if (TREE_CODE (arg2) == SSA_NAME)
  5023. {
  5024. arg2_stmt = va_list_skip_additions (arg2);
  5025. if (arg2_stmt == NULL
  5026. || !is_gimple_assign (arg2_stmt)
  5027. || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF)
  5028. goto escapes;
  5029. arg2 = gimple_assign_rhs1 (arg2_stmt);
  5030. }
  5031. if (arg1 != arg2)
  5032. goto escapes;
  5033. if (TREE_CODE (arg1) != COMPONENT_REF
  5034. || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field
  5035. || get_base_address (arg1) != base)
  5036. goto escapes;
  5037. /* Need floating point regs. */
  5038. cfun->va_list_fpr_size |= 2;
  5039. return false;
  5040. }
  5041. if (offset_stmt
  5042. && is_gimple_assign (offset_stmt)
  5043. && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF)
  5044. offset = gimple_assign_rhs1 (offset_stmt);
  5045. }
  5046. if (TREE_CODE (offset) != COMPONENT_REF
  5047. || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field
  5048. || get_base_address (offset) != base)
  5049. goto escapes;
  5050. else
  5051. /* Need general regs. */
  5052. cfun->va_list_fpr_size |= 1;
  5053. return false;
  5054. escapes:
  5055. si->va_list_escapes = true;
  5056. return false;
  5057. }
  5058. #endif
   5059. /* Perform any actions needed for a function that is receiving a
  5060. variable number of arguments. */
  5061. static void
  5062. alpha_setup_incoming_varargs (cumulative_args_t pcum, enum machine_mode mode,
  5063. tree type, int *pretend_size, int no_rtl)
  5064. {
  5065. CUMULATIVE_ARGS cum = *get_cumulative_args (pcum);
  5066. /* Skip the current argument. */
  5067. targetm.calls.function_arg_advance (pack_cumulative_args (&cum), mode, type,
  5068. true);
  5069. #if TARGET_ABI_OPEN_VMS
  5070. /* For VMS, we allocate space for all 6 arg registers plus a count.
  5071. However, if NO registers need to be saved, don't allocate any space.
  5072. This is not only because we won't need the space, but because AP
  5073. includes the current_pretend_args_size and we don't want to mess up
  5074. any ap-relative addresses already made. */
  5075. if (cum.num_args < 6)
  5076. {
  5077. if (!no_rtl)
  5078. {
  5079. emit_move_insn (gen_rtx_REG (DImode, 1), virtual_incoming_args_rtx);
  5080. emit_insn (gen_arg_home ());
  5081. }
  5082. *pretend_size = 7 * UNITS_PER_WORD;
  5083. }
  5084. #else
  5085. /* On OSF/1 and friends, we allocate space for all 12 arg registers, but
   5086. only push those that remain. However, if NO registers need to
  5087. be saved, don't allocate any space. This is not only because we won't
  5088. need the space, but because AP includes the current_pretend_args_size
  5089. and we don't want to mess up any ap-relative addresses already made.
  5090. If we are not to use the floating-point registers, save the integer
  5091. registers where we would put the floating-point registers. This is
  5092. not the most efficient way to implement varargs with just one register
  5093. class, but it isn't worth doing anything more efficient in this rare
  5094. case. */
  5095. if (cum >= 6)
  5096. return;
  5097. if (!no_rtl)
  5098. {
  5099. int count;
  5100. alias_set_type set = get_varargs_alias_set ();
  5101. rtx tmp;
  5102. count = cfun->va_list_gpr_size / UNITS_PER_WORD;
  5103. if (count > 6 - cum)
  5104. count = 6 - cum;
  5105. /* Detect whether integer registers or floating-point registers
  5106. are needed by the detected va_arg statements. See above for
  5107. how these values are computed. Note that the "escape" value
  5108. is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of
  5109. these bits set. */
  5110. gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3);
  5111. if (cfun->va_list_fpr_size & 1)
  5112. {
  5113. tmp = gen_rtx_MEM (BLKmode,
  5114. plus_constant (Pmode, virtual_incoming_args_rtx,
  5115. (cum + 6) * UNITS_PER_WORD));
  5116. MEM_NOTRAP_P (tmp) = 1;
  5117. set_mem_alias_set (tmp, set);
  5118. move_block_from_reg (16 + cum, tmp, count);
  5119. }
  5120. if (cfun->va_list_fpr_size & 2)
  5121. {
  5122. tmp = gen_rtx_MEM (BLKmode,
  5123. plus_constant (Pmode, virtual_incoming_args_rtx,
  5124. cum * UNITS_PER_WORD));
  5125. MEM_NOTRAP_P (tmp) = 1;
  5126. set_mem_alias_set (tmp, set);
  5127. move_block_from_reg (16 + cum + TARGET_FPREGS*32, tmp, count);
  5128. }
  5129. }
  5130. *pretend_size = 12 * UNITS_PER_WORD;
  5131. #endif
  5132. }
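/* Implement TARGET_EXPAND_BUILTIN_VA_START.  Point __base (or, on VMS, the
   bare pointer) at the argument area set up above and set __offset to the
   number of bytes of named arguments already consumed.  */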
  5133. static void
  5134. alpha_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
  5135. {
  5136. HOST_WIDE_INT offset;
  5137. tree t, offset_field, base_field;
  5138. if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK)
  5139. return;
  5140. /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base
  5141. up by 48, storing fp arg registers in the first 48 bytes, and the
  5142. integer arg registers in the next 48 bytes. This is only done,
  5143. however, if any integer registers need to be stored.
  5144. If no integer registers need be stored, then we must subtract 48
  5145. in order to account for the integer arg registers which are counted
  5146. in argsize above, but which are not actually stored on the stack.
  5147. Must further be careful here about structures straddling the last
  5148. integer argument register; that futzes with pretend_args_size,
  5149. which changes the meaning of AP. */
  5150. if (NUM_ARGS < 6)
  5151. offset = TARGET_ABI_OPEN_VMS ? UNITS_PER_WORD : 6 * UNITS_PER_WORD;
  5152. else
  5153. offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size;
  5154. if (TARGET_ABI_OPEN_VMS)
  5155. {
  5156. t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
  5157. t = fold_build_pointer_plus_hwi (t, offset + NUM_ARGS * UNITS_PER_WORD);
  5158. t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
  5159. TREE_SIDE_EFFECTS (t) = 1;
  5160. expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
  5161. }
  5162. else
  5163. {
  5164. base_field = TYPE_FIELDS (TREE_TYPE (valist));
  5165. offset_field = DECL_CHAIN (base_field);
  5166. base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
  5167. valist, base_field, NULL_TREE);
  5168. offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
  5169. valist, offset_field, NULL_TREE);
  5170. t = make_tree (ptr_type_node, virtual_incoming_args_rtx);
  5171. t = fold_build_pointer_plus_hwi (t, offset);
  5172. t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t);
  5173. TREE_SIDE_EFFECTS (t) = 1;
  5174. expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
  5175. t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD);
  5176. t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t);
  5177. TREE_SIDE_EFFECTS (t) = 1;
  5178. expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
  5179. }
  5180. }
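/* Helper for alpha_gimplify_va_arg.  Build the address of the next argument
   of TYPE, redirecting floating-point arguments that are still within the
   first six slots to the FP register save area (48 bytes below __base), and
   advance OFFSET by the argument size rounded up to a multiple of 8.  */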
  5181. static tree
  5182. alpha_gimplify_va_arg_1 (tree type, tree base, tree offset,
  5183. gimple_seq *pre_p)
  5184. {
  5185. tree type_size, ptr_type, addend, t, addr;
  5186. gimple_seq internal_post;
  5187. /* If the type could not be passed in registers, skip the block
  5188. reserved for the registers. */
  5189. if (targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
  5190. {
  5191. t = build_int_cst (TREE_TYPE (offset), 6*8);
  5192. gimplify_assign (offset,
  5193. build2 (MAX_EXPR, TREE_TYPE (offset), offset, t),
  5194. pre_p);
  5195. }
  5196. addend = offset;
  5197. ptr_type = build_pointer_type_for_mode (type, ptr_mode, true);
  5198. if (TREE_CODE (type) == COMPLEX_TYPE)
  5199. {
  5200. tree real_part, imag_part, real_temp;
  5201. real_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
  5202. offset, pre_p);
  5203. /* Copy the value into a new temporary, lest the formal temporary
  5204. be reused out from under us. */
  5205. real_temp = get_initialized_tmp_var (real_part, pre_p, NULL);
  5206. imag_part = alpha_gimplify_va_arg_1 (TREE_TYPE (type), base,
  5207. offset, pre_p);
  5208. return build2 (COMPLEX_EXPR, type, real_temp, imag_part);
  5209. }
  5210. else if (TREE_CODE (type) == REAL_TYPE)
  5211. {
  5212. tree fpaddend, cond, fourtyeight;
  5213. fourtyeight = build_int_cst (TREE_TYPE (addend), 6*8);
  5214. fpaddend = fold_build2 (MINUS_EXPR, TREE_TYPE (addend),
  5215. addend, fourtyeight);
  5216. cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight);
  5217. addend = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond,
  5218. fpaddend, addend);
  5219. }
  5220. /* Build the final address and force that value into a temporary. */
  5221. addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend);
  5222. internal_post = NULL;
  5223. gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue);
  5224. gimple_seq_add_seq (pre_p, internal_post);
  5225. /* Update the offset field. */
  5226. type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));
  5227. if (type_size == NULL || TREE_OVERFLOW (type_size))
  5228. t = size_zero_node;
  5229. else
  5230. {
  5231. t = size_binop (PLUS_EXPR, type_size, size_int (7));
  5232. t = size_binop (TRUNC_DIV_EXPR, t, size_int (8));
  5233. t = size_binop (MULT_EXPR, t, size_int (8));
  5234. }
  5235. t = fold_convert (TREE_TYPE (offset), t);
  5236. gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t),
  5237. pre_p);
  5238. return build_va_arg_indirect_ref (addr);
  5239. }
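/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR.  Pull __base and __offset out of
   the va_list, compute the value with alpha_gimplify_va_arg_1, and store the
   updated offset back into the va_list.  */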
  5240. static tree
  5241. alpha_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
  5242. gimple_seq *post_p)
  5243. {
  5244. tree offset_field, base_field, offset, base, t, r;
  5245. bool indirect;
  5246. if (TARGET_ABI_OPEN_VMS)
  5247. return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
  5248. base_field = TYPE_FIELDS (va_list_type_node);
  5249. offset_field = DECL_CHAIN (base_field);
  5250. base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field),
  5251. valist, base_field, NULL_TREE);
  5252. offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field),
  5253. valist, offset_field, NULL_TREE);
  5254. /* Pull the fields of the structure out into temporaries. Since we never
  5255. modify the base field, we can use a formal temporary. Sign-extend the
  5256. offset field so that it's the proper width for pointer arithmetic. */
  5257. base = get_formal_tmp_var (base_field, pre_p);
  5258. t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field);
  5259. offset = get_initialized_tmp_var (t, pre_p, NULL);
  5260. indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  5261. if (indirect)
  5262. type = build_pointer_type_for_mode (type, ptr_mode, true);
  5263. /* Find the value. Note that this will be a stable indirection, or
  5264. a composite of stable indirections in the case of complex. */
  5265. r = alpha_gimplify_va_arg_1 (type, base, offset, pre_p);
  5266. /* Stuff the offset temporary back into its field. */
  5267. gimplify_assign (unshare_expr (offset_field),
  5268. fold_convert (TREE_TYPE (offset_field), offset), pre_p);
  5269. if (indirect)
  5270. r = build_va_arg_indirect_ref (r);
  5271. return r;
  5272. }
  5273. /* Builtins. */
  5274. enum alpha_builtin
  5275. {
  5276. ALPHA_BUILTIN_CMPBGE,
  5277. ALPHA_BUILTIN_EXTBL,
  5278. ALPHA_BUILTIN_EXTWL,
  5279. ALPHA_BUILTIN_EXTLL,
  5280. ALPHA_BUILTIN_EXTQL,
  5281. ALPHA_BUILTIN_EXTWH,
  5282. ALPHA_BUILTIN_EXTLH,
  5283. ALPHA_BUILTIN_EXTQH,
  5284. ALPHA_BUILTIN_INSBL,
  5285. ALPHA_BUILTIN_INSWL,
  5286. ALPHA_BUILTIN_INSLL,
  5287. ALPHA_BUILTIN_INSQL,
  5288. ALPHA_BUILTIN_INSWH,
  5289. ALPHA_BUILTIN_INSLH,
  5290. ALPHA_BUILTIN_INSQH,
  5291. ALPHA_BUILTIN_MSKBL,
  5292. ALPHA_BUILTIN_MSKWL,
  5293. ALPHA_BUILTIN_MSKLL,
  5294. ALPHA_BUILTIN_MSKQL,
  5295. ALPHA_BUILTIN_MSKWH,
  5296. ALPHA_BUILTIN_MSKLH,
  5297. ALPHA_BUILTIN_MSKQH,
  5298. ALPHA_BUILTIN_UMULH,
  5299. ALPHA_BUILTIN_ZAP,
  5300. ALPHA_BUILTIN_ZAPNOT,
  5301. ALPHA_BUILTIN_AMASK,
  5302. ALPHA_BUILTIN_IMPLVER,
  5303. ALPHA_BUILTIN_RPCC,
  5304. ALPHA_BUILTIN_THREAD_POINTER,
  5305. ALPHA_BUILTIN_SET_THREAD_POINTER,
  5306. ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
  5307. ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER,
  5308. /* TARGET_MAX */
  5309. ALPHA_BUILTIN_MINUB8,
  5310. ALPHA_BUILTIN_MINSB8,
  5311. ALPHA_BUILTIN_MINUW4,
  5312. ALPHA_BUILTIN_MINSW4,
  5313. ALPHA_BUILTIN_MAXUB8,
  5314. ALPHA_BUILTIN_MAXSB8,
  5315. ALPHA_BUILTIN_MAXUW4,
  5316. ALPHA_BUILTIN_MAXSW4,
  5317. ALPHA_BUILTIN_PERR,
  5318. ALPHA_BUILTIN_PKLB,
  5319. ALPHA_BUILTIN_PKWB,
  5320. ALPHA_BUILTIN_UNPKBL,
  5321. ALPHA_BUILTIN_UNPKBW,
  5322. /* TARGET_CIX */
  5323. ALPHA_BUILTIN_CTTZ,
  5324. ALPHA_BUILTIN_CTLZ,
  5325. ALPHA_BUILTIN_CTPOP,
  5326. ALPHA_BUILTIN_max
  5327. };
  5328. static enum insn_code const code_for_builtin[ALPHA_BUILTIN_max] = {
  5329. CODE_FOR_builtin_cmpbge,
  5330. CODE_FOR_extbl,
  5331. CODE_FOR_extwl,
  5332. CODE_FOR_extll,
  5333. CODE_FOR_extql,
  5334. CODE_FOR_extwh,
  5335. CODE_FOR_extlh,
  5336. CODE_FOR_extqh,
  5337. CODE_FOR_builtin_insbl,
  5338. CODE_FOR_builtin_inswl,
  5339. CODE_FOR_builtin_insll,
  5340. CODE_FOR_insql,
  5341. CODE_FOR_inswh,
  5342. CODE_FOR_inslh,
  5343. CODE_FOR_insqh,
  5344. CODE_FOR_mskbl,
  5345. CODE_FOR_mskwl,
  5346. CODE_FOR_mskll,
  5347. CODE_FOR_mskql,
  5348. CODE_FOR_mskwh,
  5349. CODE_FOR_msklh,
  5350. CODE_FOR_mskqh,
  5351. CODE_FOR_umuldi3_highpart,
  5352. CODE_FOR_builtin_zap,
  5353. CODE_FOR_builtin_zapnot,
  5354. CODE_FOR_builtin_amask,
  5355. CODE_FOR_builtin_implver,
  5356. CODE_FOR_builtin_rpcc,
  5357. CODE_FOR_load_tp,
  5358. CODE_FOR_set_tp,
  5359. CODE_FOR_builtin_establish_vms_condition_handler,
  5360. CODE_FOR_builtin_revert_vms_condition_handler,
  5361. /* TARGET_MAX */
  5362. CODE_FOR_builtin_minub8,
  5363. CODE_FOR_builtin_minsb8,
  5364. CODE_FOR_builtin_minuw4,
  5365. CODE_FOR_builtin_minsw4,
  5366. CODE_FOR_builtin_maxub8,
  5367. CODE_FOR_builtin_maxsb8,
  5368. CODE_FOR_builtin_maxuw4,
  5369. CODE_FOR_builtin_maxsw4,
  5370. CODE_FOR_builtin_perr,
  5371. CODE_FOR_builtin_pklb,
  5372. CODE_FOR_builtin_pkwb,
  5373. CODE_FOR_builtin_unpkbl,
  5374. CODE_FOR_builtin_unpkbw,
  5375. /* TARGET_CIX */
  5376. CODE_FOR_ctzdi2,
  5377. CODE_FOR_clzdi2,
  5378. CODE_FOR_popcountdi2
  5379. };
  5380. struct alpha_builtin_def
  5381. {
  5382. const char *name;
  5383. enum alpha_builtin code;
  5384. unsigned int target_mask;
  5385. bool is_const;
  5386. };
  5387. static struct alpha_builtin_def const zero_arg_builtins[] = {
  5388. { "__builtin_alpha_implver", ALPHA_BUILTIN_IMPLVER, 0, true },
  5389. { "__builtin_alpha_rpcc", ALPHA_BUILTIN_RPCC, 0, false }
  5390. };
  5391. static struct alpha_builtin_def const one_arg_builtins[] = {
  5392. { "__builtin_alpha_amask", ALPHA_BUILTIN_AMASK, 0, true },
  5393. { "__builtin_alpha_pklb", ALPHA_BUILTIN_PKLB, MASK_MAX, true },
  5394. { "__builtin_alpha_pkwb", ALPHA_BUILTIN_PKWB, MASK_MAX, true },
  5395. { "__builtin_alpha_unpkbl", ALPHA_BUILTIN_UNPKBL, MASK_MAX, true },
  5396. { "__builtin_alpha_unpkbw", ALPHA_BUILTIN_UNPKBW, MASK_MAX, true },
  5397. { "__builtin_alpha_cttz", ALPHA_BUILTIN_CTTZ, MASK_CIX, true },
  5398. { "__builtin_alpha_ctlz", ALPHA_BUILTIN_CTLZ, MASK_CIX, true },
  5399. { "__builtin_alpha_ctpop", ALPHA_BUILTIN_CTPOP, MASK_CIX, true }
  5400. };
  5401. static struct alpha_builtin_def const two_arg_builtins[] = {
  5402. { "__builtin_alpha_cmpbge", ALPHA_BUILTIN_CMPBGE, 0, true },
  5403. { "__builtin_alpha_extbl", ALPHA_BUILTIN_EXTBL, 0, true },
  5404. { "__builtin_alpha_extwl", ALPHA_BUILTIN_EXTWL, 0, true },
  5405. { "__builtin_alpha_extll", ALPHA_BUILTIN_EXTLL, 0, true },
  5406. { "__builtin_alpha_extql", ALPHA_BUILTIN_EXTQL, 0, true },
  5407. { "__builtin_alpha_extwh", ALPHA_BUILTIN_EXTWH, 0, true },
  5408. { "__builtin_alpha_extlh", ALPHA_BUILTIN_EXTLH, 0, true },
  5409. { "__builtin_alpha_extqh", ALPHA_BUILTIN_EXTQH, 0, true },
  5410. { "__builtin_alpha_insbl", ALPHA_BUILTIN_INSBL, 0, true },
  5411. { "__builtin_alpha_inswl", ALPHA_BUILTIN_INSWL, 0, true },
  5412. { "__builtin_alpha_insll", ALPHA_BUILTIN_INSLL, 0, true },
  5413. { "__builtin_alpha_insql", ALPHA_BUILTIN_INSQL, 0, true },
  5414. { "__builtin_alpha_inswh", ALPHA_BUILTIN_INSWH, 0, true },
  5415. { "__builtin_alpha_inslh", ALPHA_BUILTIN_INSLH, 0, true },
  5416. { "__builtin_alpha_insqh", ALPHA_BUILTIN_INSQH, 0, true },
  5417. { "__builtin_alpha_mskbl", ALPHA_BUILTIN_MSKBL, 0, true },
  5418. { "__builtin_alpha_mskwl", ALPHA_BUILTIN_MSKWL, 0, true },
  5419. { "__builtin_alpha_mskll", ALPHA_BUILTIN_MSKLL, 0, true },
  5420. { "__builtin_alpha_mskql", ALPHA_BUILTIN_MSKQL, 0, true },
  5421. { "__builtin_alpha_mskwh", ALPHA_BUILTIN_MSKWH, 0, true },
  5422. { "__builtin_alpha_msklh", ALPHA_BUILTIN_MSKLH, 0, true },
  5423. { "__builtin_alpha_mskqh", ALPHA_BUILTIN_MSKQH, 0, true },
  5424. { "__builtin_alpha_umulh", ALPHA_BUILTIN_UMULH, 0, true },
  5425. { "__builtin_alpha_zap", ALPHA_BUILTIN_ZAP, 0, true },
  5426. { "__builtin_alpha_zapnot", ALPHA_BUILTIN_ZAPNOT, 0, true },
  5427. { "__builtin_alpha_minub8", ALPHA_BUILTIN_MINUB8, MASK_MAX, true },
  5428. { "__builtin_alpha_minsb8", ALPHA_BUILTIN_MINSB8, MASK_MAX, true },
  5429. { "__builtin_alpha_minuw4", ALPHA_BUILTIN_MINUW4, MASK_MAX, true },
  5430. { "__builtin_alpha_minsw4", ALPHA_BUILTIN_MINSW4, MASK_MAX, true },
  5431. { "__builtin_alpha_maxub8", ALPHA_BUILTIN_MAXUB8, MASK_MAX, true },
  5432. { "__builtin_alpha_maxsb8", ALPHA_BUILTIN_MAXSB8, MASK_MAX, true },
  5433. { "__builtin_alpha_maxuw4", ALPHA_BUILTIN_MAXUW4, MASK_MAX, true },
  5434. { "__builtin_alpha_maxsw4", ALPHA_BUILTIN_MAXSW4, MASK_MAX, true },
  5435. { "__builtin_alpha_perr", ALPHA_BUILTIN_PERR, MASK_MAX, true }
  5436. };
  5437. static GTY(()) tree alpha_dimode_u;
  5438. static GTY(()) tree alpha_v8qi_u;
  5439. static GTY(()) tree alpha_v8qi_s;
  5440. static GTY(()) tree alpha_v4hi_u;
  5441. static GTY(()) tree alpha_v4hi_s;
  5442. static GTY(()) tree alpha_builtins[(int) ALPHA_BUILTIN_max];
  5443. /* Return the alpha builtin for CODE. */
  5444. static tree
  5445. alpha_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
  5446. {
  5447. if (code >= ALPHA_BUILTIN_max)
  5448. return error_mark_node;
  5449. return alpha_builtins[code];
  5450. }
  5451. /* Helper function of alpha_init_builtins. Add the built-in specified
  5452. by NAME, TYPE, CODE, and ECF. */
  5453. static void
  5454. alpha_builtin_function (const char *name, tree ftype,
  5455. enum alpha_builtin code, unsigned ecf)
  5456. {
  5457. tree decl = add_builtin_function (name, ftype, (int) code,
  5458. BUILT_IN_MD, NULL, NULL_TREE);
  5459. if (ecf & ECF_CONST)
  5460. TREE_READONLY (decl) = 1;
  5461. if (ecf & ECF_NOTHROW)
  5462. TREE_NOTHROW (decl) = 1;
  5463. alpha_builtins [(int) code] = decl;
  5464. }
  5465. /* Helper function of alpha_init_builtins. Add the COUNT built-in
  5466. functions pointed to by P, with function type FTYPE. */
  5467. static void
  5468. alpha_add_builtins (const struct alpha_builtin_def *p, size_t count,
  5469. tree ftype)
  5470. {
  5471. size_t i;
  5472. for (i = 0; i < count; ++i, ++p)
  5473. if ((target_flags & p->target_mask) == p->target_mask)
  5474. alpha_builtin_function (p->name, ftype, p->code,
  5475. (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW);
  5476. }
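/* Implement TARGET_INIT_BUILTINS.  Register the Alpha-specific builtin
   functions, including the OpenVMS condition-handler builtins.  */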
  5477. static void
  5478. alpha_init_builtins (void)
  5479. {
  5480. tree ftype;
  5481. alpha_dimode_u = lang_hooks.types.type_for_mode (DImode, 1);
  5482. alpha_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8);
  5483. alpha_v8qi_s = build_vector_type (intQI_type_node, 8);
  5484. alpha_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4);
  5485. alpha_v4hi_s = build_vector_type (intHI_type_node, 4);
  5486. ftype = build_function_type_list (alpha_dimode_u, NULL_TREE);
  5487. alpha_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype);
  5488. ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u, NULL_TREE);
  5489. alpha_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype);
  5490. ftype = build_function_type_list (alpha_dimode_u, alpha_dimode_u,
  5491. alpha_dimode_u, NULL_TREE);
  5492. alpha_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype);
  5493. ftype = build_function_type_list (ptr_type_node, NULL_TREE);
  5494. alpha_builtin_function ("__builtin_thread_pointer", ftype,
  5495. ALPHA_BUILTIN_THREAD_POINTER, ECF_NOTHROW);
  5496. ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
  5497. alpha_builtin_function ("__builtin_set_thread_pointer", ftype,
  5498. ALPHA_BUILTIN_SET_THREAD_POINTER, ECF_NOTHROW);
  5499. if (TARGET_ABI_OPEN_VMS)
  5500. {
  5501. ftype = build_function_type_list (ptr_type_node, ptr_type_node,
  5502. NULL_TREE);
  5503. alpha_builtin_function ("__builtin_establish_vms_condition_handler",
  5504. ftype,
  5505. ALPHA_BUILTIN_ESTABLISH_VMS_CONDITION_HANDLER,
  5506. 0);
  5507. ftype = build_function_type_list (ptr_type_node, void_type_node,
  5508. NULL_TREE);
  5509. alpha_builtin_function ("__builtin_revert_vms_condition_handler", ftype,
  5510. ALPHA_BUILTIN_REVERT_VMS_CONDITION_HANDLER, 0);
  5511. vms_patch_builtins ();
  5512. }
  5513. }
  5514. /* Expand an expression EXP that calls a built-in function,
  5515. with result going to TARGET if that's convenient
  5516. (and in mode MODE if that's convenient).
  5517. SUBTARGET may be used as the target for computing one of EXP's operands.
  5518. IGNORE is nonzero if the value is to be ignored. */
  5519. static rtx
  5520. alpha_expand_builtin (tree exp, rtx target,
  5521. rtx subtarget ATTRIBUTE_UNUSED,
  5522. enum machine_mode mode ATTRIBUTE_UNUSED,
  5523. int ignore ATTRIBUTE_UNUSED)
  5524. {
  5525. #define MAX_ARGS 2
  5526. tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  5527. unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  5528. tree arg;
  5529. call_expr_arg_iterator iter;
  5530. enum insn_code icode;
  5531. rtx op[MAX_ARGS], pat;
  5532. int arity;
  5533. bool nonvoid;
  5534. if (fcode >= ALPHA_BUILTIN_max)
  5535. internal_error ("bad builtin fcode");
  5536. icode = code_for_builtin[fcode];
  5537. if (icode == 0)
  5538. internal_error ("bad builtin fcode");
  5539. nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
  5540. arity = 0;
  5541. FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
  5542. {
  5543. const struct insn_operand_data *insn_op;
  5544. if (arg == error_mark_node)
  5545. return NULL_RTX;
   5546. if (arity >= MAX_ARGS)
  5547. return NULL_RTX;
  5548. insn_op = &insn_data[icode].operand[arity + nonvoid];
  5549. op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL);
  5550. if (!(*insn_op->predicate) (op[arity], insn_op->mode))
  5551. op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]);
  5552. arity++;
  5553. }
  5554. if (nonvoid)
  5555. {
  5556. enum machine_mode tmode = insn_data[icode].operand[0].mode;
  5557. if (!target
  5558. || GET_MODE (target) != tmode
  5559. || !(*insn_data[icode].operand[0].predicate) (target, tmode))
  5560. target = gen_reg_rtx (tmode);
  5561. }
  5562. switch (arity)
  5563. {
  5564. case 0:
  5565. pat = GEN_FCN (icode) (target);
  5566. break;
  5567. case 1:
  5568. if (nonvoid)
  5569. pat = GEN_FCN (icode) (target, op[0]);
  5570. else
  5571. pat = GEN_FCN (icode) (op[0]);
  5572. break;
  5573. case 2:
  5574. pat = GEN_FCN (icode) (target, op[0], op[1]);
  5575. break;
  5576. default:
  5577. gcc_unreachable ();
  5578. }
  5579. if (!pat)
  5580. return NULL_RTX;
  5581. emit_insn (pat);
  5582. if (nonvoid)
  5583. return target;
  5584. else
  5585. return const0_rtx;
  5586. }
  5587. /* Several bits below assume HWI >= 64 bits. This should be enforced
  5588. by config.gcc. */
  5589. #if HOST_BITS_PER_WIDE_INT < 64
  5590. # error "HOST_WIDE_INT too small"
  5591. #endif
  5592. /* Fold the builtin for the CMPBGE instruction. This is a vector comparison
  5593. with an 8-bit output vector. OPINT contains the integer operands; bit N
  5594. of OP_CONST is set if OPINT[N] is valid. */
  5595. static tree
  5596. alpha_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const)
  5597. {
  5598. if (op_const == 3)
  5599. {
  5600. int i, val;
  5601. for (i = 0, val = 0; i < 8; ++i)
  5602. {
  5603. unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff;
  5604. unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff;
  5605. if (c0 >= c1)
  5606. val |= 1 << i;
  5607. }
  5608. return build_int_cst (alpha_dimode_u, val);
  5609. }
  5610. else if (op_const == 2 && opint[1] == 0)
  5611. return build_int_cst (alpha_dimode_u, 0xff);
  5612. return NULL;
  5613. }
  5614. /* Fold the builtin for the ZAPNOT instruction. This is essentially a
  5615. specialized form of an AND operation. Other byte manipulation instructions
  5616. are defined in terms of this instruction, so this is also used as a
  5617. subroutine for other builtins.
  5618. OP contains the tree operands; OPINT contains the extracted integer values.
   5619. Bit N of OP_CONST is set if OPINT[N] is valid. OP may be null if only
  5620. OPINT may be considered. */
  5621. static tree
  5622. alpha_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[],
  5623. long op_const)
  5624. {
  5625. if (op_const & 2)
  5626. {
  5627. unsigned HOST_WIDE_INT mask = 0;
  5628. int i;
  5629. for (i = 0; i < 8; ++i)
  5630. if ((opint[1] >> i) & 1)
  5631. mask |= (unsigned HOST_WIDE_INT)0xff << (i * 8);
  5632. if (op_const & 1)
  5633. return build_int_cst (alpha_dimode_u, opint[0] & mask);
  5634. if (op)
  5635. return fold_build2 (BIT_AND_EXPR, alpha_dimode_u, op[0],
  5636. build_int_cst (alpha_dimode_u, mask));
  5637. }
  5638. else if ((op_const & 1) && opint[0] == 0)
  5639. return build_int_cst (alpha_dimode_u, 0);
  5640. return NULL;
  5641. }
  5642. /* Fold the builtins for the EXT family of instructions. */
  5643. static tree
  5644. alpha_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[],
  5645. long op_const, unsigned HOST_WIDE_INT bytemask,
  5646. bool is_high)
  5647. {
  5648. long zap_const = 2;
  5649. tree *zap_op = NULL;
  5650. if (op_const & 2)
  5651. {
  5652. unsigned HOST_WIDE_INT loc;
  5653. loc = opint[1] & 7;
  5654. loc *= BITS_PER_UNIT;
  5655. if (loc != 0)
  5656. {
  5657. if (op_const & 1)
  5658. {
  5659. unsigned HOST_WIDE_INT temp = opint[0];
  5660. if (is_high)
  5661. temp <<= loc;
  5662. else
  5663. temp >>= loc;
  5664. opint[0] = temp;
  5665. zap_const = 3;
  5666. }
  5667. }
  5668. else
  5669. zap_op = op;
  5670. }
  5671. opint[1] = bytemask;
  5672. return alpha_fold_builtin_zapnot (zap_op, opint, zap_const);
  5673. }
  5674. /* Fold the builtins for the INS family of instructions. */
  5675. static tree
  5676. alpha_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[],
  5677. long op_const, unsigned HOST_WIDE_INT bytemask,
  5678. bool is_high)
  5679. {
  5680. if ((op_const & 1) && opint[0] == 0)
  5681. return build_int_cst (alpha_dimode_u, 0);
  5682. if (op_const & 2)
  5683. {
  5684. unsigned HOST_WIDE_INT temp, loc, byteloc;
  5685. tree *zap_op = NULL;
  5686. loc = opint[1] & 7;
  5687. bytemask <<= loc;
  5688. temp = opint[0];
  5689. if (is_high)
  5690. {
  5691. byteloc = (64 - (loc * 8)) & 0x3f;
  5692. if (byteloc == 0)
  5693. zap_op = op;
  5694. else
  5695. temp >>= byteloc;
  5696. bytemask >>= 8;
  5697. }
  5698. else
  5699. {
  5700. byteloc = loc * 8;
  5701. if (byteloc == 0)
  5702. zap_op = op;
  5703. else
  5704. temp <<= byteloc;
  5705. }
  5706. opint[0] = temp;
  5707. opint[1] = bytemask;
  5708. return alpha_fold_builtin_zapnot (zap_op, opint, op_const);
  5709. }
  5710. return NULL;
  5711. }
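/* Fold the builtins for the MSK family of instructions.  */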
  5712. static tree
  5713. alpha_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[],
  5714. long op_const, unsigned HOST_WIDE_INT bytemask,
  5715. bool is_high)
  5716. {
  5717. if (op_const & 2)
  5718. {
  5719. unsigned HOST_WIDE_INT loc;
  5720. loc = opint[1] & 7;
  5721. bytemask <<= loc;
  5722. if (is_high)
  5723. bytemask >>= 8;
  5724. opint[1] = bytemask ^ 0xff;
  5725. }
  5726. return alpha_fold_builtin_zapnot (op, opint, op_const);
  5727. }
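/* Fold one of the vector MIN/MAX builtins: perform CODE on the operands
   viewed as vectors of type VTYPE and view the result back as a 64-bit
   integer.  */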
  5728. static tree
  5729. alpha_fold_vector_minmax (enum tree_code code, tree op[], tree vtype)
  5730. {
  5731. tree op0 = fold_convert (vtype, op[0]);
  5732. tree op1 = fold_convert (vtype, op[1]);
  5733. tree val = fold_build2 (code, vtype, op0, op1);
  5734. return fold_build1 (VIEW_CONVERT_EXPR, alpha_dimode_u, val);
  5735. }
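/* Fold the PERR builtin: the sum of the absolute differences of the
   corresponding bytes of the two operands.  */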
  5736. static tree
  5737. alpha_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const)
  5738. {
  5739. unsigned HOST_WIDE_INT temp = 0;
  5740. int i;
  5741. if (op_const != 3)
  5742. return NULL;
  5743. for (i = 0; i < 8; ++i)
  5744. {
  5745. unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff;
  5746. unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff;
  5747. if (a >= b)
  5748. temp += a - b;
  5749. else
  5750. temp += b - a;
  5751. }
  5752. return build_int_cst (alpha_dimode_u, temp);
  5753. }
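/* Fold the PKLB builtin: pack the low bytes of the two longwords
   (bytes 0 and 4) into the low two bytes of the result.  */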
  5754. static tree
  5755. alpha_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const)
  5756. {
  5757. unsigned HOST_WIDE_INT temp;
  5758. if (op_const == 0)
  5759. return NULL;
  5760. temp = opint[0] & 0xff;
  5761. temp |= (opint[0] >> 24) & 0xff00;
  5762. return build_int_cst (alpha_dimode_u, temp);
  5763. }
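/* Fold the PKWB builtin: pack the low bytes of the four words
   (bytes 0, 2, 4 and 6) into the low four bytes of the result.  */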
  5764. static tree
  5765. alpha_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const)
  5766. {
  5767. unsigned HOST_WIDE_INT temp;
  5768. if (op_const == 0)
  5769. return NULL;
  5770. temp = opint[0] & 0xff;
  5771. temp |= (opint[0] >> 8) & 0xff00;
  5772. temp |= (opint[0] >> 16) & 0xff0000;
  5773. temp |= (opint[0] >> 24) & 0xff000000;
  5774. return build_int_cst (alpha_dimode_u, temp);
  5775. }
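/* Fold the UNPKBL builtin: unpack the low two bytes into bytes 0 and 4
   of the result.  */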
  5776. static tree
  5777. alpha_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const)
  5778. {
  5779. unsigned HOST_WIDE_INT temp;
  5780. if (op_const == 0)
  5781. return NULL;
  5782. temp = opint[0] & 0xff;
  5783. temp |= (opint[0] & 0xff00) << 24;
  5784. return build_int_cst (alpha_dimode_u, temp);
  5785. }
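/* Fold the UNPKBW builtin: unpack the low four bytes into bytes 0, 2, 4
   and 6 of the result.  */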
  5786. static tree
  5787. alpha_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const)
  5788. {
  5789. unsigned HOST_WIDE_INT temp;
  5790. if (op_const == 0)
  5791. return NULL;
  5792. temp = opint[0] & 0xff;
  5793. temp |= (opint[0] & 0x0000ff00) << 8;
  5794. temp |= (opint[0] & 0x00ff0000) << 16;
  5795. temp |= (opint[0] & 0xff000000) << 24;
  5796. return build_int_cst (alpha_dimode_u, temp);
  5797. }
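/* Fold the CTTZ builtin: the number of trailing zero bits, or 64 for a
   zero operand.  */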
  5798. static tree
  5799. alpha_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const)
  5800. {
  5801. unsigned HOST_WIDE_INT temp;
  5802. if (op_const == 0)
  5803. return NULL;
  5804. if (opint[0] == 0)
  5805. temp = 64;
  5806. else
  5807. temp = exact_log2 (opint[0] & -opint[0]);
  5808. return build_int_cst (alpha_dimode_u, temp);
  5809. }
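/* Fold the CTLZ builtin: the number of leading zero bits, or 64 for a
   zero operand.  */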
  5810. static tree
  5811. alpha_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const)
  5812. {
  5813. unsigned HOST_WIDE_INT temp;
  5814. if (op_const == 0)
  5815. return NULL;
  5816. if (opint[0] == 0)
  5817. temp = 64;
  5818. else
  5819. temp = 64 - floor_log2 (opint[0]) - 1;
  5820. return build_int_cst (alpha_dimode_u, temp);
  5821. }
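/* Fold the CTPOP builtin: the number of set bits in the operand.  */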
  5822. static tree
  5823. alpha_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const)
  5824. {
  5825. unsigned HOST_WIDE_INT temp, op;
  5826. if (op_const == 0)
  5827. return NULL;
  5828. op = opint[0];
  5829. temp = 0;
  5830. while (op)
  5831. temp++, op &= op - 1;
  5832. return build_int_cst (alpha_dimode_u, temp);
  5833. }
  5834. /* Fold one of our builtin functions. */
  5835. static tree
  5836. alpha_fold_builtin (tree fndecl, int n_args, tree *op,
  5837. bool ignore ATTRIBUTE_UNUSED)
  5838. {
  5839. unsigned HOST_WIDE_INT opint[MAX_ARGS];
  5840. long op_const = 0;
  5841. int i;
  5842. if (n_args > MAX_ARGS)
  5843. return NULL;
  5844. for (i = 0; i < n_args; i++)
  5845. {
  5846. tree arg = op[i];
  5847. if (arg == error_mark_node)
  5848. return NULL;
  5849. opint[i] = 0;
  5850. if (TREE_CODE (arg) == INTEGER_CST)
  5851. {
  5852. op_const |= 1L << i;
  5853. opint[i] = int_cst_value (arg);
  5854. }
  5855. }
  5856. switch (DECL_FUNCTION_CODE (fndecl))
  5857. {
  5858. case ALPHA_BUILTIN_CMPBGE:
  5859. return alpha_fold_builtin_cmpbge (opint, op_const);
  5860. case ALPHA_BUILTIN_EXTBL:
  5861. return alpha_fold_builtin_extxx (op, opint, op_const, 0x01, false);
  5862. case ALPHA_BUILTIN_EXTWL:
  5863. return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, false);
  5864. case ALPHA_BUILTIN_EXTLL:
  5865. return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, false);
  5866. case ALPHA_BUILTIN_EXTQL:
  5867. return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, false);
  5868. case ALPHA_BUILTIN_EXTWH:
  5869. return alpha_fold_builtin_extxx (op, opint, op_const, 0x03, true);
  5870. case ALPHA_BUILTIN_EXTLH:
  5871. return alpha_fold_builtin_extxx (op, opint, op_const, 0x0f, true);
  5872. case ALPHA_BUILTIN_EXTQH:
  5873. return alpha_fold_builtin_extxx (op, opint, op_const, 0xff, true);
  5874. case ALPHA_BUILTIN_INSBL:
  5875. return alpha_fold_builtin_insxx (op, opint, op_const, 0x01, false);
  5876. case ALPHA_BUILTIN_INSWL:
  5877. return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, false);
  5878. case ALPHA_BUILTIN_INSLL:
  5879. return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, false);
  5880. case ALPHA_BUILTIN_INSQL:
  5881. return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, false);
  5882. case ALPHA_BUILTIN_INSWH:
  5883. return alpha_fold_builtin_insxx (op, opint, op_const, 0x03, true);
  5884. case ALPHA_BUILTIN_INSLH:
  5885. return alpha_fold_builtin_insxx (op, opint, op_const, 0x0f, true);
  5886. case ALPHA_BUILTIN_INSQH:
  5887. return alpha_fold_builtin_insxx (op, opint, op_const, 0xff, true);
  5888. case ALPHA_BUILTIN_MSKBL:
  5889. return alpha_fold_builtin_mskxx (op, opint, op_const, 0x01, false);
  5890. case ALPHA_BUILTIN_MSKWL:
  5891. return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, false);
  5892. case ALPHA_BUILTIN_MSKLL:
  5893. return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, false);
  5894. case ALPHA_BUILTIN_MSKQL:
  5895. return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, false);
  5896. case ALPHA_BUILTIN_MSKWH:
  5897. return alpha_fold_builtin_mskxx (op, opint, op_const, 0x03, true);
  5898. case ALPHA_BUILTIN_MSKLH:
  5899. return alpha_fold_builtin_mskxx (op, opint, op_const, 0x0f, true);
  5900. case ALPHA_BUILTIN_MSKQH:
  5901. return alpha_fold_builtin_mskxx (op, opint, op_const, 0xff, true);
  5902. case ALPHA_BUILTIN_UMULH:
  5903. return fold_build2 (MULT_HIGHPART_EXPR, alpha_dimode_u, op[0], op[1]);
  5904. case ALPHA_BUILTIN_ZAP:
  5905. opint[1] ^= 0xff;
  5906. /* FALLTHRU */
  5907. case ALPHA_BUILTIN_ZAPNOT:
  5908. return alpha_fold_builtin_zapnot (op, opint, op_const);
  5909. case ALPHA_BUILTIN_MINUB8:
  5910. return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_u);
  5911. case ALPHA_BUILTIN_MINSB8:
  5912. return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v8qi_s);
  5913. case ALPHA_BUILTIN_MINUW4:
  5914. return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_u);
  5915. case ALPHA_BUILTIN_MINSW4:
  5916. return alpha_fold_vector_minmax (MIN_EXPR, op, alpha_v4hi_s);
  5917. case ALPHA_BUILTIN_MAXUB8:
  5918. return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_u);
  5919. case ALPHA_BUILTIN_MAXSB8:
  5920. return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v8qi_s);
  5921. case ALPHA_BUILTIN_MAXUW4:
  5922. return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_u);
  5923. case ALPHA_BUILTIN_MAXSW4:
  5924. return alpha_fold_vector_minmax (MAX_EXPR, op, alpha_v4hi_s);
  5925. case ALPHA_BUILTIN_PERR:
  5926. return alpha_fold_builtin_perr (opint, op_const);
  5927. case ALPHA_BUILTIN_PKLB:
  5928. return alpha_fold_builtin_pklb (opint, op_const);
  5929. case ALPHA_BUILTIN_PKWB:
  5930. return alpha_fold_builtin_pkwb (opint, op_const);
  5931. case ALPHA_BUILTIN_UNPKBL:
  5932. return alpha_fold_builtin_unpkbl (opint, op_const);
  5933. case ALPHA_BUILTIN_UNPKBW:
  5934. return alpha_fold_builtin_unpkbw (opint, op_const);
  5935. case ALPHA_BUILTIN_CTTZ:
  5936. return alpha_fold_builtin_cttz (opint, op_const);
  5937. case ALPHA_BUILTIN_CTLZ:
  5938. return alpha_fold_builtin_ctlz (opint, op_const);
  5939. case ALPHA_BUILTIN_CTPOP:
  5940. return alpha_fold_builtin_ctpop (opint, op_const);
  5941. case ALPHA_BUILTIN_AMASK:
  5942. case ALPHA_BUILTIN_IMPLVER:
  5943. case ALPHA_BUILTIN_RPCC:
  5944. case ALPHA_BUILTIN_THREAD_POINTER:
  5945. case ALPHA_BUILTIN_SET_THREAD_POINTER:
  5946. /* None of these are foldable at compile-time. */
  5947. default:
  5948. return NULL;
  5949. }
  5950. }
  5951. /* This page contains routines that are used to determine what the function
  5952. prologue and epilogue code will do and write them out. */
  5953. /* Compute the size of the save area in the stack. */
  5954. /* These variables are used for communication between the following functions.
  5955. They indicate various things about the current function being compiled
  5956. that are used to tell what kind of prologue, epilogue and procedure
  5957. descriptor to generate. */
  5958. /* Nonzero if we need a stack procedure. */
  5959. enum alpha_procedure_types {PT_NULL = 0, PT_REGISTER = 1, PT_STACK = 2};
  5960. static enum alpha_procedure_types alpha_procedure_type;
  5961. /* Register number (either FP or SP) that is used to unwind the frame. */
  5962. static int vms_unwind_regno;
  5963. /* Register number used to save FP. We need not have one for RA since
  5964. we don't modify it for register procedures. This is only defined
  5965. for register frame procedures. */
  5966. static int vms_save_fp_regno;
  5967. /* Register number used to reference objects off our PV. */
  5968. static int vms_base_regno;
  5969. /* Compute register masks for saved registers. */
  5970. static void
  5971. alpha_sa_mask (unsigned long *imaskP, unsigned long *fmaskP)
  5972. {
  5973. unsigned long imask = 0;
  5974. unsigned long fmask = 0;
  5975. unsigned int i;
  5976. /* When outputting a thunk, we don't have valid register life info,
  5977. but assemble_start_function wants to output .frame and .mask
  5978. directives. */
  5979. if (cfun->is_thunk)
  5980. {
  5981. *imaskP = 0;
  5982. *fmaskP = 0;
  5983. return;
  5984. }
  5985. if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
  5986. imask |= (1UL << HARD_FRAME_POINTER_REGNUM);
  5987. /* One for every register we have to save. */
  5988. for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
  5989. if (! fixed_regs[i] && ! call_used_regs[i]
  5990. && df_regs_ever_live_p (i) && i != REG_RA)
  5991. {
  5992. if (i < 32)
  5993. imask |= (1UL << i);
  5994. else
  5995. fmask |= (1UL << (i - 32));
  5996. }
  5997. /* We need to restore these for the handler. */
  5998. if (crtl->calls_eh_return)
  5999. {
  6000. for (i = 0; ; ++i)
  6001. {
  6002. unsigned regno = EH_RETURN_DATA_REGNO (i);
  6003. if (regno == INVALID_REGNUM)
  6004. break;
  6005. imask |= 1UL << regno;
  6006. }
  6007. }
  6008. /* If any register spilled, then spill the return address also. */
  6009. /* ??? This is required by the Digital stack unwind specification
  6010. and isn't needed if we're doing Dwarf2 unwinding. */
  6011. if (imask || fmask || alpha_ra_ever_killed ())
  6012. imask |= (1UL << REG_RA);
  6013. *imaskP = imask;
  6014. *fmaskP = fmask;
  6015. }
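/* Return the size of the register save area, in bytes.  On VMS this also
   settles the procedure type and which registers are used as the PV base
   and for unwinding.  */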
  6016. int
  6017. alpha_sa_size (void)
  6018. {
  6019. unsigned long mask[2];
  6020. int sa_size = 0;
  6021. int i, j;
  6022. alpha_sa_mask (&mask[0], &mask[1]);
  6023. for (j = 0; j < 2; ++j)
  6024. for (i = 0; i < 32; ++i)
  6025. if ((mask[j] >> i) & 1)
  6026. sa_size++;
  6027. if (TARGET_ABI_OPEN_VMS)
  6028. {
  6029. /* Start with a stack procedure if we make any calls (REG_RA used), or
  6030. need a frame pointer, with a register procedure if we otherwise need
  6031. at least a slot, and with a null procedure in other cases. */
  6032. if ((mask[0] >> REG_RA) & 1 || frame_pointer_needed)
  6033. alpha_procedure_type = PT_STACK;
   6034. else if (get_frame_size () != 0)
  6035. alpha_procedure_type = PT_REGISTER;
  6036. else
  6037. alpha_procedure_type = PT_NULL;
  6038. /* Don't reserve space for saving FP & RA yet. Do that later after we've
  6039. made the final decision on stack procedure vs register procedure. */
  6040. if (alpha_procedure_type == PT_STACK)
  6041. sa_size -= 2;
  6042. /* Decide whether to refer to objects off our PV via FP or PV.
  6043. If we need FP for something else or if we receive a nonlocal
  6044. goto (which expects PV to contain the value), we must use PV.
  6045. Otherwise, start by assuming we can use FP. */
  6046. vms_base_regno
  6047. = (frame_pointer_needed
  6048. || cfun->has_nonlocal_label
  6049. || alpha_procedure_type == PT_STACK
  6050. || crtl->outgoing_args_size)
  6051. ? REG_PV : HARD_FRAME_POINTER_REGNUM;
  6052. /* If we want to copy PV into FP, we need to find some register
  6053. in which to save FP. */
  6054. vms_save_fp_regno = -1;
  6055. if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
  6056. for (i = 0; i < 32; i++)
  6057. if (! fixed_regs[i] && call_used_regs[i] && ! df_regs_ever_live_p (i))
  6058. vms_save_fp_regno = i;
  6059. /* A VMS condition handler requires a stack procedure in our
   6060. implementation (this is not required by the calling standard). */
  6061. if ((vms_save_fp_regno == -1 && alpha_procedure_type == PT_REGISTER)
  6062. || cfun->machine->uses_condition_handler)
  6063. vms_base_regno = REG_PV, alpha_procedure_type = PT_STACK;
  6064. else if (alpha_procedure_type == PT_NULL)
  6065. vms_base_regno = REG_PV;
  6066. /* Stack unwinding should be done via FP unless we use it for PV. */
  6067. vms_unwind_regno = (vms_base_regno == REG_PV
  6068. ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
  6069. /* If this is a stack procedure, allow space for saving FP, RA and
  6070. a condition handler slot if needed. */
  6071. if (alpha_procedure_type == PT_STACK)
  6072. sa_size += 2 + cfun->machine->uses_condition_handler;
  6073. }
  6074. else
  6075. {
  6076. /* Our size must be even (multiple of 16 bytes). */
  6077. if (sa_size & 1)
  6078. sa_size++;
  6079. }
  6080. return sa_size * 8;
  6081. }
  6082. /* Define the offset between two registers, one to be eliminated,
  6083. and the other its replacement, at the start of a routine. */
  6084. HOST_WIDE_INT
  6085. alpha_initial_elimination_offset (unsigned int from,
  6086. unsigned int to ATTRIBUTE_UNUSED)
  6087. {
  6088. HOST_WIDE_INT ret;
  6089. ret = alpha_sa_size ();
  6090. ret += ALPHA_ROUND (crtl->outgoing_args_size);
  6091. switch (from)
  6092. {
  6093. case FRAME_POINTER_REGNUM:
  6094. break;
  6095. case ARG_POINTER_REGNUM:
  6096. ret += (ALPHA_ROUND (get_frame_size ()
  6097. + crtl->args.pretend_args_size)
  6098. - crtl->args.pretend_args_size);
  6099. break;
  6100. default:
  6101. gcc_unreachable ();
  6102. }
  6103. return ret;
  6104. }
  6105. #if TARGET_ABI_OPEN_VMS
  6106. /* Worker function for TARGET_CAN_ELIMINATE. */
  6107. static bool
  6108. alpha_vms_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
  6109. {
  6110. /* We need the alpha_procedure_type to decide. Evaluate it now. */
  6111. alpha_sa_size ();
  6112. switch (alpha_procedure_type)
  6113. {
  6114. case PT_NULL:
  6115. /* NULL procedures have no frame of their own and we only
  6116. know how to resolve from the current stack pointer. */
  6117. return to == STACK_POINTER_REGNUM;
  6118. case PT_REGISTER:
  6119. case PT_STACK:
  6120. /* We always eliminate except to the stack pointer if there is no
  6121. usable frame pointer at hand. */
  6122. return (to != STACK_POINTER_REGNUM
  6123. || vms_unwind_regno != HARD_FRAME_POINTER_REGNUM);
  6124. }
  6125. gcc_unreachable ();
  6126. }
  6127. /* FROM is to be eliminated for TO. Return the offset so that TO+offset
  6128. designates the same location as FROM. */
  6129. HOST_WIDE_INT
  6130. alpha_vms_initial_elimination_offset (unsigned int from, unsigned int to)
  6131. {
  6132. /* The only possible attempts we ever expect are ARG or FRAME_PTR to
  6133. HARD_FRAME or STACK_PTR. We need the alpha_procedure_type to decide
  6134. on the proper computations and will need the register save area size
  6135. in most cases. */
  6136. HOST_WIDE_INT sa_size = alpha_sa_size ();
  6137. /* PT_NULL procedures have no frame of their own and we only allow
  6138. elimination to the stack pointer. This is the argument pointer and we
  6139. resolve the soft frame pointer to that as well. */
  6140. if (alpha_procedure_type == PT_NULL)
  6141. return 0;
   6142. /* For a PT_STACK procedure the frame layout looks as follows
   6143.                          -----> decreasing addresses
   6144.                   <    size rounded up to 16    |   likewise   >
   6145.    --------------#-----------------------------+++------------+++--------------#
   6146.    incoming args # pretended args |   "frame"   | regs sa | PV | outgoing args #
   6147.    --------------#-------------------------------------------------------------#
   6148.                  ^                              ^              ^               ^
   6149.              ARG_PTR                        FRAME_PTR   HARD_FRAME_PTR     STACK_PTR
  6150. PT_REGISTER procedures are similar in that they may have a frame of their
  6151. own. They have no regs-sa/pv/outgoing-args area.
  6152. We first compute offset to HARD_FRAME_PTR, then add what we need to get
  6153. to STACK_PTR if need be. */
  6154. {
  6155. HOST_WIDE_INT offset;
  6156. HOST_WIDE_INT pv_save_size = alpha_procedure_type == PT_STACK ? 8 : 0;
  6157. switch (from)
  6158. {
  6159. case FRAME_POINTER_REGNUM:
  6160. offset = ALPHA_ROUND (sa_size + pv_save_size);
  6161. break;
  6162. case ARG_POINTER_REGNUM:
  6163. offset = (ALPHA_ROUND (sa_size + pv_save_size
  6164. + get_frame_size ()
  6165. + crtl->args.pretend_args_size)
  6166. - crtl->args.pretend_args_size);
  6167. break;
  6168. default:
  6169. gcc_unreachable ();
  6170. }
  6171. if (to == STACK_POINTER_REGNUM)
  6172. offset += ALPHA_ROUND (crtl->outgoing_args_size);
  6173. return offset;
  6174. }
  6175. }
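/* Worked example with hypothetical sizes, assuming ALPHA_ROUND rounds up to
   a multiple of 16: for a PT_STACK procedure with sa_size = 32,
   pv_save_size = 8, get_frame_size () = 48, pretend_args_size = 16 and
   outgoing_args_size = 64,

     FRAME_PTR -> HARD_FRAME_PTR : ALPHA_ROUND (32 + 8)               =  48
     ARG_PTR   -> HARD_FRAME_PTR : ALPHA_ROUND (32 + 8 + 48 + 16) - 16
                                 = 112 - 16                           =  96
     FRAME_PTR -> STACK_PTR      : 48 + ALPHA_ROUND (64)              = 112
     ARG_PTR   -> STACK_PTR      : 96 + ALPHA_ROUND (64)              = 160.  */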
  6176. #define COMMON_OBJECT "common_object"
  6177. static tree
  6178. common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
  6179. tree args ATTRIBUTE_UNUSED, int flags ATTRIBUTE_UNUSED,
  6180. bool *no_add_attrs ATTRIBUTE_UNUSED)
  6181. {
  6182. tree decl = *node;
  6183. gcc_assert (DECL_P (decl));
  6184. DECL_COMMON (decl) = 1;
  6185. return NULL_TREE;
  6186. }
  6187. static const struct attribute_spec vms_attribute_table[] =
  6188. {
  6189. /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
  6190. affects_type_identity } */
  6191. { COMMON_OBJECT, 0, 1, true, false, false, common_object_handler, false },
  6192. { NULL, 0, 0, false, false, false, NULL, false }
  6193. };
  6194. void
  6195. vms_output_aligned_decl_common(FILE *file, tree decl, const char *name,
  6196. unsigned HOST_WIDE_INT size,
  6197. unsigned int align)
  6198. {
  6199. tree attr = DECL_ATTRIBUTES (decl);
  6200. fprintf (file, "%s", COMMON_ASM_OP);
  6201. assemble_name (file, name);
  6202. fprintf (file, "," HOST_WIDE_INT_PRINT_UNSIGNED, size);
  6203. /* ??? Unlike on OSF/1, the alignment factor is not in log units. */
  6204. fprintf (file, ",%u", align / BITS_PER_UNIT);
  6205. if (attr)
  6206. {
  6207. attr = lookup_attribute (COMMON_OBJECT, attr);
  6208. if (attr)
  6209. fprintf (file, ",%s",
  6210. IDENTIFIER_POINTER (TREE_VALUE (TREE_VALUE (attr))));
  6211. }
  6212. fputc ('\n', file);
  6213. }
  6214. #undef COMMON_OBJECT
  6215. #endif
  6216. static int
  6217. find_lo_sum_using_gp (rtx *px, void *data ATTRIBUTE_UNUSED)
  6218. {
  6219. return GET_CODE (*px) == LO_SUM && XEXP (*px, 0) == pic_offset_table_rtx;
  6220. }
  6221. int
  6222. alpha_find_lo_sum_using_gp (rtx insn)
  6223. {
  6224. return for_each_rtx (&PATTERN (insn), find_lo_sum_using_gp, NULL) > 0;
  6225. }
  6226. static int
  6227. alpha_does_function_need_gp (void)
  6228. {
  6229. rtx insn;
  6230. /* The GP being variable is an OSF abi thing. */
  6231. if (! TARGET_ABI_OSF)
  6232. return 0;
  6233. /* We need the gp to load the address of __mcount. */
  6234. if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
  6235. return 1;
  6236. /* The code emitted by alpha_output_mi_thunk_osf uses the gp. */
  6237. if (cfun->is_thunk)
  6238. return 1;
  6239. /* The nonlocal receiver pattern assumes that the gp is valid for
  6240. the nested function. Reasonable because it's almost always set
  6241. correctly already. For the cases where that's wrong, make sure
  6242. the nested function loads its gp on entry. */
  6243. if (crtl->has_nonlocal_goto)
  6244. return 1;
  6245. /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
  6246. Even if we are a static function, we still need to do this in case
  6247. our address is taken and passed to something like qsort. */
  6248. push_topmost_sequence ();
  6249. insn = get_insns ();
  6250. pop_topmost_sequence ();
  6251. for (; insn; insn = NEXT_INSN (insn))
  6252. if (NONDEBUG_INSN_P (insn)
  6253. && ! JUMP_TABLE_DATA_P (insn)
  6254. && GET_CODE (PATTERN (insn)) != USE
  6255. && GET_CODE (PATTERN (insn)) != CLOBBER
  6256. && get_attr_usegp (insn))
  6257. return 1;
  6258. return 0;
  6259. }
  6260. /* Helper function to set RTX_FRAME_RELATED_P on instructions, including
  6261. sequences. */
  6262. static rtx
  6263. set_frame_related_p (void)
  6264. {
  6265. rtx seq = get_insns ();
  6266. rtx insn;
  6267. end_sequence ();
  6268. if (!seq)
  6269. return NULL_RTX;
  6270. if (INSN_P (seq))
  6271. {
  6272. insn = seq;
  6273. while (insn != NULL_RTX)
  6274. {
  6275. RTX_FRAME_RELATED_P (insn) = 1;
  6276. insn = NEXT_INSN (insn);
  6277. }
  6278. seq = emit_insn (seq);
  6279. }
  6280. else
  6281. {
  6282. seq = emit_insn (seq);
  6283. RTX_FRAME_RELATED_P (seq) = 1;
  6284. }
  6285. return seq;
  6286. }
  6287. #define FRP(exp) (start_sequence (), exp, set_frame_related_p ())
  6288. /* Generates a store with the proper unwind info attached. VALUE is
  6289. stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG
  6290. contains SP+FRAME_BIAS, and that is the unwind info that should be
  6291. generated. If FRAME_REG != VALUE, then VALUE is being stored on
  6292. behalf of FRAME_REG, and FRAME_REG should be present in the unwind. */
  6293. static void
  6294. emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias,
  6295. HOST_WIDE_INT base_ofs, rtx frame_reg)
  6296. {
  6297. rtx addr, mem, insn;
  6298. addr = plus_constant (Pmode, base_reg, base_ofs);
  6299. mem = gen_frame_mem (DImode, addr);
  6300. insn = emit_move_insn (mem, value);
  6301. RTX_FRAME_RELATED_P (insn) = 1;
  6302. if (frame_bias || value != frame_reg)
  6303. {
  6304. if (frame_bias)
  6305. {
  6306. addr = plus_constant (Pmode, stack_pointer_rtx,
  6307. frame_bias + base_ofs);
  6308. mem = gen_rtx_MEM (DImode, addr);
  6309. }
  6310. add_reg_note (insn, REG_FRAME_RELATED_EXPR,
  6311. gen_rtx_SET (VOIDmode, mem, frame_reg));
  6312. }
  6313. }
  6314. static void
  6315. emit_frame_store (unsigned int regno, rtx base_reg,
  6316. HOST_WIDE_INT frame_bias, HOST_WIDE_INT base_ofs)
  6317. {
  6318. rtx reg = gen_rtx_REG (DImode, regno);
  6319. emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg);
  6320. }
  6321. /* Compute the frame size. SIZE is the size of the "naked" frame
  6322. and SA_SIZE is the size of the register save area. */
  6323. static HOST_WIDE_INT
  6324. compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size)
  6325. {
  6326. if (TARGET_ABI_OPEN_VMS)
  6327. return ALPHA_ROUND (sa_size
  6328. + (alpha_procedure_type == PT_STACK ? 8 : 0)
  6329. + size
  6330. + crtl->args.pretend_args_size);
  6331. else
  6332. return ALPHA_ROUND (crtl->outgoing_args_size)
  6333. + sa_size
  6334. + ALPHA_ROUND (size
  6335. + crtl->args.pretend_args_size);
  6336. }
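/* Worked example with hypothetical sizes (ALPHA_ROUND assumed to round up to
   a multiple of 16): size = 40, sa_size = 32, pretend_args_size = 8,
   outgoing_args_size = 20, alpha_procedure_type == PT_STACK.

     VMS : ALPHA_ROUND (32 + 8 + 40 + 8) = ALPHA_ROUND (88)            =  96
     OSF : ALPHA_ROUND (20) + 32 + ALPHA_ROUND (40 + 8) = 32 + 32 + 48 = 112

   On VMS the outgoing argument area is not counted here; the prologue
   allocates it separately below the hard frame pointer.  */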
  6337. /* Write function prologue. */
   6338. /* On VMS we have two kinds of functions:
   6339.    - stack frame (PROC_STACK)
   6340.      these are 'normal' functions with local vars and which are
   6341.      calling other functions
   6342.    - register frame (PROC_REGISTER)
   6343.      keeps all data in registers, needs no stack
   6344.    We must pass this to the assembler so it can generate the
   6345.    proper pdsc (procedure descriptor).
   6346.    This is done with the '.pdesc' command.
   6347.    On non-VMS targets we don't really differentiate between the two, as we
   6348.    can simply allocate stack without saving registers. */
  6349. void
  6350. alpha_expand_prologue (void)
  6351. {
  6352. /* Registers to save. */
  6353. unsigned long imask = 0;
  6354. unsigned long fmask = 0;
  6355. /* Stack space needed for pushing registers clobbered by us. */
  6356. HOST_WIDE_INT sa_size, sa_bias;
  6357. /* Complete stack size needed. */
  6358. HOST_WIDE_INT frame_size;
  6359. /* Probed stack size; it additionally includes the size of
  6360. the "reserve region" if any. */
  6361. HOST_WIDE_INT probed_size;
  6362. /* Offset from base reg to register save area. */
  6363. HOST_WIDE_INT reg_offset;
  6364. rtx sa_reg;
  6365. int i;
  6366. sa_size = alpha_sa_size ();
  6367. frame_size = compute_frame_size (get_frame_size (), sa_size);
  6368. if (flag_stack_usage_info)
  6369. current_function_static_stack_size = frame_size;
  6370. if (TARGET_ABI_OPEN_VMS)
  6371. reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
  6372. else
  6373. reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
  6374. alpha_sa_mask (&imask, &fmask);
  6375. /* Emit an insn to reload GP, if needed. */
  6376. if (TARGET_ABI_OSF)
  6377. {
  6378. alpha_function_needs_gp = alpha_does_function_need_gp ();
  6379. if (alpha_function_needs_gp)
  6380. emit_insn (gen_prologue_ldgp ());
  6381. }
  6382. /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert
  6383. the call to mcount ourselves, rather than having the linker do it
  6384. magically in response to -pg. Since _mcount has special linkage,
  6385. don't represent the call as a call. */
  6386. if (TARGET_PROFILING_NEEDS_GP && crtl->profile)
  6387. emit_insn (gen_prologue_mcount ());
  6388. /* Adjust the stack by the frame size. If the frame size is > 4096
  6389. bytes, we need to be sure we probe somewhere in the first and last
  6390. 4096 bytes (we can probably get away without the latter test) and
  6391. every 8192 bytes in between. If the frame size is > 32768, we
  6392. do this in a loop. Otherwise, we generate the explicit probe
  6393. instructions.
  6394. Note that we are only allowed to adjust sp once in the prologue. */
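  /* Worked example of the small-frame case below, with hypothetical numbers:
     for frame_size = 20000, no -fstack-check and sa_size == 0, the loop
     emits probes at sp-4096 and sp-12288 (leaving probed = 20480), the
     last-page test 20000 > 20480 - 4096 holds, so one more probe is emitted
     at sp-20000, and sp is then adjusted by -20000 in a single insn.  */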
  6395. probed_size = frame_size;
  6396. if (flag_stack_check)
  6397. probed_size += STACK_CHECK_PROTECT;
  6398. if (probed_size <= 32768)
  6399. {
  6400. if (probed_size > 4096)
  6401. {
  6402. int probed;
  6403. for (probed = 4096; probed < probed_size; probed += 8192)
  6404. emit_insn (gen_probe_stack (GEN_INT (-probed)));
  6405. /* We only have to do this probe if we aren't saving registers or
  6406. if we are probing beyond the frame because of -fstack-check. */
  6407. if ((sa_size == 0 && probed_size > probed - 4096)
  6408. || flag_stack_check)
  6409. emit_insn (gen_probe_stack (GEN_INT (-probed_size)));
  6410. }
  6411. if (frame_size != 0)
  6412. FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
  6413. GEN_INT (-frame_size))));
  6414. }
  6415. else
  6416. {
  6417. /* Here we generate code to set R22 to SP + 4096 and set R23 to the
  6418. number of 8192 byte blocks to probe. We then probe each block
  6419. in the loop and then set SP to the proper location. If the
  6420. amount remaining is > 4096, we have to do one more probe if we
  6421. are not saving any registers or if we are probing beyond the
  6422. frame because of -fstack-check. */
  6423. HOST_WIDE_INT blocks = (probed_size + 4096) / 8192;
  6424. HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192;
  6425. rtx ptr = gen_rtx_REG (DImode, 22);
  6426. rtx count = gen_rtx_REG (DImode, 23);
  6427. rtx seq;
  6428. emit_move_insn (count, GEN_INT (blocks));
  6429. emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096)));
  6430. /* Because of the difficulty in emitting a new basic block this
  6431. late in the compilation, generate the loop as a single insn. */
  6432. emit_insn (gen_prologue_stack_probe_loop (count, ptr));
  6433. if ((leftover > 4096 && sa_size == 0) || flag_stack_check)
  6434. {
  6435. rtx last = gen_rtx_MEM (DImode,
  6436. plus_constant (Pmode, ptr, -leftover));
  6437. MEM_VOLATILE_P (last) = 1;
  6438. emit_move_insn (last, const0_rtx);
  6439. }
  6440. if (flag_stack_check)
  6441. {
  6442. /* If -fstack-check is specified we have to load the entire
  6443. constant into a register and subtract from the sp in one go,
  6444. because the probed stack size is not equal to the frame size. */
  6445. HOST_WIDE_INT lo, hi;
  6446. lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
  6447. hi = frame_size - lo;
  6448. emit_move_insn (ptr, GEN_INT (hi));
  6449. emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo)));
  6450. seq = emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx,
  6451. ptr));
  6452. }
  6453. else
  6454. {
  6455. seq = emit_insn (gen_adddi3 (stack_pointer_rtx, ptr,
  6456. GEN_INT (-leftover)));
  6457. }
  6458. /* This alternative is special, because the DWARF code cannot
  6459. possibly intuit through the loop above. So we invent this
   6460. note for it to look at instead. */
  6461. RTX_FRAME_RELATED_P (seq) = 1;
  6462. add_reg_note (seq, REG_FRAME_RELATED_EXPR,
  6463. gen_rtx_SET (VOIDmode, stack_pointer_rtx,
  6464. plus_constant (Pmode, stack_pointer_rtx,
  6465. -frame_size)));
  6466. }
  6467. /* Cope with very large offsets to the register save area. */
  6468. sa_bias = 0;
  6469. sa_reg = stack_pointer_rtx;
  6470. if (reg_offset + sa_size > 0x8000)
  6471. {
  6472. int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
  6473. rtx sa_bias_rtx;
  6474. if (low + sa_size <= 0x8000)
  6475. sa_bias = reg_offset - low, reg_offset = low;
  6476. else
  6477. sa_bias = reg_offset, reg_offset = 0;
  6478. sa_reg = gen_rtx_REG (DImode, 24);
  6479. sa_bias_rtx = GEN_INT (sa_bias);
  6480. if (add_operand (sa_bias_rtx, DImode))
  6481. emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx));
  6482. else
  6483. {
  6484. emit_move_insn (sa_reg, sa_bias_rtx);
  6485. emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg));
  6486. }
  6487. }
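  /* Worked example of the bias computation above, with hypothetical values:
     for reg_offset = 0x9000 and sa_size = 0x100,
     low = ((0x9000 & 0xffff) ^ 0x8000) - 0x8000 = -0x7000, and
     -0x7000 + 0x100 <= 0x8000, so sa_bias becomes 0x10000 and reg_offset
     becomes -0x7000.  $24 is set to sp + 0x10000 once, and every register
     save below then uses a displacement within the signed 16-bit range.  */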
  6488. /* Save regs in stack order. Beginning with VMS PV. */
  6489. if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_STACK)
  6490. emit_frame_store (REG_PV, stack_pointer_rtx, 0, 0);
  6491. /* Save register RA next. */
  6492. if (imask & (1UL << REG_RA))
  6493. {
  6494. emit_frame_store (REG_RA, sa_reg, sa_bias, reg_offset);
  6495. imask &= ~(1UL << REG_RA);
  6496. reg_offset += 8;
  6497. }
  6498. /* Now save any other registers required to be saved. */
  6499. for (i = 0; i < 31; i++)
  6500. if (imask & (1UL << i))
  6501. {
  6502. emit_frame_store (i, sa_reg, sa_bias, reg_offset);
  6503. reg_offset += 8;
  6504. }
  6505. for (i = 0; i < 31; i++)
  6506. if (fmask & (1UL << i))
  6507. {
  6508. emit_frame_store (i+32, sa_reg, sa_bias, reg_offset);
  6509. reg_offset += 8;
  6510. }
  6511. if (TARGET_ABI_OPEN_VMS)
  6512. {
  6513. /* Register frame procedures save the fp. */
  6514. if (alpha_procedure_type == PT_REGISTER)
  6515. {
  6516. rtx insn = emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
  6517. hard_frame_pointer_rtx);
  6518. add_reg_note (insn, REG_CFA_REGISTER, NULL);
  6519. RTX_FRAME_RELATED_P (insn) = 1;
  6520. }
  6521. if (alpha_procedure_type != PT_NULL && vms_base_regno != REG_PV)
  6522. emit_insn (gen_force_movdi (gen_rtx_REG (DImode, vms_base_regno),
  6523. gen_rtx_REG (DImode, REG_PV)));
  6524. if (alpha_procedure_type != PT_NULL
  6525. && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
  6526. FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
  6527. /* If we have to allocate space for outgoing args, do it now. */
  6528. if (crtl->outgoing_args_size != 0)
  6529. {
  6530. rtx seq
  6531. = emit_move_insn (stack_pointer_rtx,
  6532. plus_constant
  6533. (Pmode, hard_frame_pointer_rtx,
  6534. - (ALPHA_ROUND
  6535. (crtl->outgoing_args_size))));
  6536. /* Only set FRAME_RELATED_P on the stack adjustment we just emitted
  6537. if ! frame_pointer_needed. Setting the bit will change the CFA
  6538. computation rule to use sp again, which would be wrong if we had
  6539. frame_pointer_needed, as this means sp might move unpredictably
  6540. later on.
  6541. Also, note that
  6542. frame_pointer_needed
  6543. => vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
  6544. and
  6545. crtl->outgoing_args_size != 0
  6546. => alpha_procedure_type != PT_NULL,
  6547. so when we are not setting the bit here, we are guaranteed to
  6548. have emitted an FRP frame pointer update just before. */
  6549. RTX_FRAME_RELATED_P (seq) = ! frame_pointer_needed;
  6550. }
  6551. }
  6552. else
  6553. {
  6554. /* If we need a frame pointer, set it from the stack pointer. */
  6555. if (frame_pointer_needed)
  6556. {
  6557. if (TARGET_CAN_FAULT_IN_PROLOGUE)
  6558. FRP (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));
  6559. else
  6560. /* This must always be the last instruction in the
  6561. prologue, thus we emit a special move + clobber. */
  6562. FRP (emit_insn (gen_init_fp (hard_frame_pointer_rtx,
  6563. stack_pointer_rtx, sa_reg)));
  6564. }
  6565. }
  6566. /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
  6567. the prologue, for exception handling reasons, we cannot do this for
  6568. any insn that might fault. We could prevent this for mems with a
  6569. (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
  6570. have to prevent all such scheduling with a blockage.
  6571. Linux, on the other hand, never bothered to implement OSF/1's
  6572. exception handling, and so doesn't care about such things. Anyone
  6573. planning to use dwarf2 frame-unwind info can also omit the blockage. */
  6574. if (! TARGET_CAN_FAULT_IN_PROLOGUE)
  6575. emit_insn (gen_blockage ());
  6576. }
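/* The prologue above, and the epilogue and thunk code below, repeatedly
   split a constant into a sign-extended low 16-bit part and a high part so
   it can be materialized by an lda/ldah pair.  A minimal sketch of that
   idiom follows; the function name is hypothetical and the compiler itself
   simply open-codes the two expressions.  */

static void ATTRIBUTE_UNUSED
example_split_lo_hi (HOST_WIDE_INT c, HOST_WIDE_INT *plo, HOST_WIDE_INT *phi)
{
  /* LO is C reduced to a signed 16-bit value: the lda displacement.  */
  HOST_WIDE_INT lo = ((c & 0xffff) ^ 0x8000) - 0x8000;

  /* HI is the remainder reduced to a signed 32-bit multiple of 0x10000:
     what a single ldah can add.  When hi + lo != c, the constant is too
     wide for the two-insn sequence and a full constant load is used.  */
  HOST_WIDE_INT hi = (((c - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;

  *plo = lo;
  *phi = hi;
}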
  6577. /* Count the number of .file directives, so that .loc is up to date. */
  6578. int num_source_filenames = 0;
  6579. /* Output the textual info surrounding the prologue. */
  6580. void
  6581. alpha_start_function (FILE *file, const char *fnname,
  6582. tree decl ATTRIBUTE_UNUSED)
  6583. {
  6584. unsigned long imask = 0;
  6585. unsigned long fmask = 0;
  6586. /* Stack space needed for pushing registers clobbered by us. */
  6587. HOST_WIDE_INT sa_size;
  6588. /* Complete stack size needed. */
  6589. unsigned HOST_WIDE_INT frame_size;
  6590. /* The maximum debuggable frame size. */
  6591. unsigned HOST_WIDE_INT max_frame_size = 1UL << 31;
  6592. /* Offset from base reg to register save area. */
  6593. HOST_WIDE_INT reg_offset;
  6594. char *entry_label = (char *) alloca (strlen (fnname) + 6);
  6595. char *tramp_label = (char *) alloca (strlen (fnname) + 6);
  6596. int i;
  6597. #if TARGET_ABI_OPEN_VMS
  6598. vms_start_function (fnname);
  6599. #endif
  6600. alpha_fnname = fnname;
  6601. sa_size = alpha_sa_size ();
  6602. frame_size = compute_frame_size (get_frame_size (), sa_size);
  6603. if (TARGET_ABI_OPEN_VMS)
  6604. reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
  6605. else
  6606. reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
  6607. alpha_sa_mask (&imask, &fmask);
  6608. /* Issue function start and label. */
  6609. if (TARGET_ABI_OPEN_VMS || !flag_inhibit_size_directive)
  6610. {
  6611. fputs ("\t.ent ", file);
  6612. assemble_name (file, fnname);
  6613. putc ('\n', file);
  6614. /* If the function needs GP, we'll write the "..ng" label there.
  6615. Otherwise, do it here. */
  6616. if (TARGET_ABI_OSF
  6617. && ! alpha_function_needs_gp
  6618. && ! cfun->is_thunk)
  6619. {
  6620. putc ('$', file);
  6621. assemble_name (file, fnname);
  6622. fputs ("..ng:\n", file);
  6623. }
  6624. }
  6625. /* Nested functions on VMS that are potentially called via trampoline
  6626. get a special transfer entry point that loads the called functions
  6627. procedure descriptor and static chain. */
  6628. if (TARGET_ABI_OPEN_VMS
  6629. && !TREE_PUBLIC (decl)
  6630. && DECL_CONTEXT (decl)
  6631. && !TYPE_P (DECL_CONTEXT (decl))
  6632. && TREE_CODE (DECL_CONTEXT (decl)) != TRANSLATION_UNIT_DECL)
  6633. {
  6634. strcpy (tramp_label, fnname);
  6635. strcat (tramp_label, "..tr");
  6636. ASM_OUTPUT_LABEL (file, tramp_label);
  6637. fprintf (file, "\tldq $1,24($27)\n");
  6638. fprintf (file, "\tldq $27,16($27)\n");
  6639. }
  6640. strcpy (entry_label, fnname);
  6641. if (TARGET_ABI_OPEN_VMS)
  6642. strcat (entry_label, "..en");
  6643. ASM_OUTPUT_LABEL (file, entry_label);
  6644. inside_function = TRUE;
  6645. if (TARGET_ABI_OPEN_VMS)
  6646. fprintf (file, "\t.base $%d\n", vms_base_regno);
  6647. if (TARGET_ABI_OSF
  6648. && TARGET_IEEE_CONFORMANT
  6649. && !flag_inhibit_size_directive)
  6650. {
  6651. /* Set flags in procedure descriptor to request IEEE-conformant
  6652. math-library routines. The value we set it to is PDSC_EXC_IEEE
  6653. (/usr/include/pdsc.h). */
  6654. fputs ("\t.eflag 48\n", file);
  6655. }
  6656. /* Set up offsets to alpha virtual arg/local debugging pointer. */
  6657. alpha_auto_offset = -frame_size + crtl->args.pretend_args_size;
  6658. alpha_arg_offset = -frame_size + 48;
   6659. /* Describe our frame. If the frame size does not fit in a signed 32-bit
   6660. integer, print it as zero to avoid an assembler error. We won't be
   6661. properly describing such a frame, but that's the best we can do. */
  6662. if (TARGET_ABI_OPEN_VMS)
  6663. fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,"
  6664. HOST_WIDE_INT_PRINT_DEC "\n",
  6665. vms_unwind_regno,
  6666. frame_size >= (1UL << 31) ? 0 : frame_size,
  6667. reg_offset);
  6668. else if (!flag_inhibit_size_directive)
  6669. fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n",
  6670. (frame_pointer_needed
  6671. ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
  6672. frame_size >= max_frame_size ? 0 : frame_size,
  6673. crtl->args.pretend_args_size);
  6674. /* Describe which registers were spilled. */
  6675. if (TARGET_ABI_OPEN_VMS)
  6676. {
  6677. if (imask)
  6678. /* ??? Does VMS care if mask contains ra? The old code didn't
  6679. set it, so I don't here. */
  6680. fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1UL << REG_RA));
  6681. if (fmask)
  6682. fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
  6683. if (alpha_procedure_type == PT_REGISTER)
  6684. fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
  6685. }
  6686. else if (!flag_inhibit_size_directive)
  6687. {
  6688. if (imask)
  6689. {
  6690. fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask,
  6691. frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
  6692. for (i = 0; i < 32; ++i)
  6693. if (imask & (1UL << i))
  6694. reg_offset += 8;
  6695. }
  6696. if (fmask)
  6697. fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask,
  6698. frame_size >= max_frame_size ? 0 : reg_offset - frame_size);
  6699. }
  6700. #if TARGET_ABI_OPEN_VMS
  6701. /* If a user condition handler has been installed at some point, emit
  6702. the procedure descriptor bits to point the Condition Handling Facility
  6703. at the indirection wrapper, and state the fp offset at which the user
  6704. handler may be found. */
  6705. if (cfun->machine->uses_condition_handler)
  6706. {
  6707. fprintf (file, "\t.handler __gcc_shell_handler\n");
  6708. fprintf (file, "\t.handler_data %d\n", VMS_COND_HANDLER_FP_OFFSET);
  6709. }
  6710. #ifdef TARGET_VMS_CRASH_DEBUG
  6711. /* Support of minimal traceback info. */
  6712. switch_to_section (readonly_data_section);
  6713. fprintf (file, "\t.align 3\n");
  6714. assemble_name (file, fnname); fputs ("..na:\n", file);
  6715. fputs ("\t.ascii \"", file);
  6716. assemble_name (file, fnname);
  6717. fputs ("\\0\"\n", file);
  6718. switch_to_section (text_section);
  6719. #endif
  6720. #endif /* TARGET_ABI_OPEN_VMS */
  6721. }
  6722. /* Emit the .prologue note at the scheduled end of the prologue. */
  6723. static void
  6724. alpha_output_function_end_prologue (FILE *file)
  6725. {
  6726. if (TARGET_ABI_OPEN_VMS)
  6727. fputs ("\t.prologue\n", file);
  6728. else if (!flag_inhibit_size_directive)
  6729. fprintf (file, "\t.prologue %d\n",
  6730. alpha_function_needs_gp || cfun->is_thunk);
  6731. }
  6732. /* Write function epilogue. */
  6733. void
  6734. alpha_expand_epilogue (void)
  6735. {
  6736. /* Registers to save. */
  6737. unsigned long imask = 0;
  6738. unsigned long fmask = 0;
  6739. /* Stack space needed for pushing registers clobbered by us. */
  6740. HOST_WIDE_INT sa_size;
  6741. /* Complete stack size needed. */
  6742. HOST_WIDE_INT frame_size;
  6743. /* Offset from base reg to register save area. */
  6744. HOST_WIDE_INT reg_offset;
  6745. int fp_is_frame_pointer, fp_offset;
  6746. rtx sa_reg, sa_reg_exp = NULL;
  6747. rtx sp_adj1, sp_adj2, mem, reg, insn;
  6748. rtx eh_ofs;
  6749. rtx cfa_restores = NULL_RTX;
  6750. int i;
  6751. sa_size = alpha_sa_size ();
  6752. frame_size = compute_frame_size (get_frame_size (), sa_size);
  6753. if (TARGET_ABI_OPEN_VMS)
  6754. {
  6755. if (alpha_procedure_type == PT_STACK)
  6756. reg_offset = 8 + 8 * cfun->machine->uses_condition_handler;
  6757. else
  6758. reg_offset = 0;
  6759. }
  6760. else
  6761. reg_offset = ALPHA_ROUND (crtl->outgoing_args_size);
  6762. alpha_sa_mask (&imask, &fmask);
  6763. fp_is_frame_pointer
  6764. = (TARGET_ABI_OPEN_VMS
  6765. ? alpha_procedure_type == PT_STACK
  6766. : frame_pointer_needed);
  6767. fp_offset = 0;
  6768. sa_reg = stack_pointer_rtx;
  6769. if (crtl->calls_eh_return)
  6770. eh_ofs = EH_RETURN_STACKADJ_RTX;
  6771. else
  6772. eh_ofs = NULL_RTX;
  6773. if (sa_size)
  6774. {
  6775. /* If we have a frame pointer, restore SP from it. */
  6776. if (TARGET_ABI_OPEN_VMS
  6777. ? vms_unwind_regno == HARD_FRAME_POINTER_REGNUM
  6778. : frame_pointer_needed)
  6779. emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
  6780. /* Cope with very large offsets to the register save area. */
  6781. if (reg_offset + sa_size > 0x8000)
  6782. {
  6783. int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
  6784. HOST_WIDE_INT bias;
  6785. if (low + sa_size <= 0x8000)
  6786. bias = reg_offset - low, reg_offset = low;
  6787. else
  6788. bias = reg_offset, reg_offset = 0;
  6789. sa_reg = gen_rtx_REG (DImode, 22);
  6790. sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias);
  6791. emit_move_insn (sa_reg, sa_reg_exp);
  6792. }
  6793. /* Restore registers in order, excepting a true frame pointer. */
  6794. mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg, reg_offset));
  6795. reg = gen_rtx_REG (DImode, REG_RA);
  6796. emit_move_insn (reg, mem);
  6797. cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
  6798. reg_offset += 8;
  6799. imask &= ~(1UL << REG_RA);
  6800. for (i = 0; i < 31; ++i)
  6801. if (imask & (1UL << i))
  6802. {
  6803. if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
  6804. fp_offset = reg_offset;
  6805. else
  6806. {
  6807. mem = gen_frame_mem (DImode,
  6808. plus_constant (Pmode, sa_reg,
  6809. reg_offset));
  6810. reg = gen_rtx_REG (DImode, i);
  6811. emit_move_insn (reg, mem);
  6812. cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
  6813. cfa_restores);
  6814. }
  6815. reg_offset += 8;
  6816. }
  6817. for (i = 0; i < 31; ++i)
  6818. if (fmask & (1UL << i))
  6819. {
  6820. mem = gen_frame_mem (DFmode, plus_constant (Pmode, sa_reg,
  6821. reg_offset));
  6822. reg = gen_rtx_REG (DFmode, i+32);
  6823. emit_move_insn (reg, mem);
  6824. cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
  6825. reg_offset += 8;
  6826. }
  6827. }
  6828. if (frame_size || eh_ofs)
  6829. {
  6830. sp_adj1 = stack_pointer_rtx;
  6831. if (eh_ofs)
  6832. {
  6833. sp_adj1 = gen_rtx_REG (DImode, 23);
  6834. emit_move_insn (sp_adj1,
  6835. gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs));
  6836. }
  6837. /* If the stack size is large, begin computation into a temporary
  6838. register so as not to interfere with a potential fp restore,
  6839. which must be consecutive with an SP restore. */
  6840. if (frame_size < 32768 && !cfun->calls_alloca)
  6841. sp_adj2 = GEN_INT (frame_size);
  6842. else if (frame_size < 0x40007fffL)
  6843. {
  6844. int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
  6845. sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low);
  6846. if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
  6847. sp_adj1 = sa_reg;
  6848. else
  6849. {
  6850. sp_adj1 = gen_rtx_REG (DImode, 23);
  6851. emit_move_insn (sp_adj1, sp_adj2);
  6852. }
  6853. sp_adj2 = GEN_INT (low);
  6854. }
  6855. else
  6856. {
  6857. rtx tmp = gen_rtx_REG (DImode, 23);
  6858. sp_adj2 = alpha_emit_set_const (tmp, DImode, frame_size, 3, false);
  6859. if (!sp_adj2)
  6860. {
  6861. /* We can't drop new things to memory this late, afaik,
  6862. so build it up by pieces. */
  6863. sp_adj2 = alpha_emit_set_long_const (tmp, frame_size,
  6864. -(frame_size < 0));
  6865. gcc_assert (sp_adj2);
  6866. }
  6867. }
  6868. /* From now on, things must be in order. So emit blockages. */
  6869. /* Restore the frame pointer. */
  6870. if (fp_is_frame_pointer)
  6871. {
  6872. emit_insn (gen_blockage ());
  6873. mem = gen_frame_mem (DImode, plus_constant (Pmode, sa_reg,
  6874. fp_offset));
  6875. emit_move_insn (hard_frame_pointer_rtx, mem);
  6876. cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
  6877. hard_frame_pointer_rtx, cfa_restores);
  6878. }
  6879. else if (TARGET_ABI_OPEN_VMS)
  6880. {
  6881. emit_insn (gen_blockage ());
  6882. emit_move_insn (hard_frame_pointer_rtx,
  6883. gen_rtx_REG (DImode, vms_save_fp_regno));
  6884. cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
  6885. hard_frame_pointer_rtx, cfa_restores);
  6886. }
  6887. /* Restore the stack pointer. */
  6888. emit_insn (gen_blockage ());
  6889. if (sp_adj2 == const0_rtx)
  6890. insn = emit_move_insn (stack_pointer_rtx, sp_adj1);
  6891. else
  6892. insn = emit_move_insn (stack_pointer_rtx,
  6893. gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
  6894. REG_NOTES (insn) = cfa_restores;
  6895. add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx);
  6896. RTX_FRAME_RELATED_P (insn) = 1;
  6897. }
  6898. else
  6899. {
  6900. gcc_assert (cfa_restores == NULL);
  6901. if (TARGET_ABI_OPEN_VMS && alpha_procedure_type == PT_REGISTER)
  6902. {
  6903. emit_insn (gen_blockage ());
  6904. insn = emit_move_insn (hard_frame_pointer_rtx,
  6905. gen_rtx_REG (DImode, vms_save_fp_regno));
  6906. add_reg_note (insn, REG_CFA_RESTORE, hard_frame_pointer_rtx);
  6907. RTX_FRAME_RELATED_P (insn) = 1;
  6908. }
  6909. }
  6910. }
  6911. /* Output the rest of the textual info surrounding the epilogue. */
  6912. void
  6913. alpha_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED)
  6914. {
  6915. rtx insn;
  6916. /* We output a nop after noreturn calls at the very end of the function to
  6917. ensure that the return address always remains in the caller's code range,
  6918. as not doing so might confuse unwinding engines. */
  6919. insn = get_last_insn ();
  6920. if (!INSN_P (insn))
  6921. insn = prev_active_insn (insn);
  6922. if (insn && CALL_P (insn))
  6923. output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL);
  6924. #if TARGET_ABI_OPEN_VMS
  6925. /* Write the linkage entries. */
  6926. alpha_write_linkage (file, fnname);
  6927. #endif
  6928. /* End the function. */
  6929. if (TARGET_ABI_OPEN_VMS
  6930. || !flag_inhibit_size_directive)
  6931. {
  6932. fputs ("\t.end ", file);
  6933. assemble_name (file, fnname);
  6934. putc ('\n', file);
  6935. }
  6936. inside_function = FALSE;
  6937. }
  6938. #if TARGET_ABI_OSF
  6939. /* Emit a tail call to FUNCTION after adjusting THIS by DELTA.
  6940. In order to avoid the hordes of differences between generated code
  6941. with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating
  6942. lots of code loading up large constants, generate rtl and emit it
  6943. instead of going straight to text.
  6944. Not sure why this idea hasn't been explored before... */
  6945. static void
  6946. alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
  6947. HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
  6948. tree function)
  6949. {
  6950. HOST_WIDE_INT hi, lo;
  6951. rtx this_rtx, insn, funexp;
  6952. /* We always require a valid GP. */
  6953. emit_insn (gen_prologue_ldgp ());
  6954. emit_note (NOTE_INSN_PROLOGUE_END);
  6955. /* Find the "this" pointer. If the function returns a structure,
  6956. the structure return pointer is in $16. */
  6957. if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
  6958. this_rtx = gen_rtx_REG (Pmode, 17);
  6959. else
  6960. this_rtx = gen_rtx_REG (Pmode, 16);
  6961. /* Add DELTA. When possible we use ldah+lda. Otherwise load the
  6962. entire constant for the add. */
  6963. lo = ((delta & 0xffff) ^ 0x8000) - 0x8000;
  6964. hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  6965. if (hi + lo == delta)
  6966. {
  6967. if (hi)
  6968. emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi)));
  6969. if (lo)
  6970. emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo)));
  6971. }
  6972. else
  6973. {
  6974. rtx tmp = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 0),
  6975. delta, -(delta < 0));
  6976. emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
  6977. }
  6978. /* Add a delta stored in the vtable at VCALL_OFFSET. */
  6979. if (vcall_offset)
  6980. {
  6981. rtx tmp, tmp2;
  6982. tmp = gen_rtx_REG (Pmode, 0);
  6983. emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
  6984. lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000;
  6985. hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  6986. if (hi + lo == vcall_offset)
  6987. {
  6988. if (hi)
  6989. emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi)));
  6990. }
  6991. else
  6992. {
  6993. tmp2 = alpha_emit_set_long_const (gen_rtx_REG (Pmode, 1),
  6994. vcall_offset, -(vcall_offset < 0));
  6995. emit_insn (gen_adddi3 (tmp, tmp, tmp2));
  6996. lo = 0;
  6997. }
  6998. if (lo)
  6999. tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo));
  7000. else
  7001. tmp2 = tmp;
  7002. emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2));
  7003. emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
  7004. }
  7005. /* Generate a tail call to the target function. */
  7006. if (! TREE_USED (function))
  7007. {
  7008. assemble_external (function);
  7009. TREE_USED (function) = 1;
  7010. }
  7011. funexp = XEXP (DECL_RTL (function), 0);
  7012. funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  7013. insn = emit_call_insn (gen_sibcall (funexp, const0_rtx));
  7014. SIBLING_CALL_P (insn) = 1;
  7015. /* Run just enough of rest_of_compilation to get the insns emitted.
  7016. There's not really enough bulk here to make other passes such as
  7017. instruction scheduling worth while. Note that use_thunk calls
  7018. assemble_start_function and assemble_end_function. */
  7019. insn = get_insns ();
  7020. insn_locators_alloc ();
  7021. shorten_branches (insn);
  7022. final_start_function (insn, file, 1);
  7023. final (insn, file, 1);
  7024. final_end_function ();
  7025. }
  7026. #endif /* TARGET_ABI_OSF */
  7027. /* Debugging support. */
  7028. #include "gstab.h"
  7029. /* Name of the file containing the current function. */
  7030. static const char *current_function_file = "";
  7031. /* Offsets to alpha virtual arg/local debugging pointers. */
  7032. long alpha_arg_offset;
  7033. long alpha_auto_offset;
  7034. /* Emit a new filename to a stream. */
  7035. void
  7036. alpha_output_filename (FILE *stream, const char *name)
  7037. {
  7038. static int first_time = TRUE;
  7039. if (first_time)
  7040. {
  7041. first_time = FALSE;
  7042. ++num_source_filenames;
  7043. current_function_file = name;
  7044. fprintf (stream, "\t.file\t%d ", num_source_filenames);
  7045. output_quoted_string (stream, name);
  7046. fprintf (stream, "\n");
  7047. }
  7048. else if (name != current_function_file
  7049. && strcmp (name, current_function_file) != 0)
  7050. {
  7051. ++num_source_filenames;
  7052. current_function_file = name;
  7053. fprintf (stream, "\t.file\t%d ", num_source_filenames);
  7054. output_quoted_string (stream, name);
  7055. fprintf (stream, "\n");
  7056. }
  7057. }
  7058. /* Structure to show the current status of registers and memory. */
  7059. struct shadow_summary
  7060. {
  7061. struct {
  7062. unsigned int i : 31; /* Mask of int regs */
  7063. unsigned int fp : 31; /* Mask of fp regs */
  7064. unsigned int mem : 1; /* mem == imem | fpmem */
  7065. } used, defd;
  7066. };
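/* For example (following the REG case of summarize_insn below): a use of
   integer register $9 sets bit 1UL << 9 in used.i, while a definition of
   floating register $f10 (hard regno 42) sets bit 1UL << (42 % 32) in
   defd.fp.  Regnos 31 and 63 are the hardwired zero registers and are
   never recorded.  */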
   7067. /* Summarize the effects of expression X on the machine. Update SUM, a pointer
  7068. to the summary structure. SET is nonzero if the insn is setting the
  7069. object, otherwise zero. */
  7070. static void
  7071. summarize_insn (rtx x, struct shadow_summary *sum, int set)
  7072. {
  7073. const char *format_ptr;
  7074. int i, j;
  7075. if (x == 0)
  7076. return;
  7077. switch (GET_CODE (x))
  7078. {
  7079. /* ??? Note that this case would be incorrect if the Alpha had a
  7080. ZERO_EXTRACT in SET_DEST. */
  7081. case SET:
  7082. summarize_insn (SET_SRC (x), sum, 0);
  7083. summarize_insn (SET_DEST (x), sum, 1);
  7084. break;
  7085. case CLOBBER:
  7086. summarize_insn (XEXP (x, 0), sum, 1);
  7087. break;
  7088. case USE:
  7089. summarize_insn (XEXP (x, 0), sum, 0);
  7090. break;
  7091. case ASM_OPERANDS:
  7092. for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
  7093. summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
  7094. break;
  7095. case PARALLEL:
  7096. for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
  7097. summarize_insn (XVECEXP (x, 0, i), sum, 0);
  7098. break;
  7099. case SUBREG:
  7100. summarize_insn (SUBREG_REG (x), sum, 0);
  7101. break;
  7102. case REG:
  7103. {
  7104. int regno = REGNO (x);
  7105. unsigned long mask = ((unsigned long) 1) << (regno % 32);
  7106. if (regno == 31 || regno == 63)
  7107. break;
  7108. if (set)
  7109. {
  7110. if (regno < 32)
  7111. sum->defd.i |= mask;
  7112. else
  7113. sum->defd.fp |= mask;
  7114. }
  7115. else
  7116. {
  7117. if (regno < 32)
  7118. sum->used.i |= mask;
  7119. else
  7120. sum->used.fp |= mask;
  7121. }
  7122. }
  7123. break;
  7124. case MEM:
  7125. if (set)
  7126. sum->defd.mem = 1;
  7127. else
  7128. sum->used.mem = 1;
  7129. /* Find the regs used in memory address computation: */
  7130. summarize_insn (XEXP (x, 0), sum, 0);
  7131. break;
  7132. case CONST_INT: case CONST_DOUBLE:
  7133. case SYMBOL_REF: case LABEL_REF: case CONST:
  7134. case SCRATCH: case ASM_INPUT:
  7135. break;
  7136. /* Handle common unary and binary ops for efficiency. */
  7137. case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
  7138. case MOD: case UDIV: case UMOD: case AND: case IOR:
  7139. case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
  7140. case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
  7141. case NE: case EQ: case GE: case GT: case LE:
  7142. case LT: case GEU: case GTU: case LEU: case LTU:
  7143. summarize_insn (XEXP (x, 0), sum, 0);
  7144. summarize_insn (XEXP (x, 1), sum, 0);
  7145. break;
  7146. case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
  7147. case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
  7148. case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
  7149. case SQRT: case FFS:
  7150. summarize_insn (XEXP (x, 0), sum, 0);
  7151. break;
  7152. default:
  7153. format_ptr = GET_RTX_FORMAT (GET_CODE (x));
  7154. for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
  7155. switch (format_ptr[i])
  7156. {
  7157. case 'e':
  7158. summarize_insn (XEXP (x, i), sum, 0);
  7159. break;
  7160. case 'E':
  7161. for (j = XVECLEN (x, i) - 1; j >= 0; j--)
  7162. summarize_insn (XVECEXP (x, i, j), sum, 0);
  7163. break;
  7164. case 'i':
  7165. break;
  7166. default:
  7167. gcc_unreachable ();
  7168. }
  7169. }
  7170. }
  7171. /* Ensure a sufficient number of `trapb' insns are in the code when
  7172. the user requests code with a trap precision of functions or
  7173. instructions.
  7174. In naive mode, when the user requests a trap-precision of
  7175. "instruction", a trapb is needed after every instruction that may
  7176. generate a trap. This ensures that the code is resumption safe but
  7177. it is also slow.
  7178. When optimizations are turned on, we delay issuing a trapb as long
  7179. as possible. In this context, a trap shadow is the sequence of
  7180. instructions that starts with a (potentially) trap generating
  7181. instruction and extends to the next trapb or call_pal instruction
  7182. (but GCC never generates call_pal by itself). We can delay (and
  7183. therefore sometimes omit) a trapb subject to the following
  7184. conditions:
  7185. (a) On entry to the trap shadow, if any Alpha register or memory
  7186. location contains a value that is used as an operand value by some
  7187. instruction in the trap shadow (live on entry), then no instruction
  7188. in the trap shadow may modify the register or memory location.
  7189. (b) Within the trap shadow, the computation of the base register
  7190. for a memory load or store instruction may not involve using the
  7191. result of an instruction that might generate an UNPREDICTABLE
  7192. result.
  7193. (c) Within the trap shadow, no register may be used more than once
  7194. as a destination register. (This is to make life easier for the
  7195. trap-handler.)
  7196. (d) The trap shadow may not include any branch instructions. */
  7197. static void
  7198. alpha_handle_trap_shadows (void)
  7199. {
  7200. struct shadow_summary shadow;
  7201. int trap_pending, exception_nesting;
  7202. rtx i, n;
  7203. trap_pending = 0;
  7204. exception_nesting = 0;
  7205. shadow.used.i = 0;
  7206. shadow.used.fp = 0;
  7207. shadow.used.mem = 0;
  7208. shadow.defd = shadow.used;
  7209. for (i = get_insns (); i ; i = NEXT_INSN (i))
  7210. {
  7211. if (NOTE_P (i))
  7212. {
  7213. switch (NOTE_KIND (i))
  7214. {
  7215. case NOTE_INSN_EH_REGION_BEG:
  7216. exception_nesting++;
  7217. if (trap_pending)
  7218. goto close_shadow;
  7219. break;
  7220. case NOTE_INSN_EH_REGION_END:
  7221. exception_nesting--;
  7222. if (trap_pending)
  7223. goto close_shadow;
  7224. break;
  7225. case NOTE_INSN_EPILOGUE_BEG:
  7226. if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
  7227. goto close_shadow;
  7228. break;
  7229. }
  7230. }
  7231. else if (trap_pending)
  7232. {
  7233. if (alpha_tp == ALPHA_TP_FUNC)
  7234. {
  7235. if (JUMP_P (i)
  7236. && GET_CODE (PATTERN (i)) == RETURN)
  7237. goto close_shadow;
  7238. }
  7239. else if (alpha_tp == ALPHA_TP_INSN)
  7240. {
  7241. if (optimize > 0)
  7242. {
  7243. struct shadow_summary sum;
  7244. sum.used.i = 0;
  7245. sum.used.fp = 0;
  7246. sum.used.mem = 0;
  7247. sum.defd = sum.used;
  7248. switch (GET_CODE (i))
  7249. {
  7250. case INSN:
  7251. /* Annoyingly, get_attr_trap will die on these. */
  7252. if (GET_CODE (PATTERN (i)) == USE
  7253. || GET_CODE (PATTERN (i)) == CLOBBER)
  7254. break;
  7255. summarize_insn (PATTERN (i), &sum, 0);
  7256. if ((sum.defd.i & shadow.defd.i)
  7257. || (sum.defd.fp & shadow.defd.fp))
  7258. {
  7259. /* (c) would be violated */
  7260. goto close_shadow;
  7261. }
  7262. /* Combine shadow with summary of current insn: */
  7263. shadow.used.i |= sum.used.i;
  7264. shadow.used.fp |= sum.used.fp;
  7265. shadow.used.mem |= sum.used.mem;
  7266. shadow.defd.i |= sum.defd.i;
  7267. shadow.defd.fp |= sum.defd.fp;
  7268. shadow.defd.mem |= sum.defd.mem;
  7269. if ((sum.defd.i & shadow.used.i)
  7270. || (sum.defd.fp & shadow.used.fp)
  7271. || (sum.defd.mem & shadow.used.mem))
  7272. {
  7273. /* (a) would be violated (also takes care of (b)) */
  7274. gcc_assert (get_attr_trap (i) != TRAP_YES
  7275. || (!(sum.defd.i & sum.used.i)
  7276. && !(sum.defd.fp & sum.used.fp)));
  7277. goto close_shadow;
  7278. }
  7279. break;
  7280. case JUMP_INSN:
  7281. case CALL_INSN:
  7282. case CODE_LABEL:
  7283. goto close_shadow;
  7284. default:
  7285. gcc_unreachable ();
  7286. }
  7287. }
  7288. else
  7289. {
  7290. close_shadow:
  7291. n = emit_insn_before (gen_trapb (), i);
  7292. PUT_MODE (n, TImode);
  7293. PUT_MODE (i, TImode);
  7294. trap_pending = 0;
  7295. shadow.used.i = 0;
  7296. shadow.used.fp = 0;
  7297. shadow.used.mem = 0;
  7298. shadow.defd = shadow.used;
  7299. }
  7300. }
  7301. }
  7302. if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
  7303. && NONJUMP_INSN_P (i)
  7304. && GET_CODE (PATTERN (i)) != USE
  7305. && GET_CODE (PATTERN (i)) != CLOBBER
  7306. && get_attr_trap (i) == TRAP_YES)
  7307. {
  7308. if (optimize && !trap_pending)
  7309. summarize_insn (PATTERN (i), &shadow, 0);
  7310. trap_pending = 1;
  7311. }
  7312. }
  7313. }
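/* A simplified sketch of the two mask tests above that force a shadow to be
   closed.  The helper name is hypothetical, and the real code performs the
   second test only after merging the current insn's summary into the shadow,
   but the masks involved are the same.  */

static int ATTRIBUTE_UNUSED
example_shadow_must_close (const struct shadow_summary *shadow,
                           const struct shadow_summary *sum)
{
  /* (c): the insn writes a register the shadow has already written.  */
  if ((sum->defd.i & shadow->defd.i) || (sum->defd.fp & shadow->defd.fp))
    return 1;

  /* (a), which also covers (b): the insn writes something the shadow
     has already read.  */
  if ((sum->defd.i & shadow->used.i)
      || (sum->defd.fp & shadow->used.fp)
      || (sum->defd.mem & shadow->used.mem))
    return 1;

  return 0;
}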
  7314. /* Alpha can only issue instruction groups simultaneously if they are
  7315. suitably aligned. This is very processor-specific. */
  7316. /* There are a number of entries in alphaev4_insn_pipe and alphaev5_insn_pipe
  7317. that are marked "fake". These instructions do not exist on that target,
  7318. but it is possible to see these insns with deranged combinations of
  7319. command-line options, such as "-mtune=ev4 -mmax". Instead of aborting,
  7320. choose a result at random. */
  7321. enum alphaev4_pipe {
  7322. EV4_STOP = 0,
  7323. EV4_IB0 = 1,
  7324. EV4_IB1 = 2,
  7325. EV4_IBX = 4
  7326. };
  7327. enum alphaev5_pipe {
  7328. EV5_STOP = 0,
  7329. EV5_NONE = 1,
  7330. EV5_E01 = 2,
  7331. EV5_E0 = 4,
  7332. EV5_E1 = 8,
  7333. EV5_FAM = 16,
  7334. EV5_FA = 32,
  7335. EV5_FM = 64
  7336. };
  7337. static enum alphaev4_pipe
  7338. alphaev4_insn_pipe (rtx insn)
  7339. {
  7340. if (recog_memoized (insn) < 0)
  7341. return EV4_STOP;
  7342. if (get_attr_length (insn) != 4)
  7343. return EV4_STOP;
  7344. switch (get_attr_type (insn))
  7345. {
  7346. case TYPE_ILD:
  7347. case TYPE_LDSYM:
  7348. case TYPE_FLD:
  7349. case TYPE_LD_L:
  7350. return EV4_IBX;
  7351. case TYPE_IADD:
  7352. case TYPE_ILOG:
  7353. case TYPE_ICMOV:
  7354. case TYPE_ICMP:
  7355. case TYPE_FST:
  7356. case TYPE_SHIFT:
  7357. case TYPE_IMUL:
  7358. case TYPE_FBR:
  7359. case TYPE_MVI: /* fake */
  7360. return EV4_IB0;
  7361. case TYPE_IST:
  7362. case TYPE_MISC:
  7363. case TYPE_IBR:
  7364. case TYPE_JSR:
  7365. case TYPE_CALLPAL:
  7366. case TYPE_FCPYS:
  7367. case TYPE_FCMOV:
  7368. case TYPE_FADD:
  7369. case TYPE_FDIV:
  7370. case TYPE_FMUL:
  7371. case TYPE_ST_C:
  7372. case TYPE_MB:
  7373. case TYPE_FSQRT: /* fake */
  7374. case TYPE_FTOI: /* fake */
  7375. case TYPE_ITOF: /* fake */
  7376. return EV4_IB1;
  7377. default:
  7378. gcc_unreachable ();
  7379. }
  7380. }
  7381. static enum alphaev5_pipe
  7382. alphaev5_insn_pipe (rtx insn)
  7383. {
  7384. if (recog_memoized (insn) < 0)
  7385. return EV5_STOP;
  7386. if (get_attr_length (insn) != 4)
  7387. return EV5_STOP;
  7388. switch (get_attr_type (insn))
  7389. {
  7390. case TYPE_ILD:
  7391. case TYPE_FLD:
  7392. case TYPE_LDSYM:
  7393. case TYPE_IADD:
  7394. case TYPE_ILOG:
  7395. case TYPE_ICMOV:
  7396. case TYPE_ICMP:
  7397. return EV5_E01;
  7398. case TYPE_IST:
  7399. case TYPE_FST:
  7400. case TYPE_SHIFT:
  7401. case TYPE_IMUL:
  7402. case TYPE_MISC:
  7403. case TYPE_MVI:
  7404. case TYPE_LD_L:
  7405. case TYPE_ST_C:
  7406. case TYPE_MB:
  7407. case TYPE_FTOI: /* fake */
  7408. case TYPE_ITOF: /* fake */
  7409. return EV5_E0;
  7410. case TYPE_IBR:
  7411. case TYPE_JSR:
  7412. case TYPE_CALLPAL:
  7413. return EV5_E1;
  7414. case TYPE_FCPYS:
  7415. return EV5_FAM;
  7416. case TYPE_FBR:
  7417. case TYPE_FCMOV:
  7418. case TYPE_FADD:
  7419. case TYPE_FDIV:
  7420. case TYPE_FSQRT: /* fake */
  7421. return EV5_FA;
  7422. case TYPE_FMUL:
  7423. return EV5_FM;
  7424. default:
  7425. gcc_unreachable ();
  7426. }
  7427. }
  7428. /* IN_USE is a mask of the slots currently filled within the insn group.
  7429. The mask bits come from alphaev4_pipe above. If EV4_IBX is set, then
  7430. the insn in EV4_IB0 can be swapped by the hardware into EV4_IB1.
  7431. LEN is, of course, the length of the group in bytes. */
  7432. static rtx
  7433. alphaev4_next_group (rtx insn, int *pin_use, int *plen)
  7434. {
  7435. int len, in_use;
  7436. len = in_use = 0;
  7437. if (! INSN_P (insn)
  7438. || GET_CODE (PATTERN (insn)) == CLOBBER
  7439. || GET_CODE (PATTERN (insn)) == USE)
  7440. goto next_and_done;
  7441. while (1)
  7442. {
  7443. enum alphaev4_pipe pipe;
  7444. pipe = alphaev4_insn_pipe (insn);
  7445. switch (pipe)
  7446. {
  7447. case EV4_STOP:
  7448. /* Force complex instructions to start new groups. */
  7449. if (in_use)
  7450. goto done;
  7451. /* If this is a completely unrecognized insn, it's an asm.
  7452. We don't know how long it is, so record length as -1 to
  7453. signal a needed realignment. */
  7454. if (recog_memoized (insn) < 0)
  7455. len = -1;
  7456. else
  7457. len = get_attr_length (insn);
  7458. goto next_and_done;
  7459. case EV4_IBX:
  7460. if (in_use & EV4_IB0)
  7461. {
  7462. if (in_use & EV4_IB1)
  7463. goto done;
  7464. in_use |= EV4_IB1;
  7465. }
  7466. else
  7467. in_use |= EV4_IB0 | EV4_IBX;
  7468. break;
  7469. case EV4_IB0:
  7470. if (in_use & EV4_IB0)
  7471. {
  7472. if (!(in_use & EV4_IBX) || (in_use & EV4_IB1))
  7473. goto done;
  7474. in_use |= EV4_IB1;
  7475. }
  7476. in_use |= EV4_IB0;
  7477. break;
  7478. case EV4_IB1:
  7479. if (in_use & EV4_IB1)
  7480. goto done;
  7481. in_use |= EV4_IB1;
  7482. break;
  7483. default:
  7484. gcc_unreachable ();
  7485. }
  7486. len += 4;
  7487. /* Haifa doesn't do well scheduling branches. */
  7488. if (JUMP_P (insn))
  7489. goto next_and_done;
  7490. next:
  7491. insn = next_nonnote_insn (insn);
  7492. if (!insn || ! INSN_P (insn))
  7493. goto done;
  7494. /* Let Haifa tell us where it thinks insn group boundaries are. */
  7495. if (GET_MODE (insn) == TImode)
  7496. goto done;
  7497. if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
  7498. goto next;
  7499. }
  7500. next_and_done:
  7501. insn = next_nonnote_insn (insn);
  7502. done:
  7503. *plen = len;
  7504. *pin_use = in_use;
  7505. return insn;
  7506. }
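/* Example of the slotting above: an EV4 group that starts with a load gets
   in_use = EV4_IB0 | EV4_IBX, i.e. the load occupies IB0 but may still be
   swapped into IB1.  If the next insn is IB0-only (say an add), that swap
   is used: in_use becomes EV4_IB0 | EV4_IB1 | EV4_IBX, and a third IB0-only
   insn would then close the group.  */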
  7507. /* IN_USE is a mask of the slots currently filled within the insn group.
  7508. The mask bits come from alphaev5_pipe above. If EV5_E01 is set, then
  7509. the insn in EV5_E0 can be swapped by the hardware into EV5_E1.
  7510. LEN is, of course, the length of the group in bytes. */
  7511. static rtx
  7512. alphaev5_next_group (rtx insn, int *pin_use, int *plen)
  7513. {
  7514. int len, in_use;
  7515. len = in_use = 0;
  7516. if (! INSN_P (insn)
  7517. || GET_CODE (PATTERN (insn)) == CLOBBER
  7518. || GET_CODE (PATTERN (insn)) == USE)
  7519. goto next_and_done;
  7520. while (1)
  7521. {
  7522. enum alphaev5_pipe pipe;
  7523. pipe = alphaev5_insn_pipe (insn);
  7524. switch (pipe)
  7525. {
  7526. case EV5_STOP:
  7527. /* Force complex instructions to start new groups. */
  7528. if (in_use)
  7529. goto done;
  7530. /* If this is a completely unrecognized insn, it's an asm.
  7531. We don't know how long it is, so record length as -1 to
  7532. signal a needed realignment. */
  7533. if (recog_memoized (insn) < 0)
  7534. len = -1;
  7535. else
  7536. len = get_attr_length (insn);
  7537. goto next_and_done;
   7538. /* ??? In most of the places below, we would like to assert that this never
   7539. happens, as it would indicate an error either in Haifa, or
  7540. in the scheduling description. Unfortunately, Haifa never
  7541. schedules the last instruction of the BB, so we don't have
  7542. an accurate TI bit to go off. */
  7543. case EV5_E01:
  7544. if (in_use & EV5_E0)
  7545. {
  7546. if (in_use & EV5_E1)
  7547. goto done;
  7548. in_use |= EV5_E1;
  7549. }
  7550. else
  7551. in_use |= EV5_E0 | EV5_E01;
  7552. break;
  7553. case EV5_E0:
  7554. if (in_use & EV5_E0)
  7555. {
  7556. if (!(in_use & EV5_E01) || (in_use & EV5_E1))
  7557. goto done;
  7558. in_use |= EV5_E1;
  7559. }
  7560. in_use |= EV5_E0;
  7561. break;
  7562. case EV5_E1:
  7563. if (in_use & EV5_E1)
  7564. goto done;
  7565. in_use |= EV5_E1;
  7566. break;
  7567. case EV5_FAM:
  7568. if (in_use & EV5_FA)
  7569. {
  7570. if (in_use & EV5_FM)
  7571. goto done;
  7572. in_use |= EV5_FM;
  7573. }
  7574. else
  7575. in_use |= EV5_FA | EV5_FAM;
  7576. break;
  7577. case EV5_FA:
  7578. if (in_use & EV5_FA)
  7579. goto done;
  7580. in_use |= EV5_FA;
  7581. break;
  7582. case EV5_FM:
  7583. if (in_use & EV5_FM)
  7584. goto done;
  7585. in_use |= EV5_FM;
  7586. break;
  7587. case EV5_NONE:
  7588. break;
  7589. default:
  7590. gcc_unreachable ();
  7591. }
  7592. len += 4;
  7593. /* Haifa doesn't do well scheduling branches. */
  7594. /* ??? If this is predicted not-taken, slotting continues, except
  7595. that no more IBR, FBR, or JSR insns may be slotted. */
  7596. if (JUMP_P (insn))
  7597. goto next_and_done;
  7598. next:
  7599. insn = next_nonnote_insn (insn);
  7600. if (!insn || ! INSN_P (insn))
  7601. goto done;
  7602. /* Let Haifa tell us where it thinks insn group boundaries are. */
  7603. if (GET_MODE (insn) == TImode)
  7604. goto done;
  7605. if (GET_CODE (insn) == CLOBBER || GET_CODE (insn) == USE)
  7606. goto next;
  7607. }
  7608. next_and_done:
  7609. insn = next_nonnote_insn (insn);
  7610. done:
  7611. *plen = len;
  7612. *pin_use = in_use;
  7613. return insn;
  7614. }
  7615. static rtx
  7616. alphaev4_next_nop (int *pin_use)
  7617. {
  7618. int in_use = *pin_use;
  7619. rtx nop;
  7620. if (!(in_use & EV4_IB0))
  7621. {
  7622. in_use |= EV4_IB0;
  7623. nop = gen_nop ();
  7624. }
  7625. else if ((in_use & (EV4_IBX|EV4_IB1)) == EV4_IBX)
  7626. {
  7627. in_use |= EV4_IB1;
  7628. nop = gen_nop ();
  7629. }
  7630. else if (TARGET_FP && !(in_use & EV4_IB1))
  7631. {
  7632. in_use |= EV4_IB1;
  7633. nop = gen_fnop ();
  7634. }
  7635. else
  7636. nop = gen_unop ();
  7637. *pin_use = in_use;
  7638. return nop;
  7639. }
  7640. static rtx
  7641. alphaev5_next_nop (int *pin_use)
  7642. {
  7643. int in_use = *pin_use;
  7644. rtx nop;
  7645. if (!(in_use & EV5_E1))
  7646. {
  7647. in_use |= EV5_E1;
  7648. nop = gen_nop ();
  7649. }
  7650. else if (TARGET_FP && !(in_use & EV5_FA))
  7651. {
  7652. in_use |= EV5_FA;
  7653. nop = gen_fnop ();
  7654. }
  7655. else if (TARGET_FP && !(in_use & EV5_FM))
  7656. {
  7657. in_use |= EV5_FM;
  7658. nop = gen_fnop ();
  7659. }
  7660. else
  7661. nop = gen_unop ();
  7662. *pin_use = in_use;
  7663. return nop;
  7664. }
  7665. /* The instruction group alignment main loop. */
  7666. static void
  7667. alpha_align_insns (unsigned int max_align,
  7668. rtx (*next_group) (rtx, int *, int *),
  7669. rtx (*next_nop) (int *))
  7670. {
  7671. /* ALIGN is the known alignment for the insn group. */
  7672. unsigned int align;
  7673. /* OFS is the offset of the current insn in the insn group. */
  7674. int ofs;
  7675. int prev_in_use, in_use, len, ldgp;
  7676. rtx i, next;
  7677. /* Let shorten branches care for assigning alignments to code labels. */
  7678. shorten_branches (get_insns ());
  7679. if (align_functions < 4)
  7680. align = 4;
  7681. else if ((unsigned int) align_functions < max_align)
  7682. align = align_functions;
  7683. else
  7684. align = max_align;
  7685. ofs = prev_in_use = 0;
  7686. i = get_insns ();
  7687. if (NOTE_P (i))
  7688. i = next_nonnote_insn (i);
  7689. ldgp = alpha_function_needs_gp ? 8 : 0;
  7690. while (i)
  7691. {
  7692. next = (*next_group) (i, &in_use, &len);
  7693. /* When we see a label, resync alignment etc. */
  7694. if (LABEL_P (i))
  7695. {
  7696. unsigned int new_align = 1 << label_to_alignment (i);
  7697. if (new_align >= align)
  7698. {
  7699. align = new_align < max_align ? new_align : max_align;
  7700. ofs = 0;
  7701. }
  7702. else if (ofs & (new_align-1))
  7703. ofs = (ofs | (new_align-1)) + 1;
  7704. gcc_assert (!len);
  7705. }
   7706. /* Handle complex instructions specially. */
  7707. else if (in_use == 0)
  7708. {
  7709. /* Asms will have length < 0. This is a signal that we have
  7710. lost alignment knowledge. Assume, however, that the asm
  7711. will not mis-align instructions. */
  7712. if (len < 0)
  7713. {
  7714. ofs = 0;
  7715. align = 4;
  7716. len = 0;
  7717. }
  7718. }
  7719. /* If the known alignment is smaller than the recognized insn group,
  7720. realign the output. */
  7721. else if ((int) align < len)
  7722. {
  7723. unsigned int new_log_align = len > 8 ? 4 : 3;
  7724. rtx prev, where;
  7725. where = prev = prev_nonnote_insn (i);
  7726. if (!where || !LABEL_P (where))
  7727. where = i;
  7728. /* Can't realign between a call and its gp reload. */
  7729. if (! (TARGET_EXPLICIT_RELOCS
  7730. && prev && CALL_P (prev)))
  7731. {
  7732. emit_insn_before (gen_realign (GEN_INT (new_log_align)), where);
  7733. align = 1 << new_log_align;
  7734. ofs = 0;
  7735. }
  7736. }
  7737. /* We may not insert padding inside the initial ldgp sequence. */
  7738. else if (ldgp > 0)
  7739. ldgp -= len;
7740. /* If the group won't fit in the same INT16 as the previous group,
  7741. we need to add padding to keep the group together. Rather
  7742. than simply leaving the insn filling to the assembler, we
  7743. can make use of the knowledge of what sorts of instructions
  7744. were issued in the previous group to make sure that all of
  7745. the added nops are really free. */
  7746. else if (ofs + len > (int) align)
  7747. {
  7748. int nop_count = (align - ofs) / 4;
  7749. rtx where;
  7750. /* Insert nops before labels, branches, and calls to truly merge
  7751. the execution of the nops with the previous instruction group. */
  7752. where = prev_nonnote_insn (i);
  7753. if (where)
  7754. {
  7755. if (LABEL_P (where))
  7756. {
  7757. rtx where2 = prev_nonnote_insn (where);
  7758. if (where2 && JUMP_P (where2))
  7759. where = where2;
  7760. }
  7761. else if (NONJUMP_INSN_P (where))
  7762. where = i;
  7763. }
  7764. else
  7765. where = i;
  7766. do
  7767. emit_insn_before ((*next_nop)(&prev_in_use), where);
  7768. while (--nop_count);
  7769. ofs = 0;
  7770. }
  7771. ofs = (ofs + len) & (align - 1);
  7772. prev_in_use = in_use;
  7773. i = next;
  7774. }
  7775. }
7776. /* Insert an unop between a noreturn function call and the GP load that follows it.  */
  7777. static void
  7778. alpha_pad_noreturn (void)
  7779. {
  7780. rtx insn, next;
  7781. for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
  7782. {
  7783. if (! (CALL_P (insn)
  7784. && find_reg_note (insn, REG_NORETURN, NULL_RTX)))
  7785. continue;
  7786. /* Make sure we do not split a call and its corresponding
  7787. CALL_ARG_LOCATION note. */
  7788. if (CALL_P (insn))
  7789. {
  7790. next = NEXT_INSN (insn);
  7791. if (next && NOTE_P (next)
  7792. && NOTE_KIND (next) == NOTE_INSN_CALL_ARG_LOCATION)
  7793. insn = next;
  7794. }
  7795. next = next_active_insn (insn);
  7796. if (next)
  7797. {
  7798. rtx pat = PATTERN (next);
  7799. if (GET_CODE (pat) == SET
  7800. && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE
  7801. && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1)
  7802. emit_insn_after (gen_unop (), insn);
  7803. }
  7804. }
  7805. }
  7806. /* Machine dependent reorg pass. */
  7807. static void
  7808. alpha_reorg (void)
  7809. {
  7810. /* Workaround for a linker error that triggers when an
7811. exception handler immediately follows a noreturn function.
  7812. The instruction stream from an object file:
  7813. 54: 00 40 5b 6b jsr ra,(t12),58 <__func+0x58>
  7814. 58: 00 00 ba 27 ldah gp,0(ra)
  7815. 5c: 00 00 bd 23 lda gp,0(gp)
  7816. 60: 00 00 7d a7 ldq t12,0(gp)
  7817. 64: 00 40 5b 6b jsr ra,(t12),68 <__func+0x68>
  7818. was converted in the final link pass to:
  7819. fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8>
  7820. fdb28: 00 00 fe 2f unop
  7821. fdb2c: 00 00 fe 2f unop
  7822. fdb30: 30 82 7d a7 ldq t12,-32208(gp)
  7823. fdb34: 00 40 5b 6b jsr ra,(t12),fdb38 <__func+0x68>
  7824. GP load instructions were wrongly cleared by the linker relaxation
  7825. pass. This workaround prevents removal of GP loads by inserting
  7826. an unop instruction between a noreturn function call and
  7827. exception handler prologue. */
  7828. if (current_function_has_exception_handlers ())
  7829. alpha_pad_noreturn ();
  7830. if (alpha_tp != ALPHA_TP_PROG || flag_exceptions)
  7831. alpha_handle_trap_shadows ();
  7832. /* Due to the number of extra trapb insns, don't bother fixing up
  7833. alignment when trap precision is instruction. Moreover, we can
  7834. only do our job when sched2 is run. */
  7835. if (optimize && !optimize_size
  7836. && alpha_tp != ALPHA_TP_INSN
  7837. && flag_schedule_insns_after_reload)
  7838. {
  7839. if (alpha_tune == PROCESSOR_EV4)
  7840. alpha_align_insns (8, alphaev4_next_group, alphaev4_next_nop);
  7841. else if (alpha_tune == PROCESSOR_EV5)
  7842. alpha_align_insns (16, alphaev5_next_group, alphaev5_next_nop);
  7843. }
  7844. }
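/* Emit the assembler directives that open every output file:
   .set noreorder and .set volatile (plus .set noat and .set nomacro where
   appropriate), and a .arch directive when an extended instruction set
   is in use.  */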
  7845. static void
  7846. alpha_file_start (void)
  7847. {
  7848. default_file_start ();
  7849. fputs ("\t.set noreorder\n", asm_out_file);
  7850. fputs ("\t.set volatile\n", asm_out_file);
  7851. if (TARGET_ABI_OSF)
  7852. fputs ("\t.set noat\n", asm_out_file);
  7853. if (TARGET_EXPLICIT_RELOCS)
  7854. fputs ("\t.set nomacro\n", asm_out_file);
  7855. if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX)
  7856. {
  7857. const char *arch;
  7858. if (alpha_cpu == PROCESSOR_EV6 || TARGET_FIX || TARGET_CIX)
  7859. arch = "ev6";
  7860. else if (TARGET_MAX)
  7861. arch = "pca56";
  7862. else if (TARGET_BWX)
  7863. arch = "ev56";
  7864. else if (alpha_cpu == PROCESSOR_EV5)
  7865. arch = "ev5";
  7866. else
  7867. arch = "ev4";
  7868. fprintf (asm_out_file, "\t.arch %s\n", arch);
  7869. }
  7870. }
  7871. /* Since we don't have a .dynbss section, we should not allow global
  7872. relocations in the .rodata section. */
  7873. static int
  7874. alpha_elf_reloc_rw_mask (void)
  7875. {
  7876. return flag_pic ? 3 : 2;
  7877. }
  7878. /* Return a section for X. The only special thing we do here is to
  7879. honor small data. */
  7880. static section *
  7881. alpha_elf_select_rtx_section (enum machine_mode mode, rtx x,
  7882. unsigned HOST_WIDE_INT align)
  7883. {
  7884. if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value)
  7885. /* ??? Consider using mergeable sdata sections. */
  7886. return sdata_section;
  7887. else
  7888. return default_elf_select_rtx_section (mode, x, align);
  7889. }
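/* Compute section type flags for NAME, adding SECTION_SMALL for the
   small-data and small-bss sections on top of the default ELF flags.  */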
  7890. static unsigned int
  7891. alpha_elf_section_type_flags (tree decl, const char *name, int reloc)
  7892. {
  7893. unsigned int flags = 0;
  7894. if (strcmp (name, ".sdata") == 0
  7895. || strncmp (name, ".sdata.", 7) == 0
  7896. || strncmp (name, ".gnu.linkonce.s.", 16) == 0
  7897. || strcmp (name, ".sbss") == 0
  7898. || strncmp (name, ".sbss.", 6) == 0
  7899. || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
  7900. flags = SECTION_SMALL;
  7901. flags |= default_section_type_flags (decl, name, reloc);
  7902. return flags;
  7903. }
7904. /* Structure to collect function names for final output in the link section. */
  7905. /* Note that items marked with GTY can't be ifdef'ed out. */
  7906. enum reloc_kind
  7907. {
  7908. KIND_LINKAGE,
  7909. KIND_CODEADDR
  7910. };
  7911. struct GTY(()) alpha_links
  7912. {
  7913. rtx func;
  7914. rtx linkage;
  7915. enum reloc_kind rkind;
  7916. };
  7917. #if TARGET_ABI_OPEN_VMS
  7918. /* Return the VMS argument type corresponding to MODE. */
  7919. enum avms_arg_type
  7920. alpha_arg_type (enum machine_mode mode)
  7921. {
  7922. switch (mode)
  7923. {
  7924. case SFmode:
  7925. return TARGET_FLOAT_VAX ? FF : FS;
  7926. case DFmode:
  7927. return TARGET_FLOAT_VAX ? FD : FT;
  7928. default:
  7929. return I64;
  7930. }
  7931. }
  7932. /* Return an rtx for an integer representing the VMS Argument Information
  7933. register value. */
  7934. rtx
  7935. alpha_arg_info_reg_val (CUMULATIVE_ARGS cum)
  7936. {
  7937. unsigned HOST_WIDE_INT regval = cum.num_args;
  7938. int i;
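  /* CUM.num_args occupies the low bits; each argument's 3-bit type code is
     then packed above it, with argument I at bits 8 + 3*I.  */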
  7939. for (i = 0; i < 6; i++)
  7940. regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
  7941. return GEN_INT (regval);
  7942. }
  7943. /* Return a SYMBOL_REF representing the reference to the .linkage entry
  7944. of function FUNC built for calls made from CFUNDECL. LFLAG is 1 if
  7945. this is the reference to the linkage pointer value, 0 if this is the
7946. reference to the function entry value. RFLAG is 1 if this is a reduced
  7947. reference (code address only), 0 if this is a full reference. */
  7948. rtx
  7949. alpha_use_linkage (rtx func, bool lflag, bool rflag)
  7950. {
  7951. struct alpha_links *al = NULL;
  7952. const char *name = XSTR (func, 0);
  7953. if (cfun->machine->links)
  7954. {
  7955. splay_tree_node lnode;
  7956. /* Is this name already defined? */
  7957. lnode = splay_tree_lookup (cfun->machine->links, (splay_tree_key) name);
  7958. if (lnode)
  7959. al = (struct alpha_links *) lnode->value;
  7960. }
  7961. else
  7962. cfun->machine->links = splay_tree_new_ggc
  7963. ((splay_tree_compare_fn) strcmp,
  7964. ggc_alloc_splay_tree_str_alpha_links_splay_tree_s,
  7965. ggc_alloc_splay_tree_str_alpha_links_splay_tree_node_s);
  7966. if (al == NULL)
  7967. {
  7968. size_t buf_len;
  7969. char *linksym;
  7970. tree id;
  7971. if (name[0] == '*')
  7972. name++;
  7973. /* Follow transparent alias, as this is used for CRTL translations. */
  7974. id = maybe_get_identifier (name);
  7975. if (id)
  7976. {
  7977. while (IDENTIFIER_TRANSPARENT_ALIAS (id))
  7978. id = TREE_CHAIN (id);
  7979. name = IDENTIFIER_POINTER (id);
  7980. }
  7981. buf_len = strlen (name) + 8 + 9;
  7982. linksym = (char *) alloca (buf_len);
  7983. snprintf (linksym, buf_len, "$%d..%s..lk", cfun->funcdef_no, name);
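      /* For example, a reference to FOO from the function numbered 2 yields
	 the linkage symbol "$2..FOO..lk".  */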
  7984. al = ggc_alloc_alpha_links ();
  7985. al->func = func;
  7986. al->linkage = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (linksym));
  7987. splay_tree_insert (cfun->machine->links,
  7988. (splay_tree_key) ggc_strdup (name),
  7989. (splay_tree_value) al);
  7990. }
  7991. al->rkind = rflag ? KIND_CODEADDR : KIND_LINKAGE;
  7992. if (lflag)
  7993. return gen_rtx_MEM (Pmode, plus_constant (Pmode, al->linkage, 8));
  7994. else
  7995. return al->linkage;
  7996. }
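/* splay_tree_foreach callback: emit one linkage entry to the stream passed
   in DATA -- a .code_address request, a .quad pair for a locally defined
   function, or a .linkage request for an external one.  */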
  7997. static int
  7998. alpha_write_one_linkage (splay_tree_node node, void *data)
  7999. {
  8000. const char *const name = (const char *) node->key;
  8001. struct alpha_links *link = (struct alpha_links *) node->value;
  8002. FILE *stream = (FILE *) data;
  8003. ASM_OUTPUT_INTERNAL_LABEL (stream, XSTR (link->linkage, 0));
  8004. if (link->rkind == KIND_CODEADDR)
  8005. {
  8006. /* External and used, request code address. */
  8007. fprintf (stream, "\t.code_address ");
  8008. }
  8009. else
  8010. {
  8011. if (!SYMBOL_REF_EXTERNAL_P (link->func)
  8012. && SYMBOL_REF_LOCAL_P (link->func))
  8013. {
  8014. /* Locally defined, build linkage pair. */
  8015. fprintf (stream, "\t.quad %s..en\n", name);
  8016. fprintf (stream, "\t.quad ");
  8017. }
  8018. else
  8019. {
  8020. /* External, request linkage pair. */
  8021. fprintf (stream, "\t.linkage ");
  8022. }
  8023. }
  8024. assemble_name (stream, name);
  8025. fputs ("\n", stream);
  8026. return 0;
  8027. }
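/* Emit the .link section for the current function: the procedure descriptor
   for FUNNAME and every linkage entry recorded in cfun->machine->links.  */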
  8028. static void
  8029. alpha_write_linkage (FILE *stream, const char *funname)
  8030. {
  8031. fprintf (stream, "\t.link\n");
  8032. fprintf (stream, "\t.align 3\n");
  8033. in_section = NULL;
  8034. #ifdef TARGET_VMS_CRASH_DEBUG
  8035. fputs ("\t.name ", stream);
  8036. assemble_name (stream, funname);
  8037. fputs ("..na\n", stream);
  8038. #endif
  8039. ASM_OUTPUT_LABEL (stream, funname);
  8040. fprintf (stream, "\t.pdesc ");
  8041. assemble_name (stream, funname);
  8042. fprintf (stream, "..en,%s\n",
  8043. alpha_procedure_type == PT_STACK ? "stack"
  8044. : alpha_procedure_type == PT_REGISTER ? "reg" : "null");
  8045. if (cfun->machine->links)
  8046. {
  8047. splay_tree_foreach (cfun->machine->links, alpha_write_one_linkage, stream);
  8048. /* splay_tree_delete (func->links); */
  8049. }
  8050. }
  8051. /* Switch to an arbitrary section NAME with attributes as specified
  8052. by FLAGS. ALIGN specifies any known alignment requirements for
  8053. the section; 0 if the default should be used. */
  8054. static void
  8055. vms_asm_named_section (const char *name, unsigned int flags,
  8056. tree decl ATTRIBUTE_UNUSED)
  8057. {
  8058. fputc ('\n', asm_out_file);
  8059. fprintf (asm_out_file, ".section\t%s", name);
  8060. if (flags & SECTION_DEBUG)
  8061. fprintf (asm_out_file, ",NOWRT");
  8062. fputc ('\n', asm_out_file);
  8063. }
  8064. /* Record an element in the table of global constructors. SYMBOL is
  8065. a SYMBOL_REF of the function to be called; PRIORITY is a number
  8066. between 0 and MAX_INIT_PRIORITY.
  8067. Differs from default_ctors_section_asm_out_constructor in that the
  8068. width of the .ctors entry is always 64 bits, rather than the 32 bits
  8069. used by a normal pointer. */
  8070. static void
  8071. vms_asm_out_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
  8072. {
  8073. switch_to_section (ctors_section);
  8074. assemble_align (BITS_PER_WORD);
  8075. assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
  8076. }
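/* Likewise, but for the table of global destructors.  */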
  8077. static void
  8078. vms_asm_out_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
  8079. {
  8080. switch_to_section (dtors_section);
  8081. assemble_align (BITS_PER_WORD);
  8082. assemble_integer (symbol, UNITS_PER_WORD, BITS_PER_WORD, 1);
  8083. }
  8084. #else
  8085. rtx
  8086. alpha_use_linkage (rtx func ATTRIBUTE_UNUSED,
  8087. bool lflag ATTRIBUTE_UNUSED,
  8088. bool rflag ATTRIBUTE_UNUSED)
  8089. {
  8090. return NULL_RTX;
  8091. }
  8092. #endif /* TARGET_ABI_OPEN_VMS */
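/* Set up target-specific library function names; on VMS, integer division
   and remainder are handled by the OTS$ runtime routines.  */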
  8093. static void
  8094. alpha_init_libfuncs (void)
  8095. {
  8096. if (TARGET_ABI_OPEN_VMS)
  8097. {
  8098. /* Use the VMS runtime library functions for division and
  8099. remainder. */
  8100. set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
  8101. set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
  8102. set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
  8103. set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
  8104. set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
  8105. set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
  8106. set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
  8107. set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
  8108. abort_libfunc = init_one_libfunc ("decc$abort");
  8109. memcmp_libfunc = init_one_libfunc ("decc$memcmp");
  8110. #ifdef MEM_LIBFUNCS_INIT
  8111. MEM_LIBFUNCS_INIT;
  8112. #endif
  8113. }
  8114. }
  8115. /* On the Alpha, we use this to disable the floating-point registers
  8116. when they don't exist. */
  8117. static void
  8118. alpha_conditional_register_usage (void)
  8119. {
  8120. int i;
  8121. if (! TARGET_FPREGS)
  8122. for (i = 32; i < 63; i++)
  8123. fixed_regs[i] = call_used_regs[i] = 1;
  8124. }
  8125. /* Initialize the GCC target structure. */
  8126. #if TARGET_ABI_OPEN_VMS
  8127. # undef TARGET_ATTRIBUTE_TABLE
  8128. # define TARGET_ATTRIBUTE_TABLE vms_attribute_table
  8129. # undef TARGET_CAN_ELIMINATE
  8130. # define TARGET_CAN_ELIMINATE alpha_vms_can_eliminate
  8131. #endif
  8132. #undef TARGET_IN_SMALL_DATA_P
  8133. #define TARGET_IN_SMALL_DATA_P alpha_in_small_data_p
  8134. #undef TARGET_ASM_ALIGNED_HI_OP
  8135. #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
  8136. #undef TARGET_ASM_ALIGNED_DI_OP
  8137. #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
  8138. /* Default unaligned ops are provided for ELF systems. To get unaligned
  8139. data for non-ELF systems, we have to turn off auto alignment. */
  8140. #if TARGET_ABI_OPEN_VMS
  8141. #undef TARGET_ASM_UNALIGNED_HI_OP
  8142. #define TARGET_ASM_UNALIGNED_HI_OP "\t.align 0\n\t.word\t"
  8143. #undef TARGET_ASM_UNALIGNED_SI_OP
  8144. #define TARGET_ASM_UNALIGNED_SI_OP "\t.align 0\n\t.long\t"
  8145. #undef TARGET_ASM_UNALIGNED_DI_OP
  8146. #define TARGET_ASM_UNALIGNED_DI_OP "\t.align 0\n\t.quad\t"
  8147. #endif
  8148. #undef TARGET_ASM_RELOC_RW_MASK
  8149. #define TARGET_ASM_RELOC_RW_MASK alpha_elf_reloc_rw_mask
  8150. #undef TARGET_ASM_SELECT_RTX_SECTION
  8151. #define TARGET_ASM_SELECT_RTX_SECTION alpha_elf_select_rtx_section
  8152. #undef TARGET_SECTION_TYPE_FLAGS
  8153. #define TARGET_SECTION_TYPE_FLAGS alpha_elf_section_type_flags
  8154. #undef TARGET_ASM_FUNCTION_END_PROLOGUE
  8155. #define TARGET_ASM_FUNCTION_END_PROLOGUE alpha_output_function_end_prologue
  8156. #undef TARGET_INIT_LIBFUNCS
  8157. #define TARGET_INIT_LIBFUNCS alpha_init_libfuncs
  8158. #undef TARGET_LEGITIMIZE_ADDRESS
  8159. #define TARGET_LEGITIMIZE_ADDRESS alpha_legitimize_address
  8160. #undef TARGET_ASM_FILE_START
  8161. #define TARGET_ASM_FILE_START alpha_file_start
  8162. #undef TARGET_SCHED_ADJUST_COST
  8163. #define TARGET_SCHED_ADJUST_COST alpha_adjust_cost
  8164. #undef TARGET_SCHED_ISSUE_RATE
  8165. #define TARGET_SCHED_ISSUE_RATE alpha_issue_rate
  8166. #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
  8167. #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  8168. alpha_multipass_dfa_lookahead
  8169. #undef TARGET_HAVE_TLS
  8170. #define TARGET_HAVE_TLS HAVE_AS_TLS
  8171. #undef TARGET_BUILTIN_DECL
  8172. #define TARGET_BUILTIN_DECL alpha_builtin_decl
  8173. #undef TARGET_INIT_BUILTINS
  8174. #define TARGET_INIT_BUILTINS alpha_init_builtins
  8175. #undef TARGET_EXPAND_BUILTIN
  8176. #define TARGET_EXPAND_BUILTIN alpha_expand_builtin
  8177. #undef TARGET_FOLD_BUILTIN
  8178. #define TARGET_FOLD_BUILTIN alpha_fold_builtin
  8179. #undef TARGET_FUNCTION_OK_FOR_SIBCALL
  8180. #define TARGET_FUNCTION_OK_FOR_SIBCALL alpha_function_ok_for_sibcall
  8181. #undef TARGET_CANNOT_COPY_INSN_P
  8182. #define TARGET_CANNOT_COPY_INSN_P alpha_cannot_copy_insn_p
  8183. #undef TARGET_LEGITIMATE_CONSTANT_P
  8184. #define TARGET_LEGITIMATE_CONSTANT_P alpha_legitimate_constant_p
  8185. #undef TARGET_CANNOT_FORCE_CONST_MEM
  8186. #define TARGET_CANNOT_FORCE_CONST_MEM alpha_cannot_force_const_mem
  8187. #if TARGET_ABI_OSF
  8188. #undef TARGET_ASM_OUTPUT_MI_THUNK
  8189. #define TARGET_ASM_OUTPUT_MI_THUNK alpha_output_mi_thunk_osf
  8190. #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
  8191. #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
  8192. #undef TARGET_STDARG_OPTIMIZE_HOOK
  8193. #define TARGET_STDARG_OPTIMIZE_HOOK alpha_stdarg_optimize_hook
  8194. #endif
8195. /* Use 16-bit section anchors. */
  8196. #undef TARGET_MIN_ANCHOR_OFFSET
  8197. #define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1
  8198. #undef TARGET_MAX_ANCHOR_OFFSET
  8199. #define TARGET_MAX_ANCHOR_OFFSET 0x7fff
  8200. #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
  8201. #define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
  8202. #undef TARGET_RTX_COSTS
  8203. #define TARGET_RTX_COSTS alpha_rtx_costs
  8204. #undef TARGET_ADDRESS_COST
  8205. #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
  8206. #undef TARGET_MACHINE_DEPENDENT_REORG
  8207. #define TARGET_MACHINE_DEPENDENT_REORG alpha_reorg
  8208. #undef TARGET_PROMOTE_FUNCTION_MODE
  8209. #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
  8210. #undef TARGET_PROMOTE_PROTOTYPES
  8211. #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false
  8212. #undef TARGET_RETURN_IN_MEMORY
  8213. #define TARGET_RETURN_IN_MEMORY alpha_return_in_memory
  8214. #undef TARGET_PASS_BY_REFERENCE
  8215. #define TARGET_PASS_BY_REFERENCE alpha_pass_by_reference
  8216. #undef TARGET_SETUP_INCOMING_VARARGS
  8217. #define TARGET_SETUP_INCOMING_VARARGS alpha_setup_incoming_varargs
  8218. #undef TARGET_STRICT_ARGUMENT_NAMING
  8219. #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
  8220. #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
  8221. #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
  8222. #undef TARGET_SPLIT_COMPLEX_ARG
  8223. #define TARGET_SPLIT_COMPLEX_ARG alpha_split_complex_arg
  8224. #undef TARGET_GIMPLIFY_VA_ARG_EXPR
  8225. #define TARGET_GIMPLIFY_VA_ARG_EXPR alpha_gimplify_va_arg
  8226. #undef TARGET_ARG_PARTIAL_BYTES
  8227. #define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
  8228. #undef TARGET_FUNCTION_ARG
  8229. #define TARGET_FUNCTION_ARG alpha_function_arg
  8230. #undef TARGET_FUNCTION_ARG_ADVANCE
  8231. #define TARGET_FUNCTION_ARG_ADVANCE alpha_function_arg_advance
  8232. #undef TARGET_TRAMPOLINE_INIT
  8233. #define TARGET_TRAMPOLINE_INIT alpha_trampoline_init
  8234. #undef TARGET_INSTANTIATE_DECLS
  8235. #define TARGET_INSTANTIATE_DECLS alpha_instantiate_decls
  8236. #undef TARGET_SECONDARY_RELOAD
  8237. #define TARGET_SECONDARY_RELOAD alpha_secondary_reload
  8238. #undef TARGET_SCALAR_MODE_SUPPORTED_P
  8239. #define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
  8240. #undef TARGET_VECTOR_MODE_SUPPORTED_P
  8241. #define TARGET_VECTOR_MODE_SUPPORTED_P alpha_vector_mode_supported_p
  8242. #undef TARGET_BUILD_BUILTIN_VA_LIST
  8243. #define TARGET_BUILD_BUILTIN_VA_LIST alpha_build_builtin_va_list
  8244. #undef TARGET_EXPAND_BUILTIN_VA_START
  8245. #define TARGET_EXPAND_BUILTIN_VA_START alpha_va_start
  8246. /* The Alpha architecture does not require sequential consistency. See
  8247. http://www.cs.umd.edu/~pugh/java/memoryModel/AlphaReordering.html
  8248. for an example of how it can be violated in practice. */
  8249. #undef TARGET_RELAXED_ORDERING
  8250. #define TARGET_RELAXED_ORDERING true
  8251. #undef TARGET_OPTION_OVERRIDE
  8252. #define TARGET_OPTION_OVERRIDE alpha_option_override
  8253. #ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
  8254. #undef TARGET_MANGLE_TYPE
  8255. #define TARGET_MANGLE_TYPE alpha_mangle_type
  8256. #endif
  8257. #undef TARGET_LEGITIMATE_ADDRESS_P
  8258. #define TARGET_LEGITIMATE_ADDRESS_P alpha_legitimate_address_p
  8259. #undef TARGET_CONDITIONAL_REGISTER_USAGE
  8260. #define TARGET_CONDITIONAL_REGISTER_USAGE alpha_conditional_register_usage
  8261. struct gcc_target targetm = TARGET_INITIALIZER;
  8262. #include "gt-alpha.h"