PageRenderTime 54ms CodeModel.GetById 22ms RepoModel.GetById 0ms app.codeStats 0ms

/GCC/gcc/sreal.c

https://github.com/gatoatigrado/gccxmlclone
C | 544 lines | 414 code | 53 blank | 77 comment | 62 complexity | 6a56ac3bbfe2a815de336d7a27c1cbdf MD5 | raw file
Possible License(s): GPL-2.0
  1. /* Simple data type for positive real numbers for the GNU compiler.
  2. Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
  3. This file is part of GCC.
  4. GCC is free software; you can redistribute it and/or modify it under
  5. the terms of the GNU General Public License as published by the Free
  6. Software Foundation; either version 2, or (at your option) any later
  7. version.
  8. GCC is distributed in the hope that it will be useful, but WITHOUT ANY
  9. WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  11. for more details.
  12. You should have received a copy of the GNU General Public License
  13. along with GCC; see the file COPYING. If not, write to the Free
  14. Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
  15. 02110-1301, USA. */
  16. /* This library supports positive real numbers and 0;
  17. inf and nan are NOT supported.
  18. It is written to be simple and fast.
  19. Value of sreal is
  20. x = sig * 2 ^ exp
  21. where
  22. sig = significant
  23. (for < 64-bit machines sig = sig_lo + sig_hi * 2 ^ SREAL_PART_BITS)
  24. exp = exponent
  25. One HOST_WIDE_INT is used for the significant on 64-bit (and more than
  26. 64-bit) machines,
  27. otherwise two HOST_WIDE_INTs are used for the significant.
  28. Only a half of significant bits is used (in normalized sreals) so that we do
  29. not have problems with overflow, for example when c->sig = a->sig * b->sig.
  30. So the precision for 64-bit and 32-bit machines is 32-bit.
  31. Invariant: The numbers are normalized before and after each call of sreal_*.
  32. Normalized sreals:
  33. All numbers (except zero) meet following conditions:
  34. SREAL_MIN_SIG <= sig && sig <= SREAL_MAX_SIG
  35. -SREAL_MAX_EXP <= exp && exp <= SREAL_MAX_EXP
  36. If the number would be too large, it is set to upper bounds of these
  37. conditions.
  38. If the number is zero or would be too small it meets following conditions:
  39. sig == 0 && exp == -SREAL_MAX_EXP
  40. */
  41. #include "config.h"
  42. #include "system.h"
  43. #include "coretypes.h"
  44. #include "tm.h"
  45. #include "sreal.h"
/* Internal helpers defined below: copy an sreal, shift one right with
   rounding, and re-establish the normalization invariant.  */
static inline void copy (sreal *, sreal *);
static inline void shift_right (sreal *, int);
static void normalize (sreal *);
/* Print the content of struct sreal to FILE in the form
   (sig * 2^exp), decomposed into two parts on narrow hosts.  */
void
dump_sreal (FILE *file, sreal *x)
{
#if SREAL_PART_BITS < 32
  /* Two-part significand: value is (sig_hi * 2^SREAL_PART_BITS + sig_lo)
     * 2^exp.  NOTE(review): the literal "2^16" in the format string
     presumably equals SREAL_PART_BITS in this configuration -- confirm
     against sreal.h.  */
  fprintf (file, "((" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^16 + "
	   HOST_WIDE_INT_PRINT_UNSIGNED ") * 2^%d)",
	   x->sig_hi, x->sig_lo, x->exp);
#else
  /* Single-part significand: value is sig * 2^exp.  */
  fprintf (file, "(" HOST_WIDE_INT_PRINT_UNSIGNED " * 2^%d)", x->sig, x->exp);
#endif
}
  61. /* Copy the sreal number. */
  62. static inline void
  63. copy (sreal *r, sreal *a)
  64. {
  65. #if SREAL_PART_BITS < 32
  66. r->sig_lo = a->sig_lo;
  67. r->sig_hi = a->sig_hi;
  68. #else
  69. r->sig = a->sig;
  70. #endif
  71. r->exp = a->exp;
  72. }
/* Shift X right by S bits.  Needed: 0 < S <= SREAL_BITS.
   When the most significant bit shifted out is 1, add 1 to X (rounding).  */
static inline void
shift_right (sreal *x, int s)
{
  gcc_assert (s > 0);
  gcc_assert (s <= SREAL_BITS);
  /* Exponent should never be so large because shift_right is used only by
     sreal_add and sreal_sub and thus the number cannot be shifted out from
     exponent range.  */
  gcc_assert (x->exp + s <= SREAL_MAX_EXP);
  x->exp += s;
#if SREAL_PART_BITS < 32
  if (s > SREAL_PART_BITS)
    {
      /* The entire low part is shifted out; what survives of the high
	 part becomes the new low part.  */
      s -= SREAL_PART_BITS;
      /* Round: add half of the last unit kept before shifting.  */
      x->sig_hi += (uhwi) 1 << (s - 1);
      x->sig_lo = x->sig_hi >> s;
      x->sig_hi = 0;
    }
  else
    {
      /* Round: add half of the last unit kept before shifting.  */
      x->sig_lo += (uhwi) 1 << (s - 1);
      /* Propagate a rounding carry out of the low part into the high part.  */
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
	{
	  x->sig_hi++;
	  x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
	}
      /* Shift both parts, moving the S low bits of the high part into
	 the top of the low part.  */
      x->sig_lo >>= s;
      x->sig_lo |= (x->sig_hi & (((uhwi) 1 << s) - 1)) << (SREAL_PART_BITS - s);
      x->sig_hi >>= s;
    }
#else
  /* Round: add half of the last unit kept, then shift.  */
  x->sig += (uhwi) 1 << (s - 1);
  x->sig >>= s;
#endif
}
/* Normalize *X so that (unless it is zero) its significand lies in
   [SREAL_MIN_SIG, SREAL_MAX_SIG] and its exponent in
   [-SREAL_MAX_EXP, SREAL_MAX_EXP], saturating on overflow and flushing
   to the canonical zero (sig == 0, exp == -SREAL_MAX_EXP) on underflow.  */
static void
normalize (sreal *x)
{
#if SREAL_PART_BITS < 32
  int shift;
  HOST_WIDE_INT mask;
  if (x->sig_lo == 0 && x->sig_hi == 0)
    {
      /* Canonical representation of zero.  */
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig_hi < SREAL_MIN_SIG)
    {
      /* Significand too small: shift it left until sig_hi reaches
	 SREAL_MIN_SIG, decrementing the exponent to compensate.  */
      if (x->sig_hi == 0)
	{
	  /* Move lower part of significant to higher part.  */
	  x->sig_hi = x->sig_lo;
	  x->sig_lo = 0;
	  x->exp -= SREAL_PART_BITS;
	}
      shift = 0;
      while (x->sig_hi < SREAL_MIN_SIG)
	{
	  x->sig_hi <<= 1;
	  x->exp--;
	  shift++;
	}
      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
	{
	  x->exp = -SREAL_MAX_EXP;
	  x->sig_hi = 0;
	  x->sig_lo = 0;
	}
      else if (shift)
	{
	  /* Pull the top SHIFT bits of sig_lo into the freed low bits
	     of sig_hi, then shift sig_lo up, masking to part width.  */
	  mask = (1 << SREAL_PART_BITS) - (1 << (SREAL_PART_BITS - shift));
	  x->sig_hi |= (x->sig_lo & mask) >> (SREAL_PART_BITS - shift);
	  x->sig_lo = (x->sig_lo << shift) & (((uhwi) 1 << SREAL_PART_BITS) - 1);
	}
    }
  else if (x->sig_hi > SREAL_MAX_SIG)
    {
      /* Significand too large: shift right with rounding, incrementing
	 the exponent to compensate.  */
      unsigned HOST_WIDE_INT tmp = x->sig_hi;
      /* Find out how many bits will be shifted.  */
      shift = 0;
      do
	{
	  tmp >>= 1;
	  shift++;
	}
      while (tmp > SREAL_MAX_SIG);
      /* Round the number.  */
      x->sig_lo += (uhwi) 1 << (shift - 1);
      /* Shift sig_lo down and append the SHIFT low bits of sig_hi at
	 its top, then shift sig_hi down.  */
      x->sig_lo >>= shift;
      x->sig_lo += ((x->sig_hi & (((uhwi) 1 << shift) - 1))
		    << (SREAL_PART_BITS - shift));
      x->sig_hi >>= shift;
      x->exp += shift;
      /* Propagate a rounding carry out of sig_lo.  */
      if (x->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
	{
	  x->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
	  x->sig_hi++;
	  if (x->sig_hi > SREAL_MAX_SIG)
	    {
	      /* x->sig_hi was SREAL_MAX_SIG before increment
		 so now last bit is zero.  */
	      x->sig_hi >>= 1;
	      x->sig_lo >>= 1;
	      x->exp++;
	    }
	}
      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
	{
	  x->exp = SREAL_MAX_EXP;
	  x->sig_hi = SREAL_MAX_SIG;
	  x->sig_lo = SREAL_MAX_SIG;
	}
    }
#else
  if (x->sig == 0)
    {
      /* Canonical representation of zero.  */
      x->exp = -SREAL_MAX_EXP;
    }
  else if (x->sig < SREAL_MIN_SIG)
    {
      /* Too small: shift left, decrementing the exponent.  */
      do
	{
	  x->sig <<= 1;
	  x->exp--;
	}
      while (x->sig < SREAL_MIN_SIG);
      /* Check underflow.  */
      if (x->exp < -SREAL_MAX_EXP)
	{
	  x->exp = -SREAL_MAX_EXP;
	  x->sig = 0;
	}
    }
  else if (x->sig > SREAL_MAX_SIG)
    {
      /* Too large: shift right, remembering the last bit shifted out
	 so the result can be rounded to nearest.  */
      int last_bit;
      do
	{
	  last_bit = x->sig & 1;
	  x->sig >>= 1;
	  x->exp++;
	}
      while (x->sig > SREAL_MAX_SIG);
      /* Round the number.  */
      x->sig += last_bit;
      if (x->sig > SREAL_MAX_SIG)
	{
	  /* Rounding carried out of range; one more shift fixes it.  */
	  x->sig >>= 1;
	  x->exp++;
	}
      /* Check overflow.  */
      if (x->exp > SREAL_MAX_EXP)
	{
	  x->exp = SREAL_MAX_EXP;
	  x->sig = SREAL_MAX_SIG;
	}
    }
#endif
}
  236. /* Set *R to SIG * 2 ^ EXP. Return R. */
  237. sreal *
  238. sreal_init (sreal *r, unsigned HOST_WIDE_INT sig, signed int exp)
  239. {
  240. #if SREAL_PART_BITS < 32
  241. r->sig_lo = 0;
  242. r->sig_hi = sig;
  243. r->exp = exp - 16;
  244. #else
  245. r->sig = sig;
  246. r->exp = exp;
  247. #endif
  248. normalize (r);
  249. return r;
  250. }
/* Return integer value of *R, truncating toward zero.  Values too small
   to reach 1 yield 0; values too large saturate to MAX_HOST_WIDE_INT.  */
HOST_WIDE_INT
sreal_to_int (sreal *r)
{
#if SREAL_PART_BITS < 32
  /* The whole significand would be shifted out: result is 0.  */
  if (r->exp <= -SREAL_BITS)
    return 0;
  /* Saturate.  NOTE(review): with exp >= 0 the value is at least
     sig_hi * 2^SREAL_PART_BITS, presumably always above the
     representable range here -- confirm against sreal.h bounds.  */
  if (r->exp >= 0)
    return MAX_HOST_WIDE_INT;
  /* Reassemble the two-part significand and truncate.  */
  return ((r->sig_hi << SREAL_PART_BITS) + r->sig_lo) >> -r->exp;
#else
  /* The whole significand would be shifted out: result is 0.  */
  if (r->exp <= -SREAL_BITS)
    return 0;
  /* Shifting left by SREAL_PART_BITS or more would overflow: saturate.  */
  if (r->exp >= SREAL_PART_BITS)
    return MAX_HOST_WIDE_INT;
  if (r->exp > 0)
    return r->sig << r->exp;
  if (r->exp < 0)
    return r->sig >> -r->exp;
  return r->sig;
#endif
}
  273. /* Compare *A and *B. Return -1 if *A < *B, 1 if *A > *B and 0 if *A == *B. */
  274. int
  275. sreal_compare (sreal *a, sreal *b)
  276. {
  277. if (a->exp > b->exp)
  278. return 1;
  279. if (a->exp < b->exp)
  280. return -1;
  281. #if SREAL_PART_BITS < 32
  282. if (a->sig_hi > b->sig_hi)
  283. return 1;
  284. if (a->sig_hi < b->sig_hi)
  285. return -1;
  286. if (a->sig_lo > b->sig_lo)
  287. return 1;
  288. if (a->sig_lo < b->sig_lo)
  289. return -1;
  290. #else
  291. if (a->sig > b->sig)
  292. return 1;
  293. if (a->sig < b->sig)
  294. return -1;
  295. #endif
  296. return 0;
  297. }
/* *R = *A + *B.  Return R.  */
sreal *
sreal_add (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;
  /* Make *A the larger operand so the smaller one is the one shifted.  */
  if (sreal_compare (a, b) < 0)
    {
      sreal *swap;
      swap = a;
      a = b;
      b = swap;
    }
  dexp = a->exp - b->exp;
  r->exp = a->exp;
  /* *B would be shifted out entirely during alignment: result is *A.  */
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }
  /* Align *B to *A's exponent (with rounding), using a temporary so
     the caller's *B is left untouched.  */
  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }
#if SREAL_PART_BITS < 32
  r->sig_hi = a->sig_hi + bb->sig_hi;
  r->sig_lo = a->sig_lo + bb->sig_lo;
  /* Propagate a carry out of the low part into the high part.  */
  if (r->sig_lo & ((uhwi) 1 << SREAL_PART_BITS))
    {
      r->sig_hi++;
      r->sig_lo -= (uhwi) 1 << SREAL_PART_BITS;
    }
#else
  r->sig = a->sig + bb->sig;
#endif
  normalize (r);
  return r;
}
/* *R = *A - *B.  Return R.  Requires *A >= *B (negative numbers are not
   representable in this library).  */
sreal *
sreal_sub (sreal *r, sreal *a, sreal *b)
{
  int dexp;
  sreal tmp;
  sreal *bb;
  gcc_assert (sreal_compare (a, b) >= 0);
  dexp = a->exp - b->exp;
  r->exp = a->exp;
  /* *B would be shifted out entirely during alignment: result is *A.  */
  if (dexp > SREAL_BITS)
    {
#if SREAL_PART_BITS < 32
      r->sig_hi = a->sig_hi;
      r->sig_lo = a->sig_lo;
#else
      r->sig = a->sig;
#endif
      return r;
    }
  /* Align *B to *A's exponent (with rounding), using a temporary so
     the caller's *B is left untouched.  */
  if (dexp == 0)
    bb = b;
  else
    {
      copy (&tmp, b);
      shift_right (&tmp, dexp);
      bb = &tmp;
    }
#if SREAL_PART_BITS < 32
  /* Two-part subtraction with an explicit borrow from the high part.  */
  if (a->sig_lo < bb->sig_lo)
    {
      r->sig_hi = a->sig_hi - bb->sig_hi - 1;
      r->sig_lo = a->sig_lo + ((uhwi) 1 << SREAL_PART_BITS) - bb->sig_lo;
    }
  else
    {
      r->sig_hi = a->sig_hi - bb->sig_hi;
      r->sig_lo = a->sig_lo - bb->sig_lo;
    }
#else
  r->sig = a->sig - bb->sig;
#endif
  normalize (r);
  return r;
}
/* *R = *A * *B.  Return R.  */
sreal *
sreal_mul (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  /* In a normalized nonzero number sig_hi >= SREAL_MIN_SIG, so an
     operand below that bound is the canonical zero: the product is 0.  */
  if (a->sig_hi < SREAL_MIN_SIG || b->sig_hi < SREAL_MIN_SIG)
    {
      r->sig_lo = 0;
      r->sig_hi = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      unsigned HOST_WIDE_INT tmp1, tmp2, tmp3;
      /* Order the operands (multiplication itself is commutative).  */
      if (sreal_compare (a, b) < 0)
	{
	  sreal *swap;
	  swap = a;
	  a = b;
	  b = swap;
	}
      /* Schoolbook multiply in SREAL_PART_BITS-wide halves.  The lowest
	 SREAL_PART_BITS bits of the full product are dropped, hence the
	 + SREAL_PART_BITS exponent compensation.  */
      r->exp = a->exp + b->exp + SREAL_PART_BITS;
      tmp1 = a->sig_lo * b->sig_lo;
      tmp2 = a->sig_lo * b->sig_hi;
      tmp3 = a->sig_hi * b->sig_lo + (tmp1 >> SREAL_PART_BITS);
      /* High product plus the high halves of the cross products.  */
      r->sig_hi = a->sig_hi * b->sig_hi;
      r->sig_hi += (tmp2 >> SREAL_PART_BITS) + (tmp3 >> SREAL_PART_BITS);
      /* Sum of the low halves of the cross products forms the low part;
	 its own carry goes to the high part.  */
      tmp2 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp3 &= ((uhwi) 1 << SREAL_PART_BITS) - 1;
      tmp1 = tmp2 + tmp3;
      r->sig_lo = tmp1 & (((uhwi) 1 << SREAL_PART_BITS) - 1);
      r->sig_hi += tmp1 >> SREAL_PART_BITS;
      normalize (r);
    }
#else
  /* In a normalized nonzero number sig >= SREAL_MIN_SIG, so an operand
     below that bound is the canonical zero: the product is 0.  */
  if (a->sig < SREAL_MIN_SIG || b->sig < SREAL_MIN_SIG)
    {
      r->sig = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      /* Only half of the significand bits are used (see the comment at
	 the top of the file), so this product cannot overflow.  */
      r->sig = a->sig * b->sig;
      r->exp = a->exp + b->exp;
      normalize (r);
    }
#endif
  return r;
}
/* *R = *A / *B.  Return R.  *B must be nonzero (normalized nonzero).  */
sreal *
sreal_div (sreal *r, sreal *a, sreal *b)
{
#if SREAL_PART_BITS < 32
  unsigned HOST_WIDE_INT tmp, tmp1, tmp2;
  /* A normalized nonzero divisor has sig_hi >= SREAL_MIN_SIG;
     anything below is the canonical zero, i.e. division by zero.  */
  gcc_assert (b->sig_hi >= SREAL_MIN_SIG);
  if (a->sig_hi < SREAL_MIN_SIG)
    {
      /* Dividend is zero: result is zero.  */
      r->sig_hi = 0;
      r->sig_lo = 0;
      r->exp = -SREAL_MAX_EXP;
    }
  else
    {
      /* Since division by the whole number is pretty ugly to write
	 we are dividing by first 3/4 of bits of number.  */
      tmp1 = (a->sig_hi << SREAL_PART_BITS) + a->sig_lo;
      /* Truncate the divisor to its top 3/4 bits, rounding on the
	 highest bit dropped.  */
      tmp2 = ((b->sig_hi << (SREAL_PART_BITS / 2))
	      + (b->sig_lo >> (SREAL_PART_BITS / 2)));
      if (b->sig_lo & ((uhwi) 1 << ((SREAL_PART_BITS / 2) - 1)))
	tmp2++;
      /* Long division: three quotient digits of SREAL_PART_BITS / 2
	 bits each, remainder carried between steps.  */
      r->sig_lo = 0;
      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi = tmp << SREAL_PART_BITS;
      tmp = tmp1 / tmp2;
      tmp1 = (tmp1 % tmp2) << (SREAL_PART_BITS / 2);
      r->sig_hi += tmp << (SREAL_PART_BITS / 2);
      tmp = tmp1 / tmp2;
      r->sig_hi += tmp;
      /* Compensate for the scaling of dividend and truncated divisor.  */
      r->exp = a->exp - b->exp - SREAL_BITS - SREAL_PART_BITS / 2;
      normalize (r);
    }
#else
  gcc_assert (b->sig != 0);
  /* Scale the dividend up so the quotient keeps SREAL_PART_BITS bits of
     precision, and compensate in the exponent.  */
  r->sig = (a->sig << SREAL_PART_BITS) / b->sig;
  r->exp = a->exp - b->exp - SREAL_PART_BITS;
  normalize (r);
#endif
  return r;
}