PageRenderTime 55ms CodeModel.GetById 25ms RepoModel.GetById 1ms app.codeStats 0ms

/mpz/powm.c

https://bitbucket.org/mgundes/mpir-mirror
C | 428 lines | 322 code | 36 blank | 70 comment | 77 complexity | cf7bf78e91b5845d228ebcb4a0dd683f MD5 | raw file
Possible License(s): BSD-3-Clause, LGPL-2.0, GPL-3.0, LGPL-3.0, GPL-2.0, LGPL-2.1
  1. /* mpz_powm(res,base,exp,mod) -- Set RES to (base**exp) mod MOD.
  2. Copyright 1991, 1993, 1994, 1996, 1997, 2000, 2001, 2002, 2005 Free Software
  3. Foundation, Inc. Contributed by Paul Zimmermann.
  4. This file is part of the GNU MP Library.
  5. The GNU MP Library is free software; you can redistribute it and/or modify
  6. it under the terms of the GNU Lesser General Public License as published by
  7. the Free Software Foundation; either version 2.1 of the License, or (at your
  8. option) any later version.
  9. The GNU MP Library is distributed in the hope that it will be useful, but
  10. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  11. or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
  12. License for more details.
  13. You should have received a copy of the GNU Lesser General Public License
  14. along with the GNU MP Library; see the file COPYING.LIB. If not, write to
  15. the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
  16. MA 02110-1301, USA. */
  17. #include "mpir.h"
  18. #include "gmp-impl.h"
  19. #include "longlong.h"
  20. /* Compute t = a mod m, a is defined by (ap,an), m is defined by (mp,mn), and
  21. t is defined by (tp,mn). */
  22. static void
  23. reduce (mp_ptr tp, mp_srcptr ap, mp_size_t an, mp_srcptr mp, mp_size_t mn)
  24. {
  25. mp_ptr qp;
  26. TMP_DECL;
  27. TMP_MARK;
  28. qp = TMP_ALLOC_LIMBS (an - mn + 1);
  29. mpn_tdiv_qr (qp, tp, 0L, ap, an, mp, mn);
  30. TMP_FREE;
  31. }
#if REDUCE_EXPONENT
/* Return the group order of the ring mod m, i.e. Euler's totient phi(t).
   Computed multiplicatively by trial division: phi(p^k) = (p-1)*p^(k-1)
   for each prime power p^k dividing t.  NOTE(review): currently dead
   code — REDUCE_EXPONENT is #undef'd below, so this never compiles.  */
static mp_limb_t
phi (mp_limb_t t)
{
  mp_limb_t d, m, go;

  go = 1;

  /* Strip factors of 2: the first factor contributes phi(2) = 1, each
     additional factor multiplies the result by 2.  */
  if (t % 2 == 0)
    {
      t = t / 2;
      while (t % 2 == 0)
	{
	  go *= 2;
	  t = t / 2;
	}
    }
  /* Trial-divide by odd candidates d = 3, 5, 7, ...  For each prime p
     dividing t, GO gains a factor p-1 on the first division (m starts
     as d-1) and a factor p on every further division (m becomes d).  */
  for (d = 3;; d += 2)
    {
      m = d - 1;
      for (;;)
	{
	  unsigned long int q = t / d;
	  if (q < d)
	    {
	      /* d > sqrt(t): the remaining t is 1 or prime.  */
	      if (t <= 1)
		return go;
	      if (t == d)
		return go * m;	/* remaining t is the current prime d */
	      return go * (t - 1);	/* remaining t is a new prime */
	    }
	  if (t != q * d)
	    break;		/* d does not divide t; try next d */
	  go *= m;
	  m = d;		/* subsequent factors of d contribute d */
	  t = q;
	}
    }
}
#endif
  71. /* average number of calls to redc for an exponent of n bits
  72. with the sliding window algorithm of base 2^k: the optimal is
  73. obtained for the value of k which minimizes 2^(k-1)+n/(k+1):
  74. n\k 4 5 6 7 8
  75. 128 156* 159 171 200 261
  76. 256 309 307* 316 343 403
  77. 512 617 607* 610 632 688
  78. 1024 1231 1204 1195* 1207 1256
  79. 2048 2461 2399 2366 2360* 2396
  80. 4096 4918 4787 4707 4665* 4670
  81. */
  82. /* Use REDC instead of usual reduction for sizes < POWM_THRESHOLD. In REDC
  83. each modular multiplication costs about 2*n^2 limbs operations, whereas
  84. using usual reduction it costs 3*K(n), where K(n) is the cost of a
  85. multiplication using Karatsuba, and a division is assumed to cost 2*K(n),
  86. for example using Burnikel-Ziegler's algorithm. This gives a theoretical
  87. threshold of a*SQR_KARATSUBA_THRESHOLD, with a=(3/2)^(1/(2-ln(3)/ln(2))) ~
  88. 2.66. */
  89. /* For now, also disable REDC when MOD is even, as the inverse can't handle
  90. that. At some point, we might want to make the code faster for that case,
  91. perhaps using CRR. */
/* Below this many limbs in MOD (and MOD odd), REDC-based multiplication
   is used instead of plain division-based reduction; see the cost
   analysis in the comment above.  */
#ifndef POWM_THRESHOLD
#define POWM_THRESHOLD ((8 * SQR_KARATSUBA_THRESHOLD) / 3)
#endif

/* A negative exponent is handled by inverting BASE mod MOD up front;
   if disabled, a negative exponent triggers DIVIDE_BY_ZERO.  */
#define HANDLE_NEGATIVE_EXPONENT 1
/* Exponent reduction via phi(m) (see phi() above) is disabled.  */
#undef REDUCE_EXPONENT
/* Set R to (B^E) mod M.
   Strategy: a 2^k-ary sliding-window scan of the exponent, with window
   width k chosen from the exponent's bit length.  Odd powers
   x^1, x^3, ..., x^(K-1) (K = 2^k) are precomputed into gp[].  For odd
   MOD smaller than POWM_THRESHOLD limbs, operands are kept in Montgomery
   form (x*R^n, R = 2^BITS_PER_MP_LIMB) and reduced with mpn_redc_1;
   otherwise MOD is normalized (shifted so its top bit is set) and plain
   mpn_tdiv_qr reduction is used.  A negative exponent is handled by
   inverting B mod M first (DIVIDE_BY_ZERO if no inverse exists).  */
void
mpz_powm (mpz_ptr r, mpz_srcptr b, mpz_srcptr e, mpz_srcptr m)
{
  mp_ptr xp, tp, qp, gp, this_gp;
  mp_srcptr bp, ep, mp;
  mp_size_t bn, es, en, mn, xn;
  mp_limb_t invm, c;
  unsigned long int enb;
  mp_size_t i, K, j, l, k;
  int m_zero_cnt, e_zero_cnt;
  int sh;
  int use_redc;
#if HANDLE_NEGATIVE_EXPONENT
  mpz_t new_b;
#endif
#if REDUCE_EXPONENT
  mpz_t new_e;
#endif
  TMP_DECL;

  mp = PTR(m);
  mn = ABSIZ (m);
  if (mn == 0)
    DIVIDE_BY_ZERO;

  TMP_MARK;

  es = SIZ (e);
  if (es <= 0)
    {
      if (es == 0)
	{
	  /* Exponent is zero, result is 1 mod m, i.e., 1 or 0 depending on if
	     m equals 1.  */
	  SIZ(r) = (mn == 1 && mp[0] == 1) ? 0 : 1;
	  PTR(r)[0] = 1;
	  TMP_FREE;	/* we haven't really allocated anything here */
	  return;
	}
#if HANDLE_NEGATIVE_EXPONENT
      /* Negative exponent: replace B by B^-1 mod M and negate E.  */
      MPZ_TMP_INIT (new_b, mn + 1);
      if (! mpz_invert (new_b, b, m))
	DIVIDE_BY_ZERO;
      b = new_b;
      es = -es;
#else
      DIVIDE_BY_ZERO;
#endif
    }
  en = es;

#if REDUCE_EXPONENT
  /* Reduce exponent by dividing it by phi(m) when m is small.  */
  if (mn == 1 && mp[0] < 0x7fffffffL && en * GMP_NUMB_BITS > 150)
    {
      MPZ_TMP_INIT (new_e, 2);
      mpz_mod_ui (new_e, e, phi (mp[0]));
      e = new_e;
    }
#endif

  /* REDC requires an odd modulus (mpn_redc_1 needs -1/m mod 2^b).  */
  use_redc = mn < POWM_THRESHOLD && mp[0] % 2 != 0;
  if (use_redc)
    {
      /* invm = -1/m mod 2^BITS_PER_MP_LIMB, must have m odd */
      modlimb_invert (invm, mp[0]);
      invm = -invm;
    }
  else
    {
      /* Normalize m (i.e. make its most significant bit set) as required by
	 division functions below.  */
      count_leading_zeros (m_zero_cnt, mp[mn - 1]);
      m_zero_cnt -= GMP_NAIL_BITS;
      if (m_zero_cnt != 0)
	{
	  mp_ptr new_mp;
	  new_mp = TMP_ALLOC_LIMBS (mn);
	  mpn_lshift (new_mp, mp, mn, m_zero_cnt);
	  mp = new_mp;
	}
    }

  /* Determine optimal value of k, the number of exponent bits we look at
     at a time: smallest k with 2*enb <= K*(2 + k*(3 + k)), K = 2^k.  */
  count_leading_zeros (e_zero_cnt, PTR(e)[en - 1]);
  e_zero_cnt -= GMP_NAIL_BITS;
  enb = en * GMP_NUMB_BITS - e_zero_cnt; /* number of bits of exponent */
  k = 1;
  K = 2;
  while (2 * enb > K * (2 + k * (3 + k)))
    {
      k++;
      K *= 2;
      if (k == 10)			/* cap allocation */
	break;
    }

  tp = TMP_ALLOC_LIMBS (2 * mn);	/* 2*mn-limb product scratch */
  qp = TMP_ALLOC_LIMBS (mn + 1);	/* quotient scratch for tdiv_qr */

  /* Table of the K/2 odd powers x^1, x^3, ..., x^(K-1), mn limbs each.  */
  gp = __GMP_ALLOCATE_FUNC_LIMBS (K / 2 * mn);

  /* Compute x*R^n where R=2^BITS_PER_MP_LIMB.  */
  bn = ABSIZ (b);
  bp = PTR(b);
  /* Handle |b| >= m by computing b mod m.  FIXME: It is not strictly
     necessary for speed or correctness to do this when b and m have the
     same number of limbs, perhaps remove mpn_cmp call.  */
  if (bn > mn || (bn == mn && mpn_cmp (bp, mp, mn) >= 0))
    {
      /* Reduce possibly huge base while moving it to gp[0].  Use a function
	 call to reduce, since we don't want the quotient allocation to
	 live until function return.  */
      if (use_redc)
	{
	  /* Montgomery form: g[0] = (b mod m) * R^n mod m, obtained by
	     shifting up mn limbs then reducing.  */
	  reduce (tp + mn, bp, bn, mp, mn);	/* b mod m */
	  MPN_ZERO (tp, mn);
	  mpn_tdiv_qr (qp, gp, 0L, tp, 2 * mn, mp, mn);	/* unnormalized! */
	}
      else
	{
	  reduce (gp, bp, bn, mp, mn);
	}
    }
  else
    {
      /* |b| < m.  We pad out operands to become mn limbs, which simplifies
	 the rest of the function, but slows things down when |b| << m.  */
      if (use_redc)
	{
	  MPN_ZERO (tp, mn);
	  MPN_COPY (tp + mn, bp, bn);
	  MPN_ZERO (tp + mn + bn, mn - bn);
	  mpn_tdiv_qr (qp, gp, 0L, tp, 2 * mn, mp, mn);
	}
      else
	{
	  MPN_COPY (gp, bp, bn);
	  MPN_ZERO (gp + bn, mn - bn);
	}
    }

  /* Compute xx^i for odd g < 2^i.  */

  xp = TMP_ALLOC_LIMBS (mn);
  mpn_sqr (tp, gp, mn);
  if (use_redc)
    mpn_redc_1 (xp, tp, mp, mn, invm);	/* xx = x^2*R^n */
  else
    mpn_tdiv_qr (qp, xp, 0L, tp, 2 * mn, mp, mn);
  /* g[i] = g[i-1] * x^2, giving the odd powers x^(2i+1).  */
  this_gp = gp;
  for (i = 1; i < K / 2; i++)
    {
      mpn_mul_n (tp, this_gp, xp, mn);
      this_gp += mn;
      if (use_redc)
	mpn_redc_1 (this_gp, tp, mp, mn, invm);	/* g[i] = x^(2i+1)*R^n */
      else
	mpn_tdiv_qr (qp, this_gp, 0L, tp, 2 * mn, mp, mn);
    }

  /* Start the real stuff.  */
  ep = PTR (e);
  i = en - 1;				/* current index */
  c = ep[i];				/* current limb */
  sh = GMP_NUMB_BITS - e_zero_cnt;	/* significant bits in ep[i] */
  sh -= k;				/* index of lower bit of ep[i] to take into account */
  if (sh < 0)
    {					/* k-sh extra bits are needed */
      if (i > 0)
	{
	  i--;
	  c <<= (-sh);
	  sh += GMP_NUMB_BITS;
	  c |= ep[i] >> sh;
	}
    }
  else
    c >>= sh;

  /* First window: strip trailing zero bits from c (counted in j) so we
     can index the odd-power table, then square j times afterwards.  */
  for (j = 0; c % 2 == 0; j++)
    c >>= 1;

  MPN_COPY (xp, gp + mn * (c >> 1), mn);
  while (--j >= 0)
    {
      mpn_sqr (tp, xp, mn);
      if (use_redc)
	mpn_redc_1 (xp, tp, mp, mn, invm);
      else
	mpn_tdiv_qr (qp, xp, 0L, tp, 2 * mn, mp, mn);
    }

  /* Main loop: consume the remaining exponent bits, k at a time.  */
  while (i > 0 || sh > 0)
    {
      c = ep[i];
      l = k;				/* number of bits treated */
      sh -= l;
      if (sh < 0)
	{
	  if (i > 0)
	    {
	      /* Window straddles a limb boundary; pull in bits from the
		 next lower limb.  */
	      i--;
	      c <<= (-sh);
	      sh += GMP_NUMB_BITS;
	      c |= ep[i] >> sh;
	    }
	  else
	    {
	      l += sh;			/* last chunk of bits from e; l < k */
	    }
	}
      else
	c >>= sh;
      c &= ((mp_limb_t) 1 << l) - 1;

      /* This while loop implements the sliding window improvement--loop
	 while the most significant bit of c is zero, squaring xx as we
	 go.  */
      while ((c >> (l - 1)) == 0 && (i > 0 || sh > 0))
	{
	  mpn_sqr (tp, xp, mn);
	  if (use_redc)
	    mpn_redc_1 (xp, tp, mp, mn, invm);
	  else
	    mpn_tdiv_qr (qp, xp, 0L, tp, 2 * mn, mp, mn);
	  /* Slide the window right by one bit.  */
	  if (sh != 0)
	    {
	      sh--;
	      c = (c << 1) + ((ep[i] >> sh) & 1);
	    }
	  else
	    {
	      i--;
	      sh = GMP_NUMB_BITS - 1;
	      c = (c << 1) + (ep[i] >> sh);
	    }
	}

      /* Replace xx by xx^(2^l)*x^c.  */
      if (c != 0)
	{
	  /* Split c = c0 * 2^j with c0 odd, so the table of odd powers
	     applies; the trailing j squarings are deferred below.  */
	  for (j = 0; c % 2 == 0; j++)
	    c >>= 1;

	  /* c0 = c * 2^j, i.e. xx^(2^l)*x^c = (A^(2^(l - j))*c)^(2^j) */
	  l -= j;
	  while (--l >= 0)
	    {
	      mpn_sqr (tp, xp, mn);
	      if (use_redc)
		mpn_redc_1 (xp, tp, mp, mn, invm);
	      else
		mpn_tdiv_qr (qp, xp, 0L, tp, 2 * mn, mp, mn);
	    }
	  mpn_mul_n (tp, xp, gp + mn * (c >> 1), mn);
	  if (use_redc)
	    mpn_redc_1 (xp, tp, mp, mn, invm);
	  else
	    mpn_tdiv_qr (qp, xp, 0L, tp, 2 * mn, mp, mn);
	}
      else
	j = l;				/* case c=0 */
      /* Deferred squarings: j from the odd-split above, or l when the
	 whole window was zero.  */
      while (--j >= 0)
	{
	  mpn_sqr (tp, xp, mn);
	  if (use_redc)
	    mpn_redc_1 (xp, tp, mp, mn, invm);
	  else
	    mpn_tdiv_qr (qp, xp, 0L, tp, 2 * mn, mp, mn);
	}
    }

  if (use_redc)
    {
      /* Convert back xx to xx/R^n.  */
      MPN_COPY (tp, xp, mn);
      MPN_ZERO (tp + mn, mn);
      mpn_redc_1 (xp, tp, mp, mn, invm);
      /* mpn_redc_1 may leave a value in [m, 2m); fix up.  */
      if (mpn_cmp (xp, mp, mn) >= 0)
	mpn_sub_n (xp, xp, mp, mn);
    }
  else
    {
      /* Undo the normalization shift of m applied earlier.  */
      if (m_zero_cnt != 0)
	{
	  mp_limb_t cy;
	  cy = mpn_lshift (tp, xp, mn, m_zero_cnt);
	  tp[mn] = cy;
	  mpn_tdiv_qr (qp, xp, 0L, tp, mn + (cy != 0), mp, mn);
	  mpn_rshift (xp, xp, mn, m_zero_cnt);
	}
    }

  xn = mn;
  MPN_NORMALIZE (xp, xn);

  /* Negative base with odd exponent gives a negative residue; return
     the least non-negative equivalent m - |result|.  */
  if ((ep[0] & 1) && SIZ(b) < 0 && xn != 0)
    {
      mp = PTR(m);			/* want original, unnormalized m */
      mpn_sub (xp, mp, mn, xp, xn);
      xn = mn;
      MPN_NORMALIZE (xp, xn);
    }
  MPZ_REALLOC (r, xn);
  SIZ (r) = xn;
  MPN_COPY (PTR(r), xp, xn);

  __GMP_FREE_FUNC_LIMBS (gp, K / 2 * mn);
  TMP_FREE;
}