
/src/FreeImage/Source/LibJPEG/jidctint.c

https://bitbucket.org/cabalistic/ogredeps/
  1. /*
  2. * jidctint.c
  3. *
  4. * Copyright (C) 1991-1998, Thomas G. Lane.
  5. * Modification developed 2002-2009 by Guido Vollbeding.
  6. * This file is part of the Independent JPEG Group's software.
  7. * For conditions of distribution and use, see the accompanying README file.
  8. *
  9. * This file contains a slow-but-accurate integer implementation of the
  10. * inverse DCT (Discrete Cosine Transform). In the IJG code, this routine
  11. * must also perform dequantization of the input coefficients.
  12. *
  13. * A 2-D IDCT can be done by 1-D IDCT on each column followed by 1-D IDCT
  14. * on each row (or vice versa, but it's more convenient to emit a row at
  15. * a time). Direct algorithms are also available, but they are much more
  16. * complex and seem not to be any faster when reduced to code.
  17. *
  18. * This implementation is based on an algorithm described in
  19. * C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT
  20. * Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics,
  21. * Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991.
  22. * The primary algorithm described there uses 11 multiplies and 29 adds.
  23. * We use their alternate method with 12 multiplies and 32 adds.
  24. * The advantage of this method is that no data path contains more than one
  25. * multiplication; this allows a very simple and accurate implementation in
  26. * scaled fixed-point arithmetic, with a minimal number of shifts.
  27. *
  28. * We also provide IDCT routines with various output sample block sizes for
  29. * direct resolution reduction or enlargement and for directly resolving the
  30. * common 2x1 and 1x2 subsampling cases without additional resampling: NxN
  31. * (N=1...16), 2NxN, and Nx2N (N=1...8) pixels for one 8x8 input DCT block.
  32. *
  33. * For N<8 we simply take the corresponding low-frequency coefficients of
  34. * the 8x8 input DCT block and apply an NxN point IDCT on the sub-block
  35. * to yield the downscaled outputs.
  36. * This can be seen as direct low-pass downsampling from the DCT domain
  37. * point of view rather than the usual spatial domain point of view,
  38. * yielding significant computational savings and results at least
  39. * as good as common bilinear (averaging) spatial downsampling.
  40. *
  41. * For N>8 we apply a partial NxN IDCT on the 8 input coefficients, treating
  42. * them as the lower frequencies and assuming the higher frequencies to be zero.
  43. * It turns out that the computational effort is similar to that of the 8x8 IDCT
  44. * relative to the output size.
  45. * Furthermore, the scaling and descaling is the same for all IDCT sizes.
  46. *
  47. * CAUTION: We rely on the FIX() macro except for the N=1,2,4,8 cases
  48. * since there would be too many additional constants to pre-calculate.
  49. */
  50. #define JPEG_INTERNALS
  51. #include "jinclude.h"
  52. #include "jpeglib.h"
  53. #include "jdct.h" /* Private declarations for DCT subsystem */
  54. #ifdef DCT_ISLOW_SUPPORTED
  55. /*
  56. * This module is specialized to the case DCTSIZE = 8.
  57. */
  58. #if DCTSIZE != 8
  59. Sorry, this code only copes with 8x8 DCT blocks. /* deliberate syntax err */
  60. #endif
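/* Illustrative sketch, not part of the IJG sources: as the header comment
 * explains, the reduced-size (N<8) routines below read only the N lowest
 * frequencies in each dimension, i.e. the top-left NxN corner of the 8x8
 * coefficient block. The hypothetical helper makes that selection explicit;
 * the real routines simply index the corner directly (inptr[DCTSIZE*row])
 * during their column pass, so this code is left disabled.
 */
#if 0
LOCAL(void)
extract_low_freq_corner (JCOEFPTR coef_block, JCOEF * subblock, int n)
{
  int row, col;
  for (row = 0; row < n; row++)
    for (col = 0; col < n; col++)
      subblock[row * n + col] = coef_block[row * DCTSIZE + col];
}
#endif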
  61. /*
  62. * The poop on this scaling stuff is as follows:
  63. *
  64. * Each 1-D IDCT step produces outputs which are a factor of sqrt(N)
  65. * larger than the true IDCT outputs. The final outputs are therefore
  66. * a factor of N larger than desired; since N=8 this can be cured by
  67. * a simple right shift at the end of the algorithm. The advantage of
  68. * this arrangement is that we save two multiplications per 1-D IDCT,
  69. * because the y0 and y4 inputs need not be divided by sqrt(N).
  70. *
  71. * We have to do addition and subtraction of the integer inputs, which
  72. * is no problem, and multiplication by fractional constants, which is
  73. * a problem to do in integer arithmetic. We multiply all the constants
  74. * by CONST_SCALE and convert them to integer constants (thus retaining
  75. * CONST_BITS bits of precision in the constants). After doing a
  76. * multiplication we have to divide the product by CONST_SCALE, with proper
  77. * rounding, to produce the correct output. This division can be done
  78. * cheaply as a right shift of CONST_BITS bits. We postpone shifting
  79. * as long as possible so that partial sums can be added together with
  80. * full fractional precision.
  81. *
  82. * The outputs of the first pass are scaled up by PASS1_BITS bits so that
  83. * they are represented to better-than-integral precision. These outputs
  84. * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word
  85. * with the recommended scaling. (To scale up 12-bit sample data further, an
  86. * intermediate INT32 array would be needed.)
  87. *
  88. * To avoid overflow of the 32-bit intermediate results in pass 2, we must
  89. * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis
  90. * shows that the values given below are the most effective.
  91. */
  92. #if BITS_IN_JSAMPLE == 8
  93. #define CONST_BITS 13
  94. #define PASS1_BITS 2
  95. #else
  96. #define CONST_BITS 13
  97. #define PASS1_BITS 1 /* lose a little precision to avoid overflow */
  98. #endif
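/* Illustrative sketch, not part of the IJG sources: the overflow budget
 * stated above can be restated as a compile-time guard. With 8-bit samples
 * it is 8 + 13 + 2 = 23 <= 26, with 12-bit samples 12 + 13 + 1 = 26 <= 26,
 * so the check never fires for the recommended values; it is left disabled
 * here and shown only to make the constraint concrete.
 */
#if 0
#if (BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS) > 26
#error "32-bit intermediates could overflow in pass 2"
#endif
#endif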
  99. /* Some C compilers fail to reduce "FIX(constant)" at compile time, thus
  100. * causing a lot of useless floating-point operations at run time.
  101. * To get around this we use the following pre-calculated constants.
  102. * If you change CONST_BITS you may want to add appropriate values.
  103. * (With a reasonable C compiler, you can just rely on the FIX() macro...)
  104. */
  105. #if CONST_BITS == 13
  106. #define FIX_0_298631336 ((INT32) 2446) /* FIX(0.298631336) */
  107. #define FIX_0_390180644 ((INT32) 3196) /* FIX(0.390180644) */
  108. #define FIX_0_541196100 ((INT32) 4433) /* FIX(0.541196100) */
  109. #define FIX_0_765366865 ((INT32) 6270) /* FIX(0.765366865) */
  110. #define FIX_0_899976223 ((INT32) 7373) /* FIX(0.899976223) */
  111. #define FIX_1_175875602 ((INT32) 9633) /* FIX(1.175875602) */
  112. #define FIX_1_501321110 ((INT32) 12299) /* FIX(1.501321110) */
  113. #define FIX_1_847759065 ((INT32) 15137) /* FIX(1.847759065) */
  114. #define FIX_1_961570560 ((INT32) 16069) /* FIX(1.961570560) */
  115. #define FIX_2_053119869 ((INT32) 16819) /* FIX(2.053119869) */
  116. #define FIX_2_562915447 ((INT32) 20995) /* FIX(2.562915447) */
  117. #define FIX_3_072711026 ((INT32) 25172) /* FIX(3.072711026) */
  118. #else
  119. #define FIX_0_298631336 FIX(0.298631336)
  120. #define FIX_0_390180644 FIX(0.390180644)
  121. #define FIX_0_541196100 FIX(0.541196100)
  122. #define FIX_0_765366865 FIX(0.765366865)
  123. #define FIX_0_899976223 FIX(0.899976223)
  124. #define FIX_1_175875602 FIX(1.175875602)
  125. #define FIX_1_501321110 FIX(1.501321110)
  126. #define FIX_1_847759065 FIX(1.847759065)
  127. #define FIX_1_961570560 FIX(1.961570560)
  128. #define FIX_2_053119869 FIX(2.053119869)
  129. #define FIX_2_562915447 FIX(2.562915447)
  130. #define FIX_3_072711026 FIX(3.072711026)
  131. #endif
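/* Worked example, not part of the IJG sources: assuming FIX(x) from jdct.h
 * rounds x * (1 << CONST_BITS) to an integer, CONST_BITS == 13 gives a scale
 * of 8192, so 0.541196100 * 8192 rounds to 4433 and 1.847759065 * 8192
 * rounds to 15137 -- exactly the pre-calculated values above. The disabled
 * self-check below restates that correspondence.
 */
#if 0
LOCAL(int)
fix_table_matches (void)
{
  return FIX(0.541196100) == FIX_0_541196100 &&  /* 4433 when CONST_BITS == 13 */
         FIX(1.847759065) == FIX_1_847759065;    /* 15137 when CONST_BITS == 13 */
}
#endif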
  132. /* Multiply an INT32 variable by an INT32 constant to yield an INT32 result.
  133. * For 8-bit samples with the recommended scaling, all the variable
  134. * and constant values involved are no more than 16 bits wide, so a
  135. * 16x16->32 bit multiply can be used instead of a full 32x32 multiply.
  136. * For 12-bit samples, a full 32-bit multiplication will be needed.
  137. */
  138. #if BITS_IN_JSAMPLE == 8
  139. #define MULTIPLY(var,const) MULTIPLY16C16(var,const)
  140. #else
  141. #define MULTIPLY(var,const) ((var) * (const))
  142. #endif
  143. /* Dequantize a coefficient by multiplying it by the multiplier-table
  144. * entry; produce an int result. In this module, both inputs and result
  145. * are 16 bits or less, so either int or short multiply will work.
  146. */
  147. #define DEQUANTIZE(coef,quantval) (((ISLOW_MULT_TYPE) (coef)) * (quantval))
  148. /*
  149. * Perform dequantization and inverse DCT on one block of coefficients.
  150. */
  151. GLOBAL(void)
  152. jpeg_idct_islow (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  153. JCOEFPTR coef_block,
  154. JSAMPARRAY output_buf, JDIMENSION output_col)
  155. {
  156. INT32 tmp0, tmp1, tmp2, tmp3;
  157. INT32 tmp10, tmp11, tmp12, tmp13;
  158. INT32 z1, z2, z3;
  159. JCOEFPTR inptr;
  160. ISLOW_MULT_TYPE * quantptr;
  161. int * wsptr;
  162. JSAMPROW outptr;
  163. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  164. int ctr;
  165. int workspace[DCTSIZE2]; /* buffers data between passes */
  166. SHIFT_TEMPS
  167. /* Pass 1: process columns from input, store into work array. */
  168. /* Note results are scaled up by sqrt(8) compared to a true IDCT; */
  169. /* furthermore, we scale the results by 2**PASS1_BITS. */
  170. inptr = coef_block;
  171. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  172. wsptr = workspace;
  173. for (ctr = DCTSIZE; ctr > 0; ctr--) {
  174. /* Due to quantization, we will usually find that many of the input
  175. * coefficients are zero, especially the AC terms. We can exploit this
  176. * by short-circuiting the IDCT calculation for any column in which all
  177. * the AC terms are zero. In that case each output is equal to the
  178. * DC coefficient (with scale factor as needed).
  179. * With typical images and quantization tables, half or more of the
  180. * column DCT calculations can be simplified this way.
  181. */
  182. if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 &&
  183. inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*4] == 0 &&
  184. inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*6] == 0 &&
  185. inptr[DCTSIZE*7] == 0) {
  186. /* AC terms all zero */
  187. int dcval = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) << PASS1_BITS;
  188. wsptr[DCTSIZE*0] = dcval;
  189. wsptr[DCTSIZE*1] = dcval;
  190. wsptr[DCTSIZE*2] = dcval;
  191. wsptr[DCTSIZE*3] = dcval;
  192. wsptr[DCTSIZE*4] = dcval;
  193. wsptr[DCTSIZE*5] = dcval;
  194. wsptr[DCTSIZE*6] = dcval;
  195. wsptr[DCTSIZE*7] = dcval;
  196. inptr++; /* advance pointers to next column */
  197. quantptr++;
  198. wsptr++;
  199. continue;
  200. }
  201. /* Even part: reverse the even part of the forward DCT. */
  202. /* The rotator is sqrt(2)*c(-6). */
  203. z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  204. z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  205. z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
  206. tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865);
  207. tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065);
  208. z2 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  209. z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  210. z2 <<= CONST_BITS;
  211. z3 <<= CONST_BITS;
  212. /* Add fudge factor here for final descale. */
  213. z2 += ONE << (CONST_BITS-PASS1_BITS-1);
  214. tmp0 = z2 + z3;
  215. tmp1 = z2 - z3;
  216. tmp10 = tmp0 + tmp2;
  217. tmp13 = tmp0 - tmp2;
  218. tmp11 = tmp1 + tmp3;
  219. tmp12 = tmp1 - tmp3;
  220. /* Odd part per figure 8; the matrix is unitary and hence its
  221. * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
  222. */
  223. tmp0 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  224. tmp1 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  225. tmp2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  226. tmp3 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  227. z2 = tmp0 + tmp2;
  228. z3 = tmp1 + tmp3;
  229. z1 = MULTIPLY(z2 + z3, FIX_1_175875602); /* sqrt(2) * c3 */
  230. z2 = MULTIPLY(z2, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
  231. z3 = MULTIPLY(z3, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
  232. z2 += z1;
  233. z3 += z1;
  234. z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
  235. tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
  236. tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
  237. tmp0 += z1 + z2;
  238. tmp3 += z1 + z3;
  239. z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
  240. tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
  241. tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
  242. tmp1 += z1 + z3;
  243. tmp2 += z1 + z2;
  244. /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
  245. wsptr[DCTSIZE*0] = (int) RIGHT_SHIFT(tmp10 + tmp3, CONST_BITS-PASS1_BITS);
  246. wsptr[DCTSIZE*7] = (int) RIGHT_SHIFT(tmp10 - tmp3, CONST_BITS-PASS1_BITS);
  247. wsptr[DCTSIZE*1] = (int) RIGHT_SHIFT(tmp11 + tmp2, CONST_BITS-PASS1_BITS);
  248. wsptr[DCTSIZE*6] = (int) RIGHT_SHIFT(tmp11 - tmp2, CONST_BITS-PASS1_BITS);
  249. wsptr[DCTSIZE*2] = (int) RIGHT_SHIFT(tmp12 + tmp1, CONST_BITS-PASS1_BITS);
  250. wsptr[DCTSIZE*5] = (int) RIGHT_SHIFT(tmp12 - tmp1, CONST_BITS-PASS1_BITS);
  251. wsptr[DCTSIZE*3] = (int) RIGHT_SHIFT(tmp13 + tmp0, CONST_BITS-PASS1_BITS);
  252. wsptr[DCTSIZE*4] = (int) RIGHT_SHIFT(tmp13 - tmp0, CONST_BITS-PASS1_BITS);
  253. inptr++; /* advance pointers to next column */
  254. quantptr++;
  255. wsptr++;
  256. }
  257. /* Pass 2: process rows from work array, store into output array. */
  258. /* Note that we must descale the results by a factor of 8 == 2**3, */
  259. /* and also undo the PASS1_BITS scaling. */
  260. wsptr = workspace;
  261. for (ctr = 0; ctr < DCTSIZE; ctr++) {
  262. outptr = output_buf[ctr] + output_col;
  263. /* Rows of zeroes can be exploited in the same way as we did with columns.
  264. * However, the column calculation has created many nonzero AC terms, so
  265. * the simplification applies less often (typically 5% to 10% of the time).
  266. * On machines with very fast multiplication, it's possible that the
  267. * test takes more time than it's worth. In that case this section
  268. * may be commented out.
  269. */
  270. #ifndef NO_ZERO_ROW_TEST
  271. if (wsptr[1] == 0 && wsptr[2] == 0 && wsptr[3] == 0 && wsptr[4] == 0 &&
  272. wsptr[5] == 0 && wsptr[6] == 0 && wsptr[7] == 0) {
  273. /* AC terms all zero */
  274. JSAMPLE dcval = range_limit[(int) DESCALE((INT32) wsptr[0], PASS1_BITS+3)
  275. & RANGE_MASK];
  276. outptr[0] = dcval;
  277. outptr[1] = dcval;
  278. outptr[2] = dcval;
  279. outptr[3] = dcval;
  280. outptr[4] = dcval;
  281. outptr[5] = dcval;
  282. outptr[6] = dcval;
  283. outptr[7] = dcval;
  284. wsptr += DCTSIZE; /* advance pointer to next row */
  285. continue;
  286. }
  287. #endif
  288. /* Even part: reverse the even part of the forward DCT. */
  289. /* The rotator is sqrt(2)*c(-6). */
  290. z2 = (INT32) wsptr[2];
  291. z3 = (INT32) wsptr[6];
  292. z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
  293. tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865);
  294. tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065);
  295. /* Add fudge factor here for final descale. */
  296. z2 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  297. z3 = (INT32) wsptr[4];
  298. tmp0 = (z2 + z3) << CONST_BITS;
  299. tmp1 = (z2 - z3) << CONST_BITS;
  300. tmp10 = tmp0 + tmp2;
  301. tmp13 = tmp0 - tmp2;
  302. tmp11 = tmp1 + tmp3;
  303. tmp12 = tmp1 - tmp3;
  304. /* Odd part per figure 8; the matrix is unitary and hence its
  305. * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
  306. */
  307. tmp0 = (INT32) wsptr[7];
  308. tmp1 = (INT32) wsptr[5];
  309. tmp2 = (INT32) wsptr[3];
  310. tmp3 = (INT32) wsptr[1];
  311. z2 = tmp0 + tmp2;
  312. z3 = tmp1 + tmp3;
  313. z1 = MULTIPLY(z2 + z3, FIX_1_175875602); /* sqrt(2) * c3 */
  314. z2 = MULTIPLY(z2, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
  315. z3 = MULTIPLY(z3, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
  316. z2 += z1;
  317. z3 += z1;
  318. z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
  319. tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
  320. tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
  321. tmp0 += z1 + z2;
  322. tmp3 += z1 + z3;
  323. z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
  324. tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
  325. tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
  326. tmp1 += z1 + z3;
  327. tmp2 += z1 + z2;
  328. /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
  329. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp3,
  330. CONST_BITS+PASS1_BITS+3)
  331. & RANGE_MASK];
  332. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp3,
  333. CONST_BITS+PASS1_BITS+3)
  334. & RANGE_MASK];
  335. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp2,
  336. CONST_BITS+PASS1_BITS+3)
  337. & RANGE_MASK];
  338. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp2,
  339. CONST_BITS+PASS1_BITS+3)
  340. & RANGE_MASK];
  341. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp1,
  342. CONST_BITS+PASS1_BITS+3)
  343. & RANGE_MASK];
  344. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp1,
  345. CONST_BITS+PASS1_BITS+3)
  346. & RANGE_MASK];
  347. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13 + tmp0,
  348. CONST_BITS+PASS1_BITS+3)
  349. & RANGE_MASK];
  350. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp13 - tmp0,
  351. CONST_BITS+PASS1_BITS+3)
  352. & RANGE_MASK];
  353. wsptr += DCTSIZE; /* advance pointer to next row */
  354. }
  355. }
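/* Illustrative sketch, not part of the IJG sources: the pass structure of
 * jpeg_idct_islow above, restated with a naive floating-point 1-D IDCT.
 * Pass 1 transforms each column of the 8x8 coefficient block into a
 * workspace; pass 2 transforms each workspace row. All names here are
 * hypothetical, and the fixed-point scaling, rounding and range-limiting
 * (level shift) of the real routine are omitted.
 */
#if 0
#include <math.h>

LOCAL(void)
naive_idct_1d (const double * in, double * out, int stride)
{
  int x, u;
  for (x = 0; x < 8; x++) {
    double s = 0.0;
    for (u = 0; u < 8; u++) {
      double cu = (u == 0) ? sqrt(0.5) : 1.0;
      s += cu * in[u * stride] * cos((2 * x + 1) * u * 3.14159265358979323846 / 16.0);
    }
    out[x * stride] = 0.5 * s;
  }
}

LOCAL(void)
naive_idct_2d (const double coef[64], double pixels[64])
{
  double work[64];
  int i;
  for (i = 0; i < 8; i++)           /* pass 1: one 1-D IDCT per column */
    naive_idct_1d(coef + i, work + i, 8);
  for (i = 0; i < 8; i++)           /* pass 2: one 1-D IDCT per row */
    naive_idct_1d(work + 8 * i, pixels + 8 * i, 1);
}
#endif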
  356. #ifdef IDCT_SCALING_SUPPORTED
  357. /*
  358. * Perform dequantization and inverse DCT on one block of coefficients,
  359. * producing a 7x7 output block.
  360. *
  361. * Optimized algorithm with 12 multiplications in the 1-D kernel.
  362. * cK represents sqrt(2) * cos(K*pi/14).
  363. */
  364. GLOBAL(void)
  365. jpeg_idct_7x7 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  366. JCOEFPTR coef_block,
  367. JSAMPARRAY output_buf, JDIMENSION output_col)
  368. {
  369. INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12, tmp13;
  370. INT32 z1, z2, z3;
  371. JCOEFPTR inptr;
  372. ISLOW_MULT_TYPE * quantptr;
  373. int * wsptr;
  374. JSAMPROW outptr;
  375. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  376. int ctr;
  377. int workspace[7*7]; /* buffers data between passes */
  378. SHIFT_TEMPS
  379. /* Pass 1: process columns from input, store into work array. */
  380. inptr = coef_block;
  381. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  382. wsptr = workspace;
  383. for (ctr = 0; ctr < 7; ctr++, inptr++, quantptr++, wsptr++) {
  384. /* Even part */
  385. tmp13 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  386. tmp13 <<= CONST_BITS;
  387. /* Add fudge factor here for final descale. */
  388. tmp13 += ONE << (CONST_BITS-PASS1_BITS-1);
  389. z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  390. z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  391. z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  392. tmp10 = MULTIPLY(z2 - z3, FIX(0.881747734)); /* c4 */
  393. tmp12 = MULTIPLY(z1 - z2, FIX(0.314692123)); /* c6 */
  394. tmp11 = tmp10 + tmp12 + tmp13 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */
  395. tmp0 = z1 + z3;
  396. z2 -= tmp0;
  397. tmp0 = MULTIPLY(tmp0, FIX(1.274162392)) + tmp13; /* c2 */
  398. tmp10 += tmp0 - MULTIPLY(z3, FIX(0.077722536)); /* c2-c4-c6 */
  399. tmp12 += tmp0 - MULTIPLY(z1, FIX(2.470602249)); /* c2+c4+c6 */
  400. tmp13 += MULTIPLY(z2, FIX(1.414213562)); /* c0 */
  401. /* Odd part */
  402. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  403. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  404. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  405. tmp1 = MULTIPLY(z1 + z2, FIX(0.935414347)); /* (c3+c1-c5)/2 */
  406. tmp2 = MULTIPLY(z1 - z2, FIX(0.170262339)); /* (c3+c5-c1)/2 */
  407. tmp0 = tmp1 - tmp2;
  408. tmp1 += tmp2;
  409. tmp2 = MULTIPLY(z2 + z3, - FIX(1.378756276)); /* -c1 */
  410. tmp1 += tmp2;
  411. z2 = MULTIPLY(z1 + z3, FIX(0.613604268)); /* c5 */
  412. tmp0 += z2;
  413. tmp2 += z2 + MULTIPLY(z3, FIX(1.870828693)); /* c3+c1-c5 */
  414. /* Final output stage */
  415. wsptr[7*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
  416. wsptr[7*6] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
  417. wsptr[7*1] = (int) RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS-PASS1_BITS);
  418. wsptr[7*5] = (int) RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS-PASS1_BITS);
  419. wsptr[7*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS);
  420. wsptr[7*4] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS);
  421. wsptr[7*3] = (int) RIGHT_SHIFT(tmp13, CONST_BITS-PASS1_BITS);
  422. }
  423. /* Pass 2: process 7 rows from work array, store into output array. */
  424. wsptr = workspace;
  425. for (ctr = 0; ctr < 7; ctr++) {
  426. outptr = output_buf[ctr] + output_col;
  427. /* Even part */
  428. /* Add fudge factor here for final descale. */
  429. tmp13 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  430. tmp13 <<= CONST_BITS;
  431. z1 = (INT32) wsptr[2];
  432. z2 = (INT32) wsptr[4];
  433. z3 = (INT32) wsptr[6];
  434. tmp10 = MULTIPLY(z2 - z3, FIX(0.881747734)); /* c4 */
  435. tmp12 = MULTIPLY(z1 - z2, FIX(0.314692123)); /* c6 */
  436. tmp11 = tmp10 + tmp12 + tmp13 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */
  437. tmp0 = z1 + z3;
  438. z2 -= tmp0;
  439. tmp0 = MULTIPLY(tmp0, FIX(1.274162392)) + tmp13; /* c2 */
  440. tmp10 += tmp0 - MULTIPLY(z3, FIX(0.077722536)); /* c2-c4-c6 */
  441. tmp12 += tmp0 - MULTIPLY(z1, FIX(2.470602249)); /* c2+c4+c6 */
  442. tmp13 += MULTIPLY(z2, FIX(1.414213562)); /* c0 */
  443. /* Odd part */
  444. z1 = (INT32) wsptr[1];
  445. z2 = (INT32) wsptr[3];
  446. z3 = (INT32) wsptr[5];
  447. tmp1 = MULTIPLY(z1 + z2, FIX(0.935414347)); /* (c3+c1-c5)/2 */
  448. tmp2 = MULTIPLY(z1 - z2, FIX(0.170262339)); /* (c3+c5-c1)/2 */
  449. tmp0 = tmp1 - tmp2;
  450. tmp1 += tmp2;
  451. tmp2 = MULTIPLY(z2 + z3, - FIX(1.378756276)); /* -c1 */
  452. tmp1 += tmp2;
  453. z2 = MULTIPLY(z1 + z3, FIX(0.613604268)); /* c5 */
  454. tmp0 += z2;
  455. tmp2 += z2 + MULTIPLY(z3, FIX(1.870828693)); /* c3+c1-c5 */
  456. /* Final output stage */
  457. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
  458. CONST_BITS+PASS1_BITS+3)
  459. & RANGE_MASK];
  460. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
  461. CONST_BITS+PASS1_BITS+3)
  462. & RANGE_MASK];
  463. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1,
  464. CONST_BITS+PASS1_BITS+3)
  465. & RANGE_MASK];
  466. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1,
  467. CONST_BITS+PASS1_BITS+3)
  468. & RANGE_MASK];
  469. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
  470. CONST_BITS+PASS1_BITS+3)
  471. & RANGE_MASK];
  472. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
  473. CONST_BITS+PASS1_BITS+3)
  474. & RANGE_MASK];
  475. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13,
  476. CONST_BITS+PASS1_BITS+3)
  477. & RANGE_MASK];
  478. wsptr += 7; /* advance pointer to next row */
  479. }
  480. }
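/* Illustrative sketch, not part of the IJG sources: the "cK" constants quoted
 * in the comments of the scaled routines are cK = sqrt(2) * cos(K*pi/(2*N))
 * for an N-point kernel; e.g. for the 7x7 routine above, c2 = sqrt(2) *
 * cos(2*pi/14) = 1.274162392 and c6 = sqrt(2) * cos(6*pi/14) = 0.314692123.
 * (The 4x4 routine, as noted there, reuses the 8-point constants.) The
 * disabled helper recomputes such a constant for any K and N.
 */
#if 0
#include <math.h>

LOCAL(double)
scaled_cosine (int k, int n)
{
  /* cK as used in the comments: sqrt(2) * cos(K * pi / (2 * N)) */
  return sqrt(2.0) * cos(k * 3.14159265358979323846 / (2.0 * n));
}
#endif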
  481. /*
  482. * Perform dequantization and inverse DCT on one block of coefficients,
  483. * producing a reduced-size 6x6 output block.
  484. *
  485. * Optimized algorithm with 3 multiplications in the 1-D kernel.
  486. * cK represents sqrt(2) * cos(K*pi/12).
  487. */
  488. GLOBAL(void)
  489. jpeg_idct_6x6 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  490. JCOEFPTR coef_block,
  491. JSAMPARRAY output_buf, JDIMENSION output_col)
  492. {
  493. INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12;
  494. INT32 z1, z2, z3;
  495. JCOEFPTR inptr;
  496. ISLOW_MULT_TYPE * quantptr;
  497. int * wsptr;
  498. JSAMPROW outptr;
  499. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  500. int ctr;
  501. int workspace[6*6]; /* buffers data between passes */
  502. SHIFT_TEMPS
  503. /* Pass 1: process columns from input, store into work array. */
  504. inptr = coef_block;
  505. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  506. wsptr = workspace;
  507. for (ctr = 0; ctr < 6; ctr++, inptr++, quantptr++, wsptr++) {
  508. /* Even part */
  509. tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  510. tmp0 <<= CONST_BITS;
  511. /* Add fudge factor here for final descale. */
  512. tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);
  513. tmp2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  514. tmp10 = MULTIPLY(tmp2, FIX(0.707106781)); /* c4 */
  515. tmp1 = tmp0 + tmp10;
  516. tmp11 = RIGHT_SHIFT(tmp0 - tmp10 - tmp10, CONST_BITS-PASS1_BITS);
  517. tmp10 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  518. tmp0 = MULTIPLY(tmp10, FIX(1.224744871)); /* c2 */
  519. tmp10 = tmp1 + tmp0;
  520. tmp12 = tmp1 - tmp0;
  521. /* Odd part */
  522. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  523. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  524. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  525. tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */
  526. tmp0 = tmp1 + ((z1 + z2) << CONST_BITS);
  527. tmp2 = tmp1 + ((z3 - z2) << CONST_BITS);
  528. tmp1 = (z1 - z2 - z3) << PASS1_BITS;
  529. /* Final output stage */
  530. wsptr[6*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
  531. wsptr[6*5] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
  532. wsptr[6*1] = (int) (tmp11 + tmp1);
  533. wsptr[6*4] = (int) (tmp11 - tmp1);
  534. wsptr[6*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS);
  535. wsptr[6*3] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS);
  536. }
  537. /* Pass 2: process 6 rows from work array, store into output array. */
  538. wsptr = workspace;
  539. for (ctr = 0; ctr < 6; ctr++) {
  540. outptr = output_buf[ctr] + output_col;
  541. /* Even part */
  542. /* Add fudge factor here for final descale. */
  543. tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  544. tmp0 <<= CONST_BITS;
  545. tmp2 = (INT32) wsptr[4];
  546. tmp10 = MULTIPLY(tmp2, FIX(0.707106781)); /* c4 */
  547. tmp1 = tmp0 + tmp10;
  548. tmp11 = tmp0 - tmp10 - tmp10;
  549. tmp10 = (INT32) wsptr[2];
  550. tmp0 = MULTIPLY(tmp10, FIX(1.224744871)); /* c2 */
  551. tmp10 = tmp1 + tmp0;
  552. tmp12 = tmp1 - tmp0;
  553. /* Odd part */
  554. z1 = (INT32) wsptr[1];
  555. z2 = (INT32) wsptr[3];
  556. z3 = (INT32) wsptr[5];
  557. tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */
  558. tmp0 = tmp1 + ((z1 + z2) << CONST_BITS);
  559. tmp2 = tmp1 + ((z3 - z2) << CONST_BITS);
  560. tmp1 = (z1 - z2 - z3) << CONST_BITS;
  561. /* Final output stage */
  562. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
  563. CONST_BITS+PASS1_BITS+3)
  564. & RANGE_MASK];
  565. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
  566. CONST_BITS+PASS1_BITS+3)
  567. & RANGE_MASK];
  568. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1,
  569. CONST_BITS+PASS1_BITS+3)
  570. & RANGE_MASK];
  571. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1,
  572. CONST_BITS+PASS1_BITS+3)
  573. & RANGE_MASK];
  574. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
  575. CONST_BITS+PASS1_BITS+3)
  576. & RANGE_MASK];
  577. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
  578. CONST_BITS+PASS1_BITS+3)
  579. & RANGE_MASK];
  580. wsptr += 6; /* advance pointer to next row */
  581. }
  582. }
  583. /*
  584. * Perform dequantization and inverse DCT on one block of coefficients,
  585. * producing a reduced-size 5x5 output block.
  586. *
  587. * Optimized algorithm with 5 multiplications in the 1-D kernel.
  588. * cK represents sqrt(2) * cos(K*pi/10).
  589. */
  590. GLOBAL(void)
  591. jpeg_idct_5x5 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  592. JCOEFPTR coef_block,
  593. JSAMPARRAY output_buf, JDIMENSION output_col)
  594. {
  595. INT32 tmp0, tmp1, tmp10, tmp11, tmp12;
  596. INT32 z1, z2, z3;
  597. JCOEFPTR inptr;
  598. ISLOW_MULT_TYPE * quantptr;
  599. int * wsptr;
  600. JSAMPROW outptr;
  601. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  602. int ctr;
  603. int workspace[5*5]; /* buffers data between passes */
  604. SHIFT_TEMPS
  605. /* Pass 1: process columns from input, store into work array. */
  606. inptr = coef_block;
  607. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  608. wsptr = workspace;
  609. for (ctr = 0; ctr < 5; ctr++, inptr++, quantptr++, wsptr++) {
  610. /* Even part */
  611. tmp12 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  612. tmp12 <<= CONST_BITS;
  613. /* Add fudge factor here for final descale. */
  614. tmp12 += ONE << (CONST_BITS-PASS1_BITS-1);
  615. tmp0 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  616. tmp1 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  617. z1 = MULTIPLY(tmp0 + tmp1, FIX(0.790569415)); /* (c2+c4)/2 */
  618. z2 = MULTIPLY(tmp0 - tmp1, FIX(0.353553391)); /* (c2-c4)/2 */
  619. z3 = tmp12 + z2;
  620. tmp10 = z3 + z1;
  621. tmp11 = z3 - z1;
  622. tmp12 -= z2 << 2;
  623. /* Odd part */
  624. z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  625. z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  626. z1 = MULTIPLY(z2 + z3, FIX(0.831253876)); /* c3 */
  627. tmp0 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c1-c3 */
  628. tmp1 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c1+c3 */
  629. /* Final output stage */
  630. wsptr[5*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
  631. wsptr[5*4] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
  632. wsptr[5*1] = (int) RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS-PASS1_BITS);
  633. wsptr[5*3] = (int) RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS-PASS1_BITS);
  634. wsptr[5*2] = (int) RIGHT_SHIFT(tmp12, CONST_BITS-PASS1_BITS);
  635. }
  636. /* Pass 2: process 5 rows from work array, store into output array. */
  637. wsptr = workspace;
  638. for (ctr = 0; ctr < 5; ctr++) {
  639. outptr = output_buf[ctr] + output_col;
  640. /* Even part */
  641. /* Add fudge factor here for final descale. */
  642. tmp12 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  643. tmp12 <<= CONST_BITS;
  644. tmp0 = (INT32) wsptr[2];
  645. tmp1 = (INT32) wsptr[4];
  646. z1 = MULTIPLY(tmp0 + tmp1, FIX(0.790569415)); /* (c2+c4)/2 */
  647. z2 = MULTIPLY(tmp0 - tmp1, FIX(0.353553391)); /* (c2-c4)/2 */
  648. z3 = tmp12 + z2;
  649. tmp10 = z3 + z1;
  650. tmp11 = z3 - z1;
  651. tmp12 -= z2 << 2;
  652. /* Odd part */
  653. z2 = (INT32) wsptr[1];
  654. z3 = (INT32) wsptr[3];
  655. z1 = MULTIPLY(z2 + z3, FIX(0.831253876)); /* c3 */
  656. tmp0 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c1-c3 */
  657. tmp1 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c1+c3 */
  658. /* Final output stage */
  659. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
  660. CONST_BITS+PASS1_BITS+3)
  661. & RANGE_MASK];
  662. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
  663. CONST_BITS+PASS1_BITS+3)
  664. & RANGE_MASK];
  665. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1,
  666. CONST_BITS+PASS1_BITS+3)
  667. & RANGE_MASK];
  668. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1,
  669. CONST_BITS+PASS1_BITS+3)
  670. & RANGE_MASK];
  671. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12,
  672. CONST_BITS+PASS1_BITS+3)
  673. & RANGE_MASK];
  674. wsptr += 5; /* advance pointer to next row */
  675. }
  676. }
  677. /*
  678. * Perform dequantization and inverse DCT on one block of coefficients,
  679. * producing a reduced-size 4x4 output block.
  680. *
  681. * Optimized algorithm with 3 multiplications in the 1-D kernel.
  682. * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point IDCT].
  683. */
  684. GLOBAL(void)
  685. jpeg_idct_4x4 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  686. JCOEFPTR coef_block,
  687. JSAMPARRAY output_buf, JDIMENSION output_col)
  688. {
  689. INT32 tmp0, tmp2, tmp10, tmp12;
  690. INT32 z1, z2, z3;
  691. JCOEFPTR inptr;
  692. ISLOW_MULT_TYPE * quantptr;
  693. int * wsptr;
  694. JSAMPROW outptr;
  695. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  696. int ctr;
  697. int workspace[4*4]; /* buffers data between passes */
  698. SHIFT_TEMPS
  699. /* Pass 1: process columns from input, store into work array. */
  700. inptr = coef_block;
  701. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  702. wsptr = workspace;
  703. for (ctr = 0; ctr < 4; ctr++, inptr++, quantptr++, wsptr++) {
  704. /* Even part */
  705. tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  706. tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  707. tmp10 = (tmp0 + tmp2) << PASS1_BITS;
  708. tmp12 = (tmp0 - tmp2) << PASS1_BITS;
  709. /* Odd part */
  710. /* Same rotation as in the even part of the 8x8 LL&M IDCT */
  711. z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  712. z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  713. z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */
  714. /* Add fudge factor here for final descale. */
  715. z1 += ONE << (CONST_BITS-PASS1_BITS-1);
  716. tmp0 = RIGHT_SHIFT(z1 + MULTIPLY(z2, FIX_0_765366865), /* c2-c6 */
  717. CONST_BITS-PASS1_BITS);
  718. tmp2 = RIGHT_SHIFT(z1 - MULTIPLY(z3, FIX_1_847759065), /* c2+c6 */
  719. CONST_BITS-PASS1_BITS);
  720. /* Final output stage */
  721. wsptr[4*0] = (int) (tmp10 + tmp0);
  722. wsptr[4*3] = (int) (tmp10 - tmp0);
  723. wsptr[4*1] = (int) (tmp12 + tmp2);
  724. wsptr[4*2] = (int) (tmp12 - tmp2);
  725. }
  726. /* Pass 2: process 4 rows from work array, store into output array. */
  727. wsptr = workspace;
  728. for (ctr = 0; ctr < 4; ctr++) {
  729. outptr = output_buf[ctr] + output_col;
  730. /* Even part */
  731. /* Add fudge factor here for final descale. */
  732. tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  733. tmp2 = (INT32) wsptr[2];
  734. tmp10 = (tmp0 + tmp2) << CONST_BITS;
  735. tmp12 = (tmp0 - tmp2) << CONST_BITS;
  736. /* Odd part */
  737. /* Same rotation as in the even part of the 8x8 LL&M IDCT */
  738. z2 = (INT32) wsptr[1];
  739. z3 = (INT32) wsptr[3];
  740. z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */
  741. tmp0 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */
  742. tmp2 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */
  743. /* Final output stage */
  744. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
  745. CONST_BITS+PASS1_BITS+3)
  746. & RANGE_MASK];
  747. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
  748. CONST_BITS+PASS1_BITS+3)
  749. & RANGE_MASK];
  750. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
  751. CONST_BITS+PASS1_BITS+3)
  752. & RANGE_MASK];
  753. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
  754. CONST_BITS+PASS1_BITS+3)
  755. & RANGE_MASK];
  756. wsptr += 4; /* advance pointer to next row */
  757. }
  758. }
  759. /*
  760. * Perform dequantization and inverse DCT on one block of coefficients,
  761. * producing a reduced-size 3x3 output block.
  762. *
  763. * Optimized algorithm with 2 multiplications in the 1-D kernel.
  764. * cK represents sqrt(2) * cos(K*pi/6).
  765. */
  766. GLOBAL(void)
  767. jpeg_idct_3x3 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  768. JCOEFPTR coef_block,
  769. JSAMPARRAY output_buf, JDIMENSION output_col)
  770. {
  771. INT32 tmp0, tmp2, tmp10, tmp12;
  772. JCOEFPTR inptr;
  773. ISLOW_MULT_TYPE * quantptr;
  774. int * wsptr;
  775. JSAMPROW outptr;
  776. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  777. int ctr;
  778. int workspace[3*3]; /* buffers data between passes */
  779. SHIFT_TEMPS
  780. /* Pass 1: process columns from input, store into work array. */
  781. inptr = coef_block;
  782. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  783. wsptr = workspace;
  784. for (ctr = 0; ctr < 3; ctr++, inptr++, quantptr++, wsptr++) {
  785. /* Even part */
  786. tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  787. tmp0 <<= CONST_BITS;
  788. /* Add fudge factor here for final descale. */
  789. tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);
  790. tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  791. tmp12 = MULTIPLY(tmp2, FIX(0.707106781)); /* c2 */
  792. tmp10 = tmp0 + tmp12;
  793. tmp2 = tmp0 - tmp12 - tmp12;
  794. /* Odd part */
  795. tmp12 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  796. tmp0 = MULTIPLY(tmp12, FIX(1.224744871)); /* c1 */
  797. /* Final output stage */
  798. wsptr[3*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
  799. wsptr[3*2] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
  800. wsptr[3*1] = (int) RIGHT_SHIFT(tmp2, CONST_BITS-PASS1_BITS);
  801. }
  802. /* Pass 2: process 3 rows from work array, store into output array. */
  803. wsptr = workspace;
  804. for (ctr = 0; ctr < 3; ctr++) {
  805. outptr = output_buf[ctr] + output_col;
  806. /* Even part */
  807. /* Add fudge factor here for final descale. */
  808. tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  809. tmp0 <<= CONST_BITS;
  810. tmp2 = (INT32) wsptr[2];
  811. tmp12 = MULTIPLY(tmp2, FIX(0.707106781)); /* c2 */
  812. tmp10 = tmp0 + tmp12;
  813. tmp2 = tmp0 - tmp12 - tmp12;
  814. /* Odd part */
  815. tmp12 = (INT32) wsptr[1];
  816. tmp0 = MULTIPLY(tmp12, FIX(1.224744871)); /* c1 */
  817. /* Final output stage */
  818. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
  819. CONST_BITS+PASS1_BITS+3)
  820. & RANGE_MASK];
  821. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
  822. CONST_BITS+PASS1_BITS+3)
  823. & RANGE_MASK];
  824. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp2,
  825. CONST_BITS+PASS1_BITS+3)
  826. & RANGE_MASK];
  827. wsptr += 3; /* advance pointer to next row */
  828. }
  829. }
  830. /*
  831. * Perform dequantization and inverse DCT on one block of coefficients,
  832. * producing a reduced-size 2x2 output block.
  833. *
  834. * Multiplication-less algorithm.
  835. */
  836. GLOBAL(void)
  837. jpeg_idct_2x2 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  838. JCOEFPTR coef_block,
  839. JSAMPARRAY output_buf, JDIMENSION output_col)
  840. {
  841. INT32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
  842. ISLOW_MULT_TYPE * quantptr;
  843. JSAMPROW outptr;
  844. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  845. SHIFT_TEMPS
  846. /* Pass 1: process columns from input. */
  847. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  848. /* Column 0 */
  849. tmp4 = DEQUANTIZE(coef_block[DCTSIZE*0], quantptr[DCTSIZE*0]);
  850. tmp5 = DEQUANTIZE(coef_block[DCTSIZE*1], quantptr[DCTSIZE*1]);
  851. /* Add fudge factor here for final descale. */
  852. tmp4 += ONE << 2;
  853. tmp0 = tmp4 + tmp5;
  854. tmp2 = tmp4 - tmp5;
  855. /* Column 1 */
  856. tmp4 = DEQUANTIZE(coef_block[DCTSIZE*0+1], quantptr[DCTSIZE*0+1]);
  857. tmp5 = DEQUANTIZE(coef_block[DCTSIZE*1+1], quantptr[DCTSIZE*1+1]);
  858. tmp1 = tmp4 + tmp5;
  859. tmp3 = tmp4 - tmp5;
  860. /* Pass 2: process 2 rows, store into output array. */
  861. /* Row 0 */
  862. outptr = output_buf[0] + output_col;
  863. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp0 + tmp1, 3) & RANGE_MASK];
  864. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp0 - tmp1, 3) & RANGE_MASK];
  865. /* Row 1 */
  866. outptr = output_buf[1] + output_col;
  867. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp2 + tmp3, 3) & RANGE_MASK];
  868. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp2 - tmp3, 3) & RANGE_MASK];
  869. }
  870. /*
  871. * Perform dequantization and inverse DCT on one block of coefficients,
  872. * producing a reduced-size 1x1 output block.
  873. *
  874. * We hardly need an inverse DCT routine for this: just take the
  875. * average pixel value, which is one-eighth of the DC coefficient.
  876. */
  877. GLOBAL(void)
  878. jpeg_idct_1x1 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  879. JCOEFPTR coef_block,
  880. JSAMPARRAY output_buf, JDIMENSION output_col)
  881. {
  882. int dcval;
  883. ISLOW_MULT_TYPE * quantptr;
  884. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  885. SHIFT_TEMPS
  886. /* 1x1 is trivial: just take the DC coefficient divided by 8. */
  887. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  888. dcval = DEQUANTIZE(coef_block[0], quantptr[0]);
  889. dcval = (int) DESCALE((INT32) dcval, 3);
  890. output_buf[0][output_col] = range_limit[dcval & RANGE_MASK];
  891. }
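/* Worked example, not part of the IJG sources: for the 1x1 case the output
 * sample is just the dequantized DC coefficient divided by 8 with rounding.
 * E.g. a dequantized DC of 999 gives (999 + 4) >> 3 == 125, which
 * range_limit[] then clamps and level-shifts into a legal JSAMPLE. The
 * disabled restatement below assumes DESCALE's usual round-then-shift
 * behaviour and a non-negative input.
 */
#if 0
LOCAL(int)
idct_1x1_value (int dequantized_dc)
{
  return (dequantized_dc + 4) >> 3;  /* same result as DESCALE(dc, 3) for dc >= 0 */
}
#endif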
  892. /*
  893. * Perform dequantization and inverse DCT on one block of coefficients,
  894. * producing a 9x9 output block.
  895. *
  896. * Optimized algorithm with 10 multiplications in the 1-D kernel.
  897. * cK represents sqrt(2) * cos(K*pi/18).
  898. */
  899. GLOBAL(void)
  900. jpeg_idct_9x9 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  901. JCOEFPTR coef_block,
  902. JSAMPARRAY output_buf, JDIMENSION output_col)
  903. {
  904. INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13, tmp14;
  905. INT32 z1, z2, z3, z4;
  906. JCOEFPTR inptr;
  907. ISLOW_MULT_TYPE * quantptr;
  908. int * wsptr;
  909. JSAMPROW outptr;
  910. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  911. int ctr;
  912. int workspace[8*9]; /* buffers data between passes */
  913. SHIFT_TEMPS
  914. /* Pass 1: process columns from input, store into work array. */
  915. inptr = coef_block;
  916. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  917. wsptr = workspace;
  918. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  919. /* Even part */
  920. tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  921. tmp0 <<= CONST_BITS;
  922. /* Add fudge factor here for final descale. */
  923. tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);
  924. z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  925. z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  926. z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  927. tmp3 = MULTIPLY(z3, FIX(0.707106781)); /* c6 */
  928. tmp1 = tmp0 + tmp3;
  929. tmp2 = tmp0 - tmp3 - tmp3;
  930. tmp0 = MULTIPLY(z1 - z2, FIX(0.707106781)); /* c6 */
  931. tmp11 = tmp2 + tmp0;
  932. tmp14 = tmp2 - tmp0 - tmp0;
  933. tmp0 = MULTIPLY(z1 + z2, FIX(1.328926049)); /* c2 */
  934. tmp2 = MULTIPLY(z1, FIX(1.083350441)); /* c4 */
  935. tmp3 = MULTIPLY(z2, FIX(0.245575608)); /* c8 */
  936. tmp10 = tmp1 + tmp0 - tmp3;
  937. tmp12 = tmp1 - tmp0 + tmp2;
  938. tmp13 = tmp1 - tmp2 + tmp3;
  939. /* Odd part */
  940. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  941. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  942. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  943. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  944. z2 = MULTIPLY(z2, - FIX(1.224744871)); /* -c3 */
  945. tmp2 = MULTIPLY(z1 + z3, FIX(0.909038955)); /* c5 */
  946. tmp3 = MULTIPLY(z1 + z4, FIX(0.483689525)); /* c7 */
  947. tmp0 = tmp2 + tmp3 - z2;
  948. tmp1 = MULTIPLY(z3 - z4, FIX(1.392728481)); /* c1 */
  949. tmp2 += z2 - tmp1;
  950. tmp3 += z2 + tmp1;
  951. tmp1 = MULTIPLY(z1 - z3 - z4, FIX(1.224744871)); /* c3 */
  952. /* Final output stage */
  953. wsptr[8*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
  954. wsptr[8*8] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
  955. wsptr[8*1] = (int) RIGHT_SHIFT(tmp11 + tmp1, CONST_BITS-PASS1_BITS);
  956. wsptr[8*7] = (int) RIGHT_SHIFT(tmp11 - tmp1, CONST_BITS-PASS1_BITS);
  957. wsptr[8*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS);
  958. wsptr[8*6] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS);
  959. wsptr[8*3] = (int) RIGHT_SHIFT(tmp13 + tmp3, CONST_BITS-PASS1_BITS);
  960. wsptr[8*5] = (int) RIGHT_SHIFT(tmp13 - tmp3, CONST_BITS-PASS1_BITS);
  961. wsptr[8*4] = (int) RIGHT_SHIFT(tmp14, CONST_BITS-PASS1_BITS);
  962. }
  963. /* Pass 2: process 9 rows from work array, store into output array. */
  964. wsptr = workspace;
  965. for (ctr = 0; ctr < 9; ctr++) {
  966. outptr = output_buf[ctr] + output_col;
  967. /* Even part */
  968. /* Add fudge factor here for final descale. */
  969. tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  970. tmp0 <<= CONST_BITS;
  971. z1 = (INT32) wsptr[2];
  972. z2 = (INT32) wsptr[4];
  973. z3 = (INT32) wsptr[6];
  974. tmp3 = MULTIPLY(z3, FIX(0.707106781)); /* c6 */
  975. tmp1 = tmp0 + tmp3;
  976. tmp2 = tmp0 - tmp3 - tmp3;
  977. tmp0 = MULTIPLY(z1 - z2, FIX(0.707106781)); /* c6 */
  978. tmp11 = tmp2 + tmp0;
  979. tmp14 = tmp2 - tmp0 - tmp0;
  980. tmp0 = MULTIPLY(z1 + z2, FIX(1.328926049)); /* c2 */
  981. tmp2 = MULTIPLY(z1, FIX(1.083350441)); /* c4 */
  982. tmp3 = MULTIPLY(z2, FIX(0.245575608)); /* c8 */
  983. tmp10 = tmp1 + tmp0 - tmp3;
  984. tmp12 = tmp1 - tmp0 + tmp2;
  985. tmp13 = tmp1 - tmp2 + tmp3;
  986. /* Odd part */
  987. z1 = (INT32) wsptr[1];
  988. z2 = (INT32) wsptr[3];
  989. z3 = (INT32) wsptr[5];
  990. z4 = (INT32) wsptr[7];
  991. z2 = MULTIPLY(z2, - FIX(1.224744871)); /* -c3 */
  992. tmp2 = MULTIPLY(z1 + z3, FIX(0.909038955)); /* c5 */
  993. tmp3 = MULTIPLY(z1 + z4, FIX(0.483689525)); /* c7 */
  994. tmp0 = tmp2 + tmp3 - z2;
  995. tmp1 = MULTIPLY(z3 - z4, FIX(1.392728481)); /* c1 */
  996. tmp2 += z2 - tmp1;
  997. tmp3 += z2 + tmp1;
  998. tmp1 = MULTIPLY(z1 - z3 - z4, FIX(1.224744871)); /* c3 */
  999. /* Final output stage */
  1000. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
  1001. CONST_BITS+PASS1_BITS+3)
  1002. & RANGE_MASK];
  1003. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
  1004. CONST_BITS+PASS1_BITS+3)
  1005. & RANGE_MASK];
  1006. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1,
  1007. CONST_BITS+PASS1_BITS+3)
  1008. & RANGE_MASK];
  1009. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1,
  1010. CONST_BITS+PASS1_BITS+3)
  1011. & RANGE_MASK];
  1012. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
  1013. CONST_BITS+PASS1_BITS+3)
  1014. & RANGE_MASK];
  1015. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
  1016. CONST_BITS+PASS1_BITS+3)
  1017. & RANGE_MASK];
  1018. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13 + tmp3,
  1019. CONST_BITS+PASS1_BITS+3)
  1020. & RANGE_MASK];
  1021. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp13 - tmp3,
  1022. CONST_BITS+PASS1_BITS+3)
  1023. & RANGE_MASK];
  1024. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp14,
  1025. CONST_BITS+PASS1_BITS+3)
  1026. & RANGE_MASK];
  1027. wsptr += 8; /* advance pointer to next row */
  1028. }
  1029. }
  1030. /*
  1031. * Perform dequantization and inverse DCT on one block of coefficients,
  1032. * producing a 10x10 output block.
  1033. *
  1034. * Optimized algorithm with 12 multiplications in the 1-D kernel.
  1035. * cK represents sqrt(2) * cos(K*pi/20).
  1036. */
  1037. GLOBAL(void)
  1038. jpeg_idct_10x10 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  1039. JCOEFPTR coef_block,
  1040. JSAMPARRAY output_buf, JDIMENSION output_col)
  1041. {
  1042. INT32 tmp10, tmp11, tmp12, tmp13, tmp14;
  1043. INT32 tmp20, tmp21, tmp22, tmp23, tmp24;
  1044. INT32 z1, z2, z3, z4, z5;
  1045. JCOEFPTR inptr;
  1046. ISLOW_MULT_TYPE * quantptr;
  1047. int * wsptr;
  1048. JSAMPROW outptr;
  1049. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  1050. int ctr;
  1051. int workspace[8*10]; /* buffers data between passes */
  1052. SHIFT_TEMPS
  1053. /* Pass 1: process columns from input, store into work array. */
  1054. inptr = coef_block;
  1055. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  1056. wsptr = workspace;
  1057. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  1058. /* Even part */
  1059. z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  1060. z3 <<= CONST_BITS;
  1061. /* Add fudge factor here for final descale. */
  1062. z3 += ONE << (CONST_BITS-PASS1_BITS-1);
  1063. z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  1064. z1 = MULTIPLY(z4, FIX(1.144122806)); /* c4 */
  1065. z2 = MULTIPLY(z4, FIX(0.437016024)); /* c8 */
  1066. tmp10 = z3 + z1;
  1067. tmp11 = z3 - z2;
  1068. tmp22 = RIGHT_SHIFT(z3 - ((z1 - z2) << 1), /* c0 = (c4-c8)*2 */
  1069. CONST_BITS-PASS1_BITS);
  1070. z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  1071. z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  1072. z1 = MULTIPLY(z2 + z3, FIX(0.831253876)); /* c6 */
  1073. tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c2-c6 */
  1074. tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c2+c6 */
  1075. tmp20 = tmp10 + tmp12;
  1076. tmp24 = tmp10 - tmp12;
  1077. tmp21 = tmp11 + tmp13;
  1078. tmp23 = tmp11 - tmp13;
  1079. /* Odd part */
  1080. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  1081. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  1082. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  1083. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  1084. tmp11 = z2 + z4;
  1085. tmp13 = z2 - z4;
  1086. tmp12 = MULTIPLY(tmp13, FIX(0.309016994)); /* (c3-c7)/2 */
  1087. z5 = z3 << CONST_BITS;
  1088. z2 = MULTIPLY(tmp11, FIX(0.951056516)); /* (c3+c7)/2 */
  1089. z4 = z5 + tmp12;
  1090. tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */
  1091. tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */
  1092. z2 = MULTIPLY(tmp11, FIX(0.587785252)); /* (c1-c9)/2 */
  1093. z4 = z5 - tmp12 - (tmp13 << (CONST_BITS - 1));
  1094. tmp12 = (z1 - tmp13 - z3) << PASS1_BITS;
  1095. tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */
  1096. tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */
  1097. /* Final output stage */
  1098. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
  1099. wsptr[8*9] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
  1100. wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
  1101. wsptr[8*8] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
  1102. wsptr[8*2] = (int) (tmp22 + tmp12);
  1103. wsptr[8*7] = (int) (tmp22 - tmp12);
  1104. wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
  1105. wsptr[8*6] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
  1106. wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
  1107. wsptr[8*5] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  1108. }
  1109. /* Pass 2: process 10 rows from work array, store into output array. */
  1110. wsptr = workspace;
  1111. for (ctr = 0; ctr < 10; ctr++) {
  1112. outptr = output_buf[ctr] + output_col;
  1113. /* Even part */
  1114. /* Add fudge factor here for final descale. */
  1115. z3 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  1116. z3 <<= CONST_BITS;
  1117. z4 = (INT32) wsptr[4];
  1118. z1 = MULTIPLY(z4, FIX(1.144122806)); /* c4 */
  1119. z2 = MULTIPLY(z4, FIX(0.437016024)); /* c8 */
  1120. tmp10 = z3 + z1;
  1121. tmp11 = z3 - z2;
  1122. tmp22 = z3 - ((z1 - z2) << 1); /* c0 = (c4-c8)*2 */
  1123. z2 = (INT32) wsptr[2];
  1124. z3 = (INT32) wsptr[6];
  1125. z1 = MULTIPLY(z2 + z3, FIX(0.831253876)); /* c6 */
  1126. tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c2-c6 */
  1127. tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c2+c6 */
  1128. tmp20 = tmp10 + tmp12;
  1129. tmp24 = tmp10 - tmp12;
  1130. tmp21 = tmp11 + tmp13;
  1131. tmp23 = tmp11 - tmp13;
  1132. /* Odd part */
  1133. z1 = (INT32) wsptr[1];
  1134. z2 = (INT32) wsptr[3];
  1135. z3 = (INT32) wsptr[5];
  1136. z3 <<= CONST_BITS;
  1137. z4 = (INT32) wsptr[7];
  1138. tmp11 = z2 + z4;
  1139. tmp13 = z2 - z4;
  1140. tmp12 = MULTIPLY(tmp13, FIX(0.309016994)); /* (c3-c7)/2 */
  1141. z2 = MULTIPLY(tmp11, FIX(0.951056516)); /* (c3+c7)/2 */
  1142. z4 = z3 + tmp12;
  1143. tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */
  1144. tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */
  1145. z2 = MULTIPLY(tmp11, FIX(0.587785252)); /* (c1-c9)/2 */
  1146. z4 = z3 - tmp12 - (tmp13 << (CONST_BITS - 1));
  1147. tmp12 = ((z1 - tmp13) << CONST_BITS) - z3;
  1148. tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */
  1149. tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */
  1150. /* Final output stage */
  1151. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  1152. CONST_BITS+PASS1_BITS+3)
  1153. & RANGE_MASK];
  1154. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  1155. CONST_BITS+PASS1_BITS+3)
  1156. & RANGE_MASK];
  1157. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  1158. CONST_BITS+PASS1_BITS+3)
  1159. & RANGE_MASK];
  1160. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  1161. CONST_BITS+PASS1_BITS+3)
  1162. & RANGE_MASK];
  1163. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  1164. CONST_BITS+PASS1_BITS+3)
  1165. & RANGE_MASK];
  1166. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  1167. CONST_BITS+PASS1_BITS+3)
  1168. & RANGE_MASK];
  1169. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
  1170. CONST_BITS+PASS1_BITS+3)
  1171. & RANGE_MASK];
  1172. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
  1173. CONST_BITS+PASS1_BITS+3)
  1174. & RANGE_MASK];
  1175. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
  1176. CONST_BITS+PASS1_BITS+3)
  1177. & RANGE_MASK];
  1178. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
  1179. CONST_BITS+PASS1_BITS+3)
  1180. & RANGE_MASK];
  1181. wsptr += 8; /* advance pointer to next row */
  1182. }
  1183. }
  1184. /*
  1185. * Perform dequantization and inverse DCT on one block of coefficients,
  1186. * producing an 11x11 output block.
  1187. *
  1188. * Optimized algorithm with 24 multiplications in the 1-D kernel.
  1189. * cK represents sqrt(2) * cos(K*pi/22).
  1190. */
  1191. GLOBAL(void)
  1192. jpeg_idct_11x11 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  1193. JCOEFPTR coef_block,
  1194. JSAMPARRAY output_buf, JDIMENSION output_col)
  1195. {
  1196. INT32 tmp10, tmp11, tmp12, tmp13, tmp14;
  1197. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25;
  1198. INT32 z1, z2, z3, z4;
  1199. JCOEFPTR inptr;
  1200. ISLOW_MULT_TYPE * quantptr;
  1201. int * wsptr;
  1202. JSAMPROW outptr;
  1203. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  1204. int ctr;
  1205. int workspace[8*11]; /* buffers data between passes */
  1206. SHIFT_TEMPS
  1207. /* Pass 1: process columns from input, store into work array. */
  1208. inptr = coef_block;
  1209. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  1210. wsptr = workspace;
  1211. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  1212. /* Even part */
  1213. tmp10 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  1214. tmp10 <<= CONST_BITS;
  1215. /* Add fudge factor here for final descale. */
  1216. tmp10 += ONE << (CONST_BITS-PASS1_BITS-1);
  1217. z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  1218. z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  1219. z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  1220. tmp20 = MULTIPLY(z2 - z3, FIX(2.546640132)); /* c2+c4 */
  1221. tmp23 = MULTIPLY(z2 - z1, FIX(0.430815045)); /* c2-c6 */
  1222. z4 = z1 + z3;
  1223. tmp24 = MULTIPLY(z4, - FIX(1.155664402)); /* -(c2-c10) */
  1224. z4 -= z2;
  1225. tmp25 = tmp10 + MULTIPLY(z4, FIX(1.356927976)); /* c2 */
  1226. tmp21 = tmp20 + tmp23 + tmp25 -
  1227. MULTIPLY(z2, FIX(1.821790775)); /* c2+c4+c10-c6 */
  1228. tmp20 += tmp25 + MULTIPLY(z3, FIX(2.115825087)); /* c4+c6 */
  1229. tmp23 += tmp25 - MULTIPLY(z1, FIX(1.513598477)); /* c6+c8 */
  1230. tmp24 += tmp25;
  1231. tmp22 = tmp24 - MULTIPLY(z3, FIX(0.788749120)); /* c8+c10 */
  1232. tmp24 += MULTIPLY(z2, FIX(1.944413522)) - /* c2+c8 */
  1233. MULTIPLY(z1, FIX(1.390975730)); /* c4+c10 */
  1234. tmp25 = tmp10 - MULTIPLY(z4, FIX(1.414213562)); /* c0 */
  1235. /* Odd part */
  1236. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  1237. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  1238. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  1239. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  1240. tmp11 = z1 + z2;
  1241. tmp14 = MULTIPLY(tmp11 + z3 + z4, FIX(0.398430003)); /* c9 */
  1242. tmp11 = MULTIPLY(tmp11, FIX(0.887983902)); /* c3-c9 */
  1243. tmp12 = MULTIPLY(z1 + z3, FIX(0.670361295)); /* c5-c9 */
  1244. tmp13 = tmp14 + MULTIPLY(z1 + z4, FIX(0.366151574)); /* c7-c9 */
  1245. tmp10 = tmp11 + tmp12 + tmp13 -
  1246. MULTIPLY(z1, FIX(0.923107866)); /* c7+c5+c3-c1-2*c9 */
  1247. z1 = tmp14 - MULTIPLY(z2 + z3, FIX(1.163011579)); /* c7+c9 */
  1248. tmp11 += z1 + MULTIPLY(z2, FIX(2.073276588)); /* c1+c7+3*c9-c3 */
  1249. tmp12 += z1 - MULTIPLY(z3, FIX(1.192193623)); /* c3+c5-c7-c9 */
  1250. z1 = MULTIPLY(z2 + z4, - FIX(1.798248910)); /* -(c1+c9) */
  1251. tmp11 += z1;
  1252. tmp13 += z1 + MULTIPLY(z4, FIX(2.102458632)); /* c1+c5+c9-c7 */
  1253. tmp14 += MULTIPLY(z2, - FIX(1.467221301)) + /* -(c5+c9) */
  1254. MULTIPLY(z3, FIX(1.001388905)) - /* c1-c9 */
  1255. MULTIPLY(z4, FIX(1.684843907)); /* c3+c9 */
  1256. /* Final output stage */
  1257. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
  1258. wsptr[8*10] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
  1259. wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
  1260. wsptr[8*9] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
  1261. wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
  1262. wsptr[8*8] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
  1263. wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
  1264. wsptr[8*7] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
  1265. wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
  1266. wsptr[8*6] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  1267. wsptr[8*5] = (int) RIGHT_SHIFT(tmp25, CONST_BITS-PASS1_BITS);
  1268. }
  1269. /* Pass 2: process 11 rows from work array, store into output array. */
  1270. wsptr = workspace;
  1271. for (ctr = 0; ctr < 11; ctr++) {
  1272. outptr = output_buf[ctr] + output_col;
  1273. /* Even part */
  1274. /* Add fudge factor here for final descale. */
  1275. tmp10 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  1276. tmp10 <<= CONST_BITS;
  1277. z1 = (INT32) wsptr[2];
  1278. z2 = (INT32) wsptr[4];
  1279. z3 = (INT32) wsptr[6];
  1280. tmp20 = MULTIPLY(z2 - z3, FIX(2.546640132)); /* c2+c4 */
  1281. tmp23 = MULTIPLY(z2 - z1, FIX(0.430815045)); /* c2-c6 */
  1282. z4 = z1 + z3;
  1283. tmp24 = MULTIPLY(z4, - FIX(1.155664402)); /* -(c2-c10) */
  1284. z4 -= z2;
  1285. tmp25 = tmp10 + MULTIPLY(z4, FIX(1.356927976)); /* c2 */
  1286. tmp21 = tmp20 + tmp23 + tmp25 -
  1287. MULTIPLY(z2, FIX(1.821790775)); /* c2+c4+c10-c6 */
  1288. tmp20 += tmp25 + MULTIPLY(z3, FIX(2.115825087)); /* c4+c6 */
  1289. tmp23 += tmp25 - MULTIPLY(z1, FIX(1.513598477)); /* c6+c8 */
  1290. tmp24 += tmp25;
  1291. tmp22 = tmp24 - MULTIPLY(z3, FIX(0.788749120)); /* c8+c10 */
  1292. tmp24 += MULTIPLY(z2, FIX(1.944413522)) - /* c2+c8 */
  1293. MULTIPLY(z1, FIX(1.390975730)); /* c4+c10 */
  1294. tmp25 = tmp10 - MULTIPLY(z4, FIX(1.414213562)); /* c0 */
  1295. /* Odd part */
  1296. z1 = (INT32) wsptr[1];
  1297. z2 = (INT32) wsptr[3];
  1298. z3 = (INT32) wsptr[5];
  1299. z4 = (INT32) wsptr[7];
  1300. tmp11 = z1 + z2;
  1301. tmp14 = MULTIPLY(tmp11 + z3 + z4, FIX(0.398430003)); /* c9 */
  1302. tmp11 = MULTIPLY(tmp11, FIX(0.887983902)); /* c3-c9 */
  1303. tmp12 = MULTIPLY(z1 + z3, FIX(0.670361295)); /* c5-c9 */
  1304. tmp13 = tmp14 + MULTIPLY(z1 + z4, FIX(0.366151574)); /* c7-c9 */
  1305. tmp10 = tmp11 + tmp12 + tmp13 -
  1306. MULTIPLY(z1, FIX(0.923107866)); /* c7+c5+c3-c1-2*c9 */
  1307. z1 = tmp14 - MULTIPLY(z2 + z3, FIX(1.163011579)); /* c7+c9 */
  1308. tmp11 += z1 + MULTIPLY(z2, FIX(2.073276588)); /* c1+c7+3*c9-c3 */
  1309. tmp12 += z1 - MULTIPLY(z3, FIX(1.192193623)); /* c3+c5-c7-c9 */
  1310. z1 = MULTIPLY(z2 + z4, - FIX(1.798248910)); /* -(c1+c9) */
  1311. tmp11 += z1;
  1312. tmp13 += z1 + MULTIPLY(z4, FIX(2.102458632)); /* c1+c5+c9-c7 */
  1313. tmp14 += MULTIPLY(z2, - FIX(1.467221301)) + /* -(c5+c9) */
  1314. MULTIPLY(z3, FIX(1.001388905)) - /* c1-c9 */
  1315. MULTIPLY(z4, FIX(1.684843907)); /* c3+c9 */
  1316. /* Final output stage */
  1317. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  1318. CONST_BITS+PASS1_BITS+3)
  1319. & RANGE_MASK];
  1320. outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  1321. CONST_BITS+PASS1_BITS+3)
  1322. & RANGE_MASK];
  1323. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  1324. CONST_BITS+PASS1_BITS+3)
  1325. & RANGE_MASK];
  1326. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  1327. CONST_BITS+PASS1_BITS+3)
  1328. & RANGE_MASK];
  1329. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  1330. CONST_BITS+PASS1_BITS+3)
  1331. & RANGE_MASK];
  1332. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  1333. CONST_BITS+PASS1_BITS+3)
  1334. & RANGE_MASK];
  1335. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
  1336. CONST_BITS+PASS1_BITS+3)
  1337. & RANGE_MASK];
  1338. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
  1339. CONST_BITS+PASS1_BITS+3)
  1340. & RANGE_MASK];
  1341. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
  1342. CONST_BITS+PASS1_BITS+3)
  1343. & RANGE_MASK];
  1344. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
  1345. CONST_BITS+PASS1_BITS+3)
  1346. & RANGE_MASK];
  1347. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25,
  1348. CONST_BITS+PASS1_BITS+3)
  1349. & RANGE_MASK];
  1350. wsptr += 8; /* advance pointer to next row */
  1351. }
  1352. }
  1353. /*
  1354. * Perform dequantization and inverse DCT on one block of coefficients,
  1355. * producing a 12x12 output block.
  1356. *
  1357. * Optimized algorithm with 15 multiplications in the 1-D kernel.
  1358. * cK represents sqrt(2) * cos(K*pi/24).
  1359. */
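/*
 * Worked value for the convention above (illustrative, not decoder code):
 * c4 = sqrt(2)*cos(4*pi/24) = sqrt(2)*cos(pi/6) = sqrt(6)/2 ~= 1.224744871,
 * the FIX() multiplier applied to the DCTSIZE*4 coefficient below.
 */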
  1360. GLOBAL(void)
  1361. jpeg_idct_12x12 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  1362. JCOEFPTR coef_block,
  1363. JSAMPARRAY output_buf, JDIMENSION output_col)
  1364. {
  1365. INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  1366. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25;
  1367. INT32 z1, z2, z3, z4;
  1368. JCOEFPTR inptr;
  1369. ISLOW_MULT_TYPE * quantptr;
  1370. int * wsptr;
  1371. JSAMPROW outptr;
  1372. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  1373. int ctr;
  1374. int workspace[8*12]; /* buffers data between passes */
  1375. SHIFT_TEMPS
  1376. /* Pass 1: process columns from input, store into work array. */
  1377. inptr = coef_block;
  1378. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  1379. wsptr = workspace;
  1380. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  1381. /* Even part */
  1382. z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  1383. z3 <<= CONST_BITS;
  1384. /* Add fudge factor here for final descale. */
  1385. z3 += ONE << (CONST_BITS-PASS1_BITS-1);
  1386. z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  1387. z4 = MULTIPLY(z4, FIX(1.224744871)); /* c4 */
  1388. tmp10 = z3 + z4;
  1389. tmp11 = z3 - z4;
  1390. z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  1391. z4 = MULTIPLY(z1, FIX(1.366025404)); /* c2 */
  1392. z1 <<= CONST_BITS;
  1393. z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  1394. z2 <<= CONST_BITS;
  1395. tmp12 = z1 - z2;
  1396. tmp21 = z3 + tmp12;
  1397. tmp24 = z3 - tmp12;
  1398. tmp12 = z4 + z2;
  1399. tmp20 = tmp10 + tmp12;
  1400. tmp25 = tmp10 - tmp12;
  1401. tmp12 = z4 - z1 - z2;
  1402. tmp22 = tmp11 + tmp12;
  1403. tmp23 = tmp11 - tmp12;
  1404. /* Odd part */
  1405. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  1406. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  1407. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  1408. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  1409. tmp11 = MULTIPLY(z2, FIX(1.306562965)); /* c3 */
  1410. tmp14 = MULTIPLY(z2, - FIX_0_541196100); /* -c9 */
  1411. tmp10 = z1 + z3;
  1412. tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669)); /* c7 */
  1413. tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */
  1414. tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */
  1415. tmp13 = MULTIPLY(z3 + z4, - FIX(1.045510580)); /* -(c7+c11) */
  1416. tmp12 += tmp13 + tmp14 - MULTIPLY(z3, FIX(1.478575242)); /* c1+c5-c7-c11 */
  1417. tmp13 += tmp15 - tmp11 + MULTIPLY(z4, FIX(1.586706681)); /* c1+c11 */
  1418. tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */
  1419. MULTIPLY(z4, FIX(1.982889723)); /* c5+c7 */
  1420. z1 -= z4;
  1421. z2 -= z3;
  1422. z3 = MULTIPLY(z1 + z2, FIX_0_541196100); /* c9 */
  1423. tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865); /* c3-c9 */
  1424. tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065); /* c3+c9 */
  1425. /* Final output stage */
  1426. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
  1427. wsptr[8*11] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
  1428. wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
  1429. wsptr[8*10] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
  1430. wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
  1431. wsptr[8*9] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
  1432. wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
  1433. wsptr[8*8] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
  1434. wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
  1435. wsptr[8*7] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  1436. wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
  1437. wsptr[8*6] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
  1438. }
  1439. /* Pass 2: process 12 rows from work array, store into output array. */
  1440. wsptr = workspace;
  1441. for (ctr = 0; ctr < 12; ctr++) {
  1442. outptr = output_buf[ctr] + output_col;
  1443. /* Even part */
  1444. /* Add fudge factor here for final descale. */
  1445. z3 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  1446. z3 <<= CONST_BITS;
  1447. z4 = (INT32) wsptr[4];
  1448. z4 = MULTIPLY(z4, FIX(1.224744871)); /* c4 */
  1449. tmp10 = z3 + z4;
  1450. tmp11 = z3 - z4;
  1451. z1 = (INT32) wsptr[2];
  1452. z4 = MULTIPLY(z1, FIX(1.366025404)); /* c2 */
  1453. z1 <<= CONST_BITS;
  1454. z2 = (INT32) wsptr[6];
  1455. z2 <<= CONST_BITS;
  1456. tmp12 = z1 - z2;
  1457. tmp21 = z3 + tmp12;
  1458. tmp24 = z3 - tmp12;
  1459. tmp12 = z4 + z2;
  1460. tmp20 = tmp10 + tmp12;
  1461. tmp25 = tmp10 - tmp12;
  1462. tmp12 = z4 - z1 - z2;
  1463. tmp22 = tmp11 + tmp12;
  1464. tmp23 = tmp11 - tmp12;
  1465. /* Odd part */
  1466. z1 = (INT32) wsptr[1];
  1467. z2 = (INT32) wsptr[3];
  1468. z3 = (INT32) wsptr[5];
  1469. z4 = (INT32) wsptr[7];
  1470. tmp11 = MULTIPLY(z2, FIX(1.306562965)); /* c3 */
  1471. tmp14 = MULTIPLY(z2, - FIX_0_541196100); /* -c9 */
  1472. tmp10 = z1 + z3;
  1473. tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669)); /* c7 */
  1474. tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */
  1475. tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */
  1476. tmp13 = MULTIPLY(z3 + z4, - FIX(1.045510580)); /* -(c7+c11) */
  1477. tmp12 += tmp13 + tmp14 - MULTIPLY(z3, FIX(1.478575242)); /* c1+c5-c7-c11 */
  1478. tmp13 += tmp15 - tmp11 + MULTIPLY(z4, FIX(1.586706681)); /* c1+c11 */
  1479. tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */
  1480. MULTIPLY(z4, FIX(1.982889723)); /* c5+c7 */
  1481. z1 -= z4;
  1482. z2 -= z3;
  1483. z3 = MULTIPLY(z1 + z2, FIX_0_541196100); /* c9 */
  1484. tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865); /* c3-c9 */
  1485. tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065); /* c3+c9 */
  1486. /* Final output stage */
  1487. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  1488. CONST_BITS+PASS1_BITS+3)
  1489. & RANGE_MASK];
  1490. outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  1491. CONST_BITS+PASS1_BITS+3)
  1492. & RANGE_MASK];
  1493. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  1494. CONST_BITS+PASS1_BITS+3)
  1495. & RANGE_MASK];
  1496. outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  1497. CONST_BITS+PASS1_BITS+3)
  1498. & RANGE_MASK];
  1499. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  1500. CONST_BITS+PASS1_BITS+3)
  1501. & RANGE_MASK];
  1502. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  1503. CONST_BITS+PASS1_BITS+3)
  1504. & RANGE_MASK];
  1505. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
  1506. CONST_BITS+PASS1_BITS+3)
  1507. & RANGE_MASK];
  1508. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
  1509. CONST_BITS+PASS1_BITS+3)
  1510. & RANGE_MASK];
  1511. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
  1512. CONST_BITS+PASS1_BITS+3)
  1513. & RANGE_MASK];
  1514. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
  1515. CONST_BITS+PASS1_BITS+3)
  1516. & RANGE_MASK];
  1517. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
  1518. CONST_BITS+PASS1_BITS+3)
  1519. & RANGE_MASK];
  1520. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
  1521. CONST_BITS+PASS1_BITS+3)
  1522. & RANGE_MASK];
  1523. wsptr += 8; /* advance pointer to next row */
  1524. }
  1525. }
  1526. /*
  1527. * Perform dequantization and inverse DCT on one block of coefficients,
  1528. * producing a 13x13 output block.
  1529. *
  1530. * Optimized algorithm with 29 multiplications in the 1-D kernel.
  1531. * cK represents sqrt(2) * cos(K*pi/26).
  1532. */
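/*
 * Worked value for the convention above (illustrative, not decoder code):
 * c0 = sqrt(2)*cos(0) = sqrt(2) ~= 1.414213562, the FIX() multiplier
 * labeled c0 in the tmp26 term below.
 */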
  1533. GLOBAL(void)
  1534. jpeg_idct_13x13 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  1535. JCOEFPTR coef_block,
  1536. JSAMPARRAY output_buf, JDIMENSION output_col)
  1537. {
  1538. INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  1539. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26;
  1540. INT32 z1, z2, z3, z4;
  1541. JCOEFPTR inptr;
  1542. ISLOW_MULT_TYPE * quantptr;
  1543. int * wsptr;
  1544. JSAMPROW outptr;
  1545. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  1546. int ctr;
  1547. int workspace[8*13]; /* buffers data between passes */
  1548. SHIFT_TEMPS
  1549. /* Pass 1: process columns from input, store into work array. */
  1550. inptr = coef_block;
  1551. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  1552. wsptr = workspace;
  1553. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  1554. /* Even part */
  1555. z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  1556. z1 <<= CONST_BITS;
  1557. /* Add fudge factor here for final descale. */
  1558. z1 += ONE << (CONST_BITS-PASS1_BITS-1);
  1559. z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  1560. z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  1561. z4 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  1562. tmp10 = z3 + z4;
  1563. tmp11 = z3 - z4;
  1564. tmp12 = MULTIPLY(tmp10, FIX(1.155388986)); /* (c4+c6)/2 */
  1565. tmp13 = MULTIPLY(tmp11, FIX(0.096834934)) + z1; /* (c4-c6)/2 */
  1566. tmp20 = MULTIPLY(z2, FIX(1.373119086)) + tmp12 + tmp13; /* c2 */
  1567. tmp22 = MULTIPLY(z2, FIX(0.501487041)) - tmp12 + tmp13; /* c10 */
  1568. tmp12 = MULTIPLY(tmp10, FIX(0.316450131)); /* (c8-c12)/2 */
  1569. tmp13 = MULTIPLY(tmp11, FIX(0.486914739)) + z1; /* (c8+c12)/2 */
  1570. tmp21 = MULTIPLY(z2, FIX(1.058554052)) - tmp12 + tmp13; /* c6 */
  1571. tmp25 = MULTIPLY(z2, - FIX(1.252223920)) + tmp12 + tmp13; /* c4 */
  1572. tmp12 = MULTIPLY(tmp10, FIX(0.435816023)); /* (c2-c10)/2 */
  1573. tmp13 = MULTIPLY(tmp11, FIX(0.937303064)) - z1; /* (c2+c10)/2 */
  1574. tmp23 = MULTIPLY(z2, - FIX(0.170464608)) - tmp12 - tmp13; /* c12 */
  1575. tmp24 = MULTIPLY(z2, - FIX(0.803364869)) + tmp12 - tmp13; /* c8 */
  1576. tmp26 = MULTIPLY(tmp11 - z2, FIX(1.414213562)) + z1; /* c0 */
  1577. /* Odd part */
  1578. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  1579. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  1580. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  1581. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  1582. tmp11 = MULTIPLY(z1 + z2, FIX(1.322312651)); /* c3 */
  1583. tmp12 = MULTIPLY(z1 + z3, FIX(1.163874945)); /* c5 */
  1584. tmp15 = z1 + z4;
  1585. tmp13 = MULTIPLY(tmp15, FIX(0.937797057)); /* c7 */
  1586. tmp10 = tmp11 + tmp12 + tmp13 -
  1587. MULTIPLY(z1, FIX(2.020082300)); /* c7+c5+c3-c1 */
  1588. tmp14 = MULTIPLY(z2 + z3, - FIX(0.338443458)); /* -c11 */
  1589. tmp11 += tmp14 + MULTIPLY(z2, FIX(0.837223564)); /* c5+c9+c11-c3 */
  1590. tmp12 += tmp14 - MULTIPLY(z3, FIX(1.572116027)); /* c1+c5-c9-c11 */
  1591. tmp14 = MULTIPLY(z2 + z4, - FIX(1.163874945)); /* -c5 */
  1592. tmp11 += tmp14;
  1593. tmp13 += tmp14 + MULTIPLY(z4, FIX(2.205608352)); /* c3+c5+c9-c7 */
  1594. tmp14 = MULTIPLY(z3 + z4, - FIX(0.657217813)); /* -c9 */
  1595. tmp12 += tmp14;
  1596. tmp13 += tmp14;
  1597. tmp15 = MULTIPLY(tmp15, FIX(0.338443458)); /* c11 */
  1598. tmp14 = tmp15 + MULTIPLY(z1, FIX(0.318774355)) - /* c9-c11 */
  1599. MULTIPLY(z2, FIX(0.466105296)); /* c1-c7 */
  1600. z1 = MULTIPLY(z3 - z2, FIX(0.937797057)); /* c7 */
  1601. tmp14 += z1;
  1602. tmp15 += z1 + MULTIPLY(z3, FIX(0.384515595)) - /* c3-c7 */
  1603. MULTIPLY(z4, FIX(1.742345811)); /* c1+c11 */
  1604. /* Final output stage */
  1605. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
  1606. wsptr[8*12] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
  1607. wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
  1608. wsptr[8*11] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
  1609. wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
  1610. wsptr[8*10] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
  1611. wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
  1612. wsptr[8*9] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
  1613. wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
  1614. wsptr[8*8] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  1615. wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
  1616. wsptr[8*7] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
  1617. wsptr[8*6] = (int) RIGHT_SHIFT(tmp26, CONST_BITS-PASS1_BITS);
  1618. }
  1619. /* Pass 2: process 13 rows from work array, store into output array. */
  1620. wsptr = workspace;
  1621. for (ctr = 0; ctr < 13; ctr++) {
  1622. outptr = output_buf[ctr] + output_col;
  1623. /* Even part */
  1624. /* Add fudge factor here for final descale. */
  1625. z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  1626. z1 <<= CONST_BITS;
  1627. z2 = (INT32) wsptr[2];
  1628. z3 = (INT32) wsptr[4];
  1629. z4 = (INT32) wsptr[6];
  1630. tmp10 = z3 + z4;
  1631. tmp11 = z3 - z4;
  1632. tmp12 = MULTIPLY(tmp10, FIX(1.155388986)); /* (c4+c6)/2 */
  1633. tmp13 = MULTIPLY(tmp11, FIX(0.096834934)) + z1; /* (c4-c6)/2 */
  1634. tmp20 = MULTIPLY(z2, FIX(1.373119086)) + tmp12 + tmp13; /* c2 */
  1635. tmp22 = MULTIPLY(z2, FIX(0.501487041)) - tmp12 + tmp13; /* c10 */
  1636. tmp12 = MULTIPLY(tmp10, FIX(0.316450131)); /* (c8-c12)/2 */
  1637. tmp13 = MULTIPLY(tmp11, FIX(0.486914739)) + z1; /* (c8+c12)/2 */
  1638. tmp21 = MULTIPLY(z2, FIX(1.058554052)) - tmp12 + tmp13; /* c6 */
  1639. tmp25 = MULTIPLY(z2, - FIX(1.252223920)) + tmp12 + tmp13; /* c4 */
  1640. tmp12 = MULTIPLY(tmp10, FIX(0.435816023)); /* (c2-c10)/2 */
  1641. tmp13 = MULTIPLY(tmp11, FIX(0.937303064)) - z1; /* (c2+c10)/2 */
  1642. tmp23 = MULTIPLY(z2, - FIX(0.170464608)) - tmp12 - tmp13; /* c12 */
  1643. tmp24 = MULTIPLY(z2, - FIX(0.803364869)) + tmp12 - tmp13; /* c8 */
  1644. tmp26 = MULTIPLY(tmp11 - z2, FIX(1.414213562)) + z1; /* c0 */
  1645. /* Odd part */
  1646. z1 = (INT32) wsptr[1];
  1647. z2 = (INT32) wsptr[3];
  1648. z3 = (INT32) wsptr[5];
  1649. z4 = (INT32) wsptr[7];
  1650. tmp11 = MULTIPLY(z1 + z2, FIX(1.322312651)); /* c3 */
  1651. tmp12 = MULTIPLY(z1 + z3, FIX(1.163874945)); /* c5 */
  1652. tmp15 = z1 + z4;
  1653. tmp13 = MULTIPLY(tmp15, FIX(0.937797057)); /* c7 */
  1654. tmp10 = tmp11 + tmp12 + tmp13 -
  1655. MULTIPLY(z1, FIX(2.020082300)); /* c7+c5+c3-c1 */
  1656. tmp14 = MULTIPLY(z2 + z3, - FIX(0.338443458)); /* -c11 */
  1657. tmp11 += tmp14 + MULTIPLY(z2, FIX(0.837223564)); /* c5+c9+c11-c3 */
  1658. tmp12 += tmp14 - MULTIPLY(z3, FIX(1.572116027)); /* c1+c5-c9-c11 */
  1659. tmp14 = MULTIPLY(z2 + z4, - FIX(1.163874945)); /* -c5 */
  1660. tmp11 += tmp14;
  1661. tmp13 += tmp14 + MULTIPLY(z4, FIX(2.205608352)); /* c3+c5+c9-c7 */
  1662. tmp14 = MULTIPLY(z3 + z4, - FIX(0.657217813)); /* -c9 */
  1663. tmp12 += tmp14;
  1664. tmp13 += tmp14;
  1665. tmp15 = MULTIPLY(tmp15, FIX(0.338443458)); /* c11 */
  1666. tmp14 = tmp15 + MULTIPLY(z1, FIX(0.318774355)) - /* c9-c11 */
  1667. MULTIPLY(z2, FIX(0.466105296)); /* c1-c7 */
  1668. z1 = MULTIPLY(z3 - z2, FIX(0.937797057)); /* c7 */
  1669. tmp14 += z1;
  1670. tmp15 += z1 + MULTIPLY(z3, FIX(0.384515595)) - /* c3-c7 */
  1671. MULTIPLY(z4, FIX(1.742345811)); /* c1+c11 */
  1672. /* Final output stage */
  1673. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  1674. CONST_BITS+PASS1_BITS+3)
  1675. & RANGE_MASK];
  1676. outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  1677. CONST_BITS+PASS1_BITS+3)
  1678. & RANGE_MASK];
  1679. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  1680. CONST_BITS+PASS1_BITS+3)
  1681. & RANGE_MASK];
  1682. outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  1683. CONST_BITS+PASS1_BITS+3)
  1684. & RANGE_MASK];
  1685. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  1686. CONST_BITS+PASS1_BITS+3)
  1687. & RANGE_MASK];
  1688. outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  1689. CONST_BITS+PASS1_BITS+3)
  1690. & RANGE_MASK];
  1691. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
  1692. CONST_BITS+PASS1_BITS+3)
  1693. & RANGE_MASK];
  1694. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
  1695. CONST_BITS+PASS1_BITS+3)
  1696. & RANGE_MASK];
  1697. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
  1698. CONST_BITS+PASS1_BITS+3)
  1699. & RANGE_MASK];
  1700. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
  1701. CONST_BITS+PASS1_BITS+3)
  1702. & RANGE_MASK];
  1703. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
  1704. CONST_BITS+PASS1_BITS+3)
  1705. & RANGE_MASK];
  1706. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
  1707. CONST_BITS+PASS1_BITS+3)
  1708. & RANGE_MASK];
  1709. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26,
  1710. CONST_BITS+PASS1_BITS+3)
  1711. & RANGE_MASK];
  1712. wsptr += 8; /* advance pointer to next row */
  1713. }
  1714. }
  1715. /*
  1716. * Perform dequantization and inverse DCT on one block of coefficients,
  1717. * producing a 14x14 output block.
  1718. *
  1719. * Optimized algorithm with 20 multiplications in the 1-D kernel.
  1720. * cK represents sqrt(2) * cos(K*pi/28).
  1721. */
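/*
 * Worked value for the convention above (illustrative, not decoder code):
 * c4 = sqrt(2)*cos(4*pi/28) = sqrt(2)*cos(pi/7) ~= 1.274162392, the first
 * FIX() multiplier applied to the DCTSIZE*4 coefficient below.
 */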
  1722. GLOBAL(void)
  1723. jpeg_idct_14x14 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  1724. JCOEFPTR coef_block,
  1725. JSAMPARRAY output_buf, JDIMENSION output_col)
  1726. {
  1727. INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  1728. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26;
  1729. INT32 z1, z2, z3, z4;
  1730. JCOEFPTR inptr;
  1731. ISLOW_MULT_TYPE * quantptr;
  1732. int * wsptr;
  1733. JSAMPROW outptr;
  1734. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  1735. int ctr;
  1736. int workspace[8*14]; /* buffers data between passes */
  1737. SHIFT_TEMPS
  1738. /* Pass 1: process columns from input, store into work array. */
  1739. inptr = coef_block;
  1740. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  1741. wsptr = workspace;
  1742. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  1743. /* Even part */
  1744. z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  1745. z1 <<= CONST_BITS;
  1746. /* Add fudge factor here for final descale. */
  1747. z1 += ONE << (CONST_BITS-PASS1_BITS-1);
  1748. z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  1749. z2 = MULTIPLY(z4, FIX(1.274162392)); /* c4 */
  1750. z3 = MULTIPLY(z4, FIX(0.314692123)); /* c12 */
  1751. z4 = MULTIPLY(z4, FIX(0.881747734)); /* c8 */
  1752. tmp10 = z1 + z2;
  1753. tmp11 = z1 + z3;
  1754. tmp12 = z1 - z4;
  1755. tmp23 = RIGHT_SHIFT(z1 - ((z2 + z3 - z4) << 1), /* c0 = (c4+c12-c8)*2 */
  1756. CONST_BITS-PASS1_BITS);
  1757. z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  1758. z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  1759. z3 = MULTIPLY(z1 + z2, FIX(1.105676686)); /* c6 */
  1760. tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */
  1761. tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */
  1762. tmp15 = MULTIPLY(z1, FIX(0.613604268)) - /* c10 */
  1763. MULTIPLY(z2, FIX(1.378756276)); /* c2 */
  1764. tmp20 = tmp10 + tmp13;
  1765. tmp26 = tmp10 - tmp13;
  1766. tmp21 = tmp11 + tmp14;
  1767. tmp25 = tmp11 - tmp14;
  1768. tmp22 = tmp12 + tmp15;
  1769. tmp24 = tmp12 - tmp15;
  1770. /* Odd part */
  1771. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  1772. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  1773. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  1774. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  1775. tmp13 = z4 << CONST_BITS;
  1776. tmp14 = z1 + z3;
  1777. tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */
  1778. tmp12 = MULTIPLY(tmp14, FIX(1.197448846)); /* c5 */
  1779. tmp10 = tmp11 + tmp12 + tmp13 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */
  1780. tmp14 = MULTIPLY(tmp14, FIX(0.752406978)); /* c9 */
  1781. tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */
  1782. z1 -= z2;
  1783. tmp15 = MULTIPLY(z1, FIX(0.467085129)) - tmp13; /* c11 */
  1784. tmp16 += tmp15;
  1785. z1 += z4;
  1786. z4 = MULTIPLY(z2 + z3, - FIX(0.158341681)) - tmp13; /* -c13 */
  1787. tmp11 += z4 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */
  1788. tmp12 += z4 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */
  1789. z4 = MULTIPLY(z3 - z2, FIX(1.405321284)); /* c1 */
  1790. tmp14 += z4 + tmp13 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */
  1791. tmp15 += z4 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */
  1792. tmp13 = (z1 - z3) << PASS1_BITS;
  1793. /* Final output stage */
  1794. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
  1795. wsptr[8*13] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
  1796. wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
  1797. wsptr[8*12] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
  1798. wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
  1799. wsptr[8*11] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
  1800. wsptr[8*3] = (int) (tmp23 + tmp13);
  1801. wsptr[8*10] = (int) (tmp23 - tmp13);
  1802. wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
  1803. wsptr[8*9] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  1804. wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
  1805. wsptr[8*8] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
  1806. wsptr[8*6] = (int) RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS-PASS1_BITS);
  1807. wsptr[8*7] = (int) RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS-PASS1_BITS);
  1808. }
  1809. /* Pass 2: process 14 rows from work array, store into output array. */
  1810. wsptr = workspace;
  1811. for (ctr = 0; ctr < 14; ctr++) {
  1812. outptr = output_buf[ctr] + output_col;
  1813. /* Even part */
  1814. /* Add fudge factor here for final descale. */
  1815. z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  1816. z1 <<= CONST_BITS;
  1817. z4 = (INT32) wsptr[4];
  1818. z2 = MULTIPLY(z4, FIX(1.274162392)); /* c4 */
  1819. z3 = MULTIPLY(z4, FIX(0.314692123)); /* c12 */
  1820. z4 = MULTIPLY(z4, FIX(0.881747734)); /* c8 */
  1821. tmp10 = z1 + z2;
  1822. tmp11 = z1 + z3;
  1823. tmp12 = z1 - z4;
  1824. tmp23 = z1 - ((z2 + z3 - z4) << 1); /* c0 = (c4+c12-c8)*2 */
  1825. z1 = (INT32) wsptr[2];
  1826. z2 = (INT32) wsptr[6];
  1827. z3 = MULTIPLY(z1 + z2, FIX(1.105676686)); /* c6 */
  1828. tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */
  1829. tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */
  1830. tmp15 = MULTIPLY(z1, FIX(0.613604268)) - /* c10 */
  1831. MULTIPLY(z2, FIX(1.378756276)); /* c2 */
  1832. tmp20 = tmp10 + tmp13;
  1833. tmp26 = tmp10 - tmp13;
  1834. tmp21 = tmp11 + tmp14;
  1835. tmp25 = tmp11 - tmp14;
  1836. tmp22 = tmp12 + tmp15;
  1837. tmp24 = tmp12 - tmp15;
  1838. /* Odd part */
  1839. z1 = (INT32) wsptr[1];
  1840. z2 = (INT32) wsptr[3];
  1841. z3 = (INT32) wsptr[5];
  1842. z4 = (INT32) wsptr[7];
  1843. z4 <<= CONST_BITS;
  1844. tmp14 = z1 + z3;
  1845. tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */
  1846. tmp12 = MULTIPLY(tmp14, FIX(1.197448846)); /* c5 */
  1847. tmp10 = tmp11 + tmp12 + z4 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */
  1848. tmp14 = MULTIPLY(tmp14, FIX(0.752406978)); /* c9 */
  1849. tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */
  1850. z1 -= z2;
  1851. tmp15 = MULTIPLY(z1, FIX(0.467085129)) - z4; /* c11 */
  1852. tmp16 += tmp15;
  1853. tmp13 = MULTIPLY(z2 + z3, - FIX(0.158341681)) - z4; /* -c13 */
  1854. tmp11 += tmp13 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */
  1855. tmp12 += tmp13 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */
  1856. tmp13 = MULTIPLY(z3 - z2, FIX(1.405321284)); /* c1 */
  1857. tmp14 += tmp13 + z4 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */
  1858. tmp15 += tmp13 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */
  1859. tmp13 = ((z1 - z3) << CONST_BITS) + z4;
  1860. /* Final output stage */
  1861. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  1862. CONST_BITS+PASS1_BITS+3)
  1863. & RANGE_MASK];
  1864. outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  1865. CONST_BITS+PASS1_BITS+3)
  1866. & RANGE_MASK];
  1867. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  1868. CONST_BITS+PASS1_BITS+3)
  1869. & RANGE_MASK];
  1870. outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  1871. CONST_BITS+PASS1_BITS+3)
  1872. & RANGE_MASK];
  1873. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  1874. CONST_BITS+PASS1_BITS+3)
  1875. & RANGE_MASK];
  1876. outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  1877. CONST_BITS+PASS1_BITS+3)
  1878. & RANGE_MASK];
  1879. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
  1880. CONST_BITS+PASS1_BITS+3)
  1881. & RANGE_MASK];
  1882. outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
  1883. CONST_BITS+PASS1_BITS+3)
  1884. & RANGE_MASK];
  1885. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
  1886. CONST_BITS+PASS1_BITS+3)
  1887. & RANGE_MASK];
  1888. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
  1889. CONST_BITS+PASS1_BITS+3)
  1890. & RANGE_MASK];
  1891. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
  1892. CONST_BITS+PASS1_BITS+3)
  1893. & RANGE_MASK];
  1894. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
  1895. CONST_BITS+PASS1_BITS+3)
  1896. & RANGE_MASK];
  1897. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp16,
  1898. CONST_BITS+PASS1_BITS+3)
  1899. & RANGE_MASK];
  1900. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp16,
  1901. CONST_BITS+PASS1_BITS+3)
  1902. & RANGE_MASK];
  1903. wsptr += 8; /* advance pointer to next row */
  1904. }
  1905. }
  1906. /*
  1907. * Perform dequantization and inverse DCT on one block of coefficients,
  1908. * producing a 15x15 output block.
  1909. *
  1910. * Optimized algorithm with 22 multiplications in the 1-D kernel.
  1911. * cK represents sqrt(2) * cos(K*pi/30).
  1912. */
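/*
 * Worked values for the convention above (illustrative, not decoder code):
 * c6 = sqrt(2)*cos(6*pi/30) = sqrt(2)*cos(pi/5) ~= 1.144122806 and
 * c12 = sqrt(2)*cos(12*pi/30) = sqrt(2)*cos(2*pi/5) ~= 0.437016024, the two
 * FIX() multipliers applied to the DCTSIZE*6 coefficient below.
 */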
  1913. GLOBAL(void)
  1914. jpeg_idct_15x15 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  1915. JCOEFPTR coef_block,
  1916. JSAMPARRAY output_buf, JDIMENSION output_col)
  1917. {
  1918. INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  1919. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27;
  1920. INT32 z1, z2, z3, z4;
  1921. JCOEFPTR inptr;
  1922. ISLOW_MULT_TYPE * quantptr;
  1923. int * wsptr;
  1924. JSAMPROW outptr;
  1925. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  1926. int ctr;
  1927. int workspace[8*15]; /* buffers data between passes */
  1928. SHIFT_TEMPS
  1929. /* Pass 1: process columns from input, store into work array. */
  1930. inptr = coef_block;
  1931. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  1932. wsptr = workspace;
  1933. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  1934. /* Even part */
  1935. z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  1936. z1 <<= CONST_BITS;
  1937. /* Add fudge factor here for final descale. */
  1938. z1 += ONE << (CONST_BITS-PASS1_BITS-1);
  1939. z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  1940. z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  1941. z4 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  1942. tmp10 = MULTIPLY(z4, FIX(0.437016024)); /* c12 */
  1943. tmp11 = MULTIPLY(z4, FIX(1.144122806)); /* c6 */
  1944. tmp12 = z1 - tmp10;
  1945. tmp13 = z1 + tmp11;
  1946. z1 -= (tmp11 - tmp10) << 1; /* c0 = (c6-c12)*2 */
  1947. z4 = z2 - z3;
  1948. z3 += z2;
  1949. tmp10 = MULTIPLY(z3, FIX(1.337628990)); /* (c2+c4)/2 */
  1950. tmp11 = MULTIPLY(z4, FIX(0.045680613)); /* (c2-c4)/2 */
  1951. z2 = MULTIPLY(z2, FIX(1.439773946)); /* c4+c14 */
  1952. tmp20 = tmp13 + tmp10 + tmp11;
  1953. tmp23 = tmp12 - tmp10 + tmp11 + z2;
  1954. tmp10 = MULTIPLY(z3, FIX(0.547059574)); /* (c8+c14)/2 */
  1955. tmp11 = MULTIPLY(z4, FIX(0.399234004)); /* (c8-c14)/2 */
  1956. tmp25 = tmp13 - tmp10 - tmp11;
  1957. tmp26 = tmp12 + tmp10 - tmp11 - z2;
  1958. tmp10 = MULTIPLY(z3, FIX(0.790569415)); /* (c6+c12)/2 */
  1959. tmp11 = MULTIPLY(z4, FIX(0.353553391)); /* (c6-c12)/2 */
  1960. tmp21 = tmp12 + tmp10 + tmp11;
  1961. tmp24 = tmp13 - tmp10 + tmp11;
  1962. tmp11 += tmp11;
  1963. tmp22 = z1 + tmp11; /* c10 = c6-c12 */
  1964. tmp27 = z1 - tmp11 - tmp11; /* c0 = (c6-c12)*2 */
  1965. /* Odd part */
  1966. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  1967. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  1968. z4 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  1969. z3 = MULTIPLY(z4, FIX(1.224744871)); /* c5 */
  1970. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  1971. tmp13 = z2 - z4;
  1972. tmp15 = MULTIPLY(z1 + tmp13, FIX(0.831253876)); /* c9 */
  1973. tmp11 = tmp15 + MULTIPLY(z1, FIX(0.513743148)); /* c3-c9 */
  1974. tmp14 = tmp15 - MULTIPLY(tmp13, FIX(2.176250899)); /* c3+c9 */
  1975. tmp13 = MULTIPLY(z2, - FIX(0.831253876)); /* -c9 */
  1976. tmp15 = MULTIPLY(z2, - FIX(1.344997024)); /* -c3 */
  1977. z2 = z1 - z4;
  1978. tmp12 = z3 + MULTIPLY(z2, FIX(1.406466353)); /* c1 */
  1979. tmp10 = tmp12 + MULTIPLY(z4, FIX(2.457431844)) - tmp15; /* c1+c7 */
  1980. tmp16 = tmp12 - MULTIPLY(z1, FIX(1.112434820)) + tmp13; /* c1-c13 */
  1981. tmp12 = MULTIPLY(z2, FIX(1.224744871)) - z3; /* c5 */
  1982. z2 = MULTIPLY(z1 + z4, FIX(0.575212477)); /* c11 */
  1983. tmp13 += z2 + MULTIPLY(z1, FIX(0.475753014)) - z3; /* c7-c11 */
  1984. tmp15 += z2 - MULTIPLY(z4, FIX(0.869244010)) + z3; /* c11+c13 */
  1985. /* Final output stage */
  1986. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
  1987. wsptr[8*14] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
  1988. wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
  1989. wsptr[8*13] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
  1990. wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
  1991. wsptr[8*12] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
  1992. wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
  1993. wsptr[8*11] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
  1994. wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
  1995. wsptr[8*10] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  1996. wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
  1997. wsptr[8*9] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
  1998. wsptr[8*6] = (int) RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS-PASS1_BITS);
  1999. wsptr[8*8] = (int) RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS-PASS1_BITS);
  2000. wsptr[8*7] = (int) RIGHT_SHIFT(tmp27, CONST_BITS-PASS1_BITS);
  2001. }
  2002. /* Pass 2: process 15 rows from work array, store into output array. */
  2003. wsptr = workspace;
  2004. for (ctr = 0; ctr < 15; ctr++) {
  2005. outptr = output_buf[ctr] + output_col;
  2006. /* Even part */
  2007. /* Add fudge factor here for final descale. */
  2008. z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  2009. z1 <<= CONST_BITS;
  2010. z2 = (INT32) wsptr[2];
  2011. z3 = (INT32) wsptr[4];
  2012. z4 = (INT32) wsptr[6];
  2013. tmp10 = MULTIPLY(z4, FIX(0.437016024)); /* c12 */
  2014. tmp11 = MULTIPLY(z4, FIX(1.144122806)); /* c6 */
  2015. tmp12 = z1 - tmp10;
  2016. tmp13 = z1 + tmp11;
  2017. z1 -= (tmp11 - tmp10) << 1; /* c0 = (c6-c12)*2 */
  2018. z4 = z2 - z3;
  2019. z3 += z2;
  2020. tmp10 = MULTIPLY(z3, FIX(1.337628990)); /* (c2+c4)/2 */
  2021. tmp11 = MULTIPLY(z4, FIX(0.045680613)); /* (c2-c4)/2 */
  2022. z2 = MULTIPLY(z2, FIX(1.439773946)); /* c4+c14 */
  2023. tmp20 = tmp13 + tmp10 + tmp11;
  2024. tmp23 = tmp12 - tmp10 + tmp11 + z2;
  2025. tmp10 = MULTIPLY(z3, FIX(0.547059574)); /* (c8+c14)/2 */
  2026. tmp11 = MULTIPLY(z4, FIX(0.399234004)); /* (c8-c14)/2 */
  2027. tmp25 = tmp13 - tmp10 - tmp11;
  2028. tmp26 = tmp12 + tmp10 - tmp11 - z2;
  2029. tmp10 = MULTIPLY(z3, FIX(0.790569415)); /* (c6+c12)/2 */
  2030. tmp11 = MULTIPLY(z4, FIX(0.353553391)); /* (c6-c12)/2 */
  2031. tmp21 = tmp12 + tmp10 + tmp11;
  2032. tmp24 = tmp13 - tmp10 + tmp11;
  2033. tmp11 += tmp11;
  2034. tmp22 = z1 + tmp11; /* c10 = c6-c12 */
  2035. tmp27 = z1 - tmp11 - tmp11; /* c0 = (c6-c12)*2 */
  2036. /* Odd part */
  2037. z1 = (INT32) wsptr[1];
  2038. z2 = (INT32) wsptr[3];
  2039. z4 = (INT32) wsptr[5];
  2040. z3 = MULTIPLY(z4, FIX(1.224744871)); /* c5 */
  2041. z4 = (INT32) wsptr[7];
  2042. tmp13 = z2 - z4;
  2043. tmp15 = MULTIPLY(z1 + tmp13, FIX(0.831253876)); /* c9 */
  2044. tmp11 = tmp15 + MULTIPLY(z1, FIX(0.513743148)); /* c3-c9 */
  2045. tmp14 = tmp15 - MULTIPLY(tmp13, FIX(2.176250899)); /* c3+c9 */
  2046. tmp13 = MULTIPLY(z2, - FIX(0.831253876)); /* -c9 */
  2047. tmp15 = MULTIPLY(z2, - FIX(1.344997024)); /* -c3 */
  2048. z2 = z1 - z4;
  2049. tmp12 = z3 + MULTIPLY(z2, FIX(1.406466353)); /* c1 */
  2050. tmp10 = tmp12 + MULTIPLY(z4, FIX(2.457431844)) - tmp15; /* c1+c7 */
  2051. tmp16 = tmp12 - MULTIPLY(z1, FIX(1.112434820)) + tmp13; /* c1-c13 */
  2052. tmp12 = MULTIPLY(z2, FIX(1.224744871)) - z3; /* c5 */
  2053. z2 = MULTIPLY(z1 + z4, FIX(0.575212477)); /* c11 */
  2054. tmp13 += z2 + MULTIPLY(z1, FIX(0.475753014)) - z3; /* c7-c11 */
  2055. tmp15 += z2 - MULTIPLY(z4, FIX(0.869244010)) + z3; /* c11+c13 */
  2056. /* Final output stage */
  2057. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  2058. CONST_BITS+PASS1_BITS+3)
  2059. & RANGE_MASK];
  2060. outptr[14] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  2061. CONST_BITS+PASS1_BITS+3)
  2062. & RANGE_MASK];
  2063. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  2064. CONST_BITS+PASS1_BITS+3)
  2065. & RANGE_MASK];
  2066. outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  2067. CONST_BITS+PASS1_BITS+3)
  2068. & RANGE_MASK];
  2069. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  2070. CONST_BITS+PASS1_BITS+3)
  2071. & RANGE_MASK];
  2072. outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  2073. CONST_BITS+PASS1_BITS+3)
  2074. & RANGE_MASK];
  2075. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
  2076. CONST_BITS+PASS1_BITS+3)
  2077. & RANGE_MASK];
  2078. outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
  2079. CONST_BITS+PASS1_BITS+3)
  2080. & RANGE_MASK];
  2081. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
  2082. CONST_BITS+PASS1_BITS+3)
  2083. & RANGE_MASK];
  2084. outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
  2085. CONST_BITS+PASS1_BITS+3)
  2086. & RANGE_MASK];
  2087. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
  2088. CONST_BITS+PASS1_BITS+3)
  2089. & RANGE_MASK];
  2090. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
  2091. CONST_BITS+PASS1_BITS+3)
  2092. & RANGE_MASK];
  2093. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp16,
  2094. CONST_BITS+PASS1_BITS+3)
  2095. & RANGE_MASK];
  2096. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp16,
  2097. CONST_BITS+PASS1_BITS+3)
  2098. & RANGE_MASK];
  2099. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp27,
  2100. CONST_BITS+PASS1_BITS+3)
  2101. & RANGE_MASK];
  2102. wsptr += 8; /* advance pointer to next row */
  2103. }
  2104. }
  2105. /*
  2106. * Perform dequantization and inverse DCT on one block of coefficients,
  2107. * producing a 16x16 output block.
  2108. *
  2109. * Optimized algorithm with 28 multiplications in the 1-D kernel.
  2110. * cK represents sqrt(2) * cos(K*pi/32).
  2111. */
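/*
 * Illustrative sketch (not used by the decoder, excluded from the build
 * with #if 0): for even K the 16-point constants reduce to the 8-point
 * ones, since K*pi/32 = (K/2)*pi/16; e.g. c4[16] = sqrt(2)*cos(pi/8)
 * ~= 1.306562965 = c2[8], and c12[16] = sqrt(2)*cos(3*pi/8)
 * ~= 0.541196100 = c6[8], which is why FIX_0_541196100 is reused below.
 * The helper name and the use of <math.h> are merely for illustration.
 */
#if 0
#include <math.h>

static double
idct_16_constant (int k)
{
  const double pi = 3.14159265358979323846;
  /* cK = sqrt(2) * cos(K*pi/32) for the 16-point kernel */
  return sqrt(2.0) * cos((double) k * pi / 32.0);
}
#endif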
  2112. GLOBAL(void)
  2113. jpeg_idct_16x16 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  2114. JCOEFPTR coef_block,
  2115. JSAMPARRAY output_buf, JDIMENSION output_col)
  2116. {
  2117. INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13;
  2118. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27;
  2119. INT32 z1, z2, z3, z4;
  2120. JCOEFPTR inptr;
  2121. ISLOW_MULT_TYPE * quantptr;
  2122. int * wsptr;
  2123. JSAMPROW outptr;
  2124. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  2125. int ctr;
  2126. int workspace[8*16]; /* buffers data between passes */
  2127. SHIFT_TEMPS
  2128. /* Pass 1: process columns from input, store into work array. */
  2129. inptr = coef_block;
  2130. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  2131. wsptr = workspace;
  2132. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  2133. /* Even part */
  2134. tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  2135. tmp0 <<= CONST_BITS;
  2136. /* Add fudge factor here for final descale. */
  2137. tmp0 += 1 << (CONST_BITS-PASS1_BITS-1);
  2138. z1 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  2139. tmp1 = MULTIPLY(z1, FIX(1.306562965)); /* c4[16] = c2[8] */
  2140. tmp2 = MULTIPLY(z1, FIX_0_541196100); /* c12[16] = c6[8] */
  2141. tmp10 = tmp0 + tmp1;
  2142. tmp11 = tmp0 - tmp1;
  2143. tmp12 = tmp0 + tmp2;
  2144. tmp13 = tmp0 - tmp2;
  2145. z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  2146. z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  2147. z3 = z1 - z2;
  2148. z4 = MULTIPLY(z3, FIX(0.275899379)); /* c14[16] = c7[8] */
  2149. z3 = MULTIPLY(z3, FIX(1.387039845)); /* c2[16] = c1[8] */
  2150. tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447); /* (c6+c2)[16] = (c3+c1)[8] */
  2151. tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223); /* (c6-c14)[16] = (c3-c7)[8] */
  2152. tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */
  2153. tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */
  2154. tmp20 = tmp10 + tmp0;
  2155. tmp27 = tmp10 - tmp0;
  2156. tmp21 = tmp12 + tmp1;
  2157. tmp26 = tmp12 - tmp1;
  2158. tmp22 = tmp13 + tmp2;
  2159. tmp25 = tmp13 - tmp2;
  2160. tmp23 = tmp11 + tmp3;
  2161. tmp24 = tmp11 - tmp3;
  2162. /* Odd part */
  2163. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  2164. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  2165. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  2166. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  2167. tmp11 = z1 + z3;
  2168. tmp1 = MULTIPLY(z1 + z2, FIX(1.353318001)); /* c3 */
  2169. tmp2 = MULTIPLY(tmp11, FIX(1.247225013)); /* c5 */
  2170. tmp3 = MULTIPLY(z1 + z4, FIX(1.093201867)); /* c7 */
  2171. tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586)); /* c9 */
  2172. tmp11 = MULTIPLY(tmp11, FIX(0.666655658)); /* c11 */
  2173. tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528)); /* c13 */
  2174. tmp0 = tmp1 + tmp2 + tmp3 -
  2175. MULTIPLY(z1, FIX(2.286341144)); /* c7+c5+c3-c1 */
  2176. tmp13 = tmp10 + tmp11 + tmp12 -
  2177. MULTIPLY(z1, FIX(1.835730603)); /* c9+c11+c13-c15 */
  2178. z1 = MULTIPLY(z2 + z3, FIX(0.138617169)); /* c15 */
  2179. tmp1 += z1 + MULTIPLY(z2, FIX(0.071888074)); /* c9+c11-c3-c15 */
  2180. tmp2 += z1 - MULTIPLY(z3, FIX(1.125726048)); /* c5+c7+c15-c3 */
  2181. z1 = MULTIPLY(z3 - z2, FIX(1.407403738)); /* c1 */
  2182. tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282)); /* c1+c11-c9-c13 */
  2183. tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411)); /* c1+c5+c13-c7 */
  2184. z2 += z4;
  2185. z1 = MULTIPLY(z2, - FIX(0.666655658)); /* -c11 */
  2186. tmp1 += z1;
  2187. tmp3 += z1 + MULTIPLY(z4, FIX(1.065388962)); /* c3+c11+c15-c7 */
  2188. z2 = MULTIPLY(z2, - FIX(1.247225013)); /* -c5 */
  2189. tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809)); /* c1+c5+c9-c13 */
  2190. tmp12 += z2;
  2191. z2 = MULTIPLY(z3 + z4, - FIX(1.353318001)); /* -c3 */
  2192. tmp2 += z2;
  2193. tmp3 += z2;
  2194. z2 = MULTIPLY(z4 - z3, FIX(0.410524528)); /* c13 */
  2195. tmp10 += z2;
  2196. tmp11 += z2;
  2197. /* Final output stage */
  2198. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp0, CONST_BITS-PASS1_BITS);
  2199. wsptr[8*15] = (int) RIGHT_SHIFT(tmp20 - tmp0, CONST_BITS-PASS1_BITS);
  2200. wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp1, CONST_BITS-PASS1_BITS);
  2201. wsptr[8*14] = (int) RIGHT_SHIFT(tmp21 - tmp1, CONST_BITS-PASS1_BITS);
  2202. wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp2, CONST_BITS-PASS1_BITS);
  2203. wsptr[8*13] = (int) RIGHT_SHIFT(tmp22 - tmp2, CONST_BITS-PASS1_BITS);
  2204. wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp3, CONST_BITS-PASS1_BITS);
  2205. wsptr[8*12] = (int) RIGHT_SHIFT(tmp23 - tmp3, CONST_BITS-PASS1_BITS);
  2206. wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp10, CONST_BITS-PASS1_BITS);
  2207. wsptr[8*11] = (int) RIGHT_SHIFT(tmp24 - tmp10, CONST_BITS-PASS1_BITS);
  2208. wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp11, CONST_BITS-PASS1_BITS);
  2209. wsptr[8*10] = (int) RIGHT_SHIFT(tmp25 - tmp11, CONST_BITS-PASS1_BITS);
  2210. wsptr[8*6] = (int) RIGHT_SHIFT(tmp26 + tmp12, CONST_BITS-PASS1_BITS);
  2211. wsptr[8*9] = (int) RIGHT_SHIFT(tmp26 - tmp12, CONST_BITS-PASS1_BITS);
  2212. wsptr[8*7] = (int) RIGHT_SHIFT(tmp27 + tmp13, CONST_BITS-PASS1_BITS);
  2213. wsptr[8*8] = (int) RIGHT_SHIFT(tmp27 - tmp13, CONST_BITS-PASS1_BITS);
  2214. }
  2215. /* Pass 2: process 16 rows from work array, store into output array. */
  2216. wsptr = workspace;
  2217. for (ctr = 0; ctr < 16; ctr++) {
  2218. outptr = output_buf[ctr] + output_col;
  2219. /* Even part */
  2220. /* Add fudge factor here for final descale. */
  2221. tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  2222. tmp0 <<= CONST_BITS;
  2223. z1 = (INT32) wsptr[4];
  2224. tmp1 = MULTIPLY(z1, FIX(1.306562965)); /* c4[16] = c2[8] */
  2225. tmp2 = MULTIPLY(z1, FIX_0_541196100); /* c12[16] = c6[8] */
  2226. tmp10 = tmp0 + tmp1;
  2227. tmp11 = tmp0 - tmp1;
  2228. tmp12 = tmp0 + tmp2;
  2229. tmp13 = tmp0 - tmp2;
  2230. z1 = (INT32) wsptr[2];
  2231. z2 = (INT32) wsptr[6];
  2232. z3 = z1 - z2;
  2233. z4 = MULTIPLY(z3, FIX(0.275899379)); /* c14[16] = c7[8] */
  2234. z3 = MULTIPLY(z3, FIX(1.387039845)); /* c2[16] = c1[8] */
  2235. tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447); /* (c6+c2)[16] = (c3+c1)[8] */
  2236. tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223); /* (c6-c14)[16] = (c3-c7)[8] */
  2237. tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */
  2238. tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */
  2239. tmp20 = tmp10 + tmp0;
  2240. tmp27 = tmp10 - tmp0;
  2241. tmp21 = tmp12 + tmp1;
  2242. tmp26 = tmp12 - tmp1;
  2243. tmp22 = tmp13 + tmp2;
  2244. tmp25 = tmp13 - tmp2;
  2245. tmp23 = tmp11 + tmp3;
  2246. tmp24 = tmp11 - tmp3;
  2247. /* Odd part */
  2248. z1 = (INT32) wsptr[1];
  2249. z2 = (INT32) wsptr[3];
  2250. z3 = (INT32) wsptr[5];
  2251. z4 = (INT32) wsptr[7];
  2252. tmp11 = z1 + z3;
  2253. tmp1 = MULTIPLY(z1 + z2, FIX(1.353318001)); /* c3 */
  2254. tmp2 = MULTIPLY(tmp11, FIX(1.247225013)); /* c5 */
  2255. tmp3 = MULTIPLY(z1 + z4, FIX(1.093201867)); /* c7 */
  2256. tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586)); /* c9 */
  2257. tmp11 = MULTIPLY(tmp11, FIX(0.666655658)); /* c11 */
  2258. tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528)); /* c13 */
  2259. tmp0 = tmp1 + tmp2 + tmp3 -
  2260. MULTIPLY(z1, FIX(2.286341144)); /* c7+c5+c3-c1 */
  2261. tmp13 = tmp10 + tmp11 + tmp12 -
  2262. MULTIPLY(z1, FIX(1.835730603)); /* c9+c11+c13-c15 */
  2263. z1 = MULTIPLY(z2 + z3, FIX(0.138617169)); /* c15 */
  2264. tmp1 += z1 + MULTIPLY(z2, FIX(0.071888074)); /* c9+c11-c3-c15 */
  2265. tmp2 += z1 - MULTIPLY(z3, FIX(1.125726048)); /* c5+c7+c15-c3 */
  2266. z1 = MULTIPLY(z3 - z2, FIX(1.407403738)); /* c1 */
  2267. tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282)); /* c1+c11-c9-c13 */
  2268. tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411)); /* c1+c5+c13-c7 */
  2269. z2 += z4;
  2270. z1 = MULTIPLY(z2, - FIX(0.666655658)); /* -c11 */
  2271. tmp1 += z1;
  2272. tmp3 += z1 + MULTIPLY(z4, FIX(1.065388962)); /* c3+c11+c15-c7 */
  2273. z2 = MULTIPLY(z2, - FIX(1.247225013)); /* -c5 */
  2274. tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809)); /* c1+c5+c9-c13 */
  2275. tmp12 += z2;
  2276. z2 = MULTIPLY(z3 + z4, - FIX(1.353318001)); /* -c3 */
  2277. tmp2 += z2;
  2278. tmp3 += z2;
  2279. z2 = MULTIPLY(z4 - z3, FIX(0.410524528)); /* c13 */
  2280. tmp10 += z2;
  2281. tmp11 += z2;
  2282. /* Final output stage */
  2283. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp0,
  2284. CONST_BITS+PASS1_BITS+3)
  2285. & RANGE_MASK];
  2286. outptr[15] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp0,
  2287. CONST_BITS+PASS1_BITS+3)
  2288. & RANGE_MASK];
  2289. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp1,
  2290. CONST_BITS+PASS1_BITS+3)
  2291. & RANGE_MASK];
  2292. outptr[14] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp1,
  2293. CONST_BITS+PASS1_BITS+3)
  2294. & RANGE_MASK];
  2295. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp2,
  2296. CONST_BITS+PASS1_BITS+3)
  2297. & RANGE_MASK];
  2298. outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp2,
  2299. CONST_BITS+PASS1_BITS+3)
  2300. & RANGE_MASK];
  2301. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp3,
  2302. CONST_BITS+PASS1_BITS+3)
  2303. & RANGE_MASK];
  2304. outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp3,
  2305. CONST_BITS+PASS1_BITS+3)
  2306. & RANGE_MASK];
  2307. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp10,
  2308. CONST_BITS+PASS1_BITS+3)
  2309. & RANGE_MASK];
  2310. outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp10,
  2311. CONST_BITS+PASS1_BITS+3)
  2312. & RANGE_MASK];
  2313. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp11,
  2314. CONST_BITS+PASS1_BITS+3)
  2315. & RANGE_MASK];
  2316. outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp11,
  2317. CONST_BITS+PASS1_BITS+3)
  2318. & RANGE_MASK];
  2319. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp12,
  2320. CONST_BITS+PASS1_BITS+3)
  2321. & RANGE_MASK];
  2322. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp12,
  2323. CONST_BITS+PASS1_BITS+3)
  2324. & RANGE_MASK];
  2325. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp27 + tmp13,
  2326. CONST_BITS+PASS1_BITS+3)
  2327. & RANGE_MASK];
  2328. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp27 - tmp13,
  2329. CONST_BITS+PASS1_BITS+3)
  2330. & RANGE_MASK];
  2331. wsptr += 8; /* advance pointer to next row */
  2332. }
  2333. }
  2334. /*
  2335. * Perform dequantization and inverse DCT on one block of coefficients,
  2336. * producing a 16x8 output block.
  2337. *
  2338. * 8-point IDCT in pass 1 (columns), 16-point in pass 2 (rows).
  2339. */
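/*
 * Structure note (illustrative, derived from the code below): pass 1 applies
 * the 8-point even/odd kernel to each of the 8 columns, and pass 2 applies
 * the 16-point kernel (cK = sqrt(2)*cos(K*pi/32)) to each of the 8 rows,
 * emitting 16 samples per row; e.g. c3 = sqrt(2)*cos(3*pi/32) ~= 1.353318001,
 * the FIX() value used in the odd part of pass 2.
 */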
  2340. GLOBAL(void)
  2341. jpeg_idct_16x8 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  2342. JCOEFPTR coef_block,
  2343. JSAMPARRAY output_buf, JDIMENSION output_col)
  2344. {
  2345. INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13;
  2346. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27;
  2347. INT32 z1, z2, z3, z4;
  2348. JCOEFPTR inptr;
  2349. ISLOW_MULT_TYPE * quantptr;
  2350. int * wsptr;
  2351. JSAMPROW outptr;
  2352. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  2353. int ctr;
  2354. int workspace[8*8]; /* buffers data between passes */
  2355. SHIFT_TEMPS
  2356. /* Pass 1: process columns from input, store into work array. */
  2357. /* Note results are scaled up by sqrt(8) compared to a true IDCT; */
  2358. /* furthermore, we scale the results by 2**PASS1_BITS. */
  2359. inptr = coef_block;
  2360. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  2361. wsptr = workspace;
  2362. for (ctr = DCTSIZE; ctr > 0; ctr--) {
  2363. /* Due to quantization, we will usually find that many of the input
  2364. * coefficients are zero, especially the AC terms. We can exploit this
  2365. * by short-circuiting the IDCT calculation for any column in which all
  2366. * the AC terms are zero. In that case each output is equal to the
  2367. * DC coefficient (with scale factor as needed).
  2368. * With typical images and quantization tables, half or more of the
  2369. * column DCT calculations can be simplified this way.
  2370. */
  2371. if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 &&
  2372. inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*4] == 0 &&
  2373. inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*6] == 0 &&
  2374. inptr[DCTSIZE*7] == 0) {
  2375. /* AC terms all zero */
  2376. int dcval = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) << PASS1_BITS;
  2377. wsptr[DCTSIZE*0] = dcval;
  2378. wsptr[DCTSIZE*1] = dcval;
  2379. wsptr[DCTSIZE*2] = dcval;
  2380. wsptr[DCTSIZE*3] = dcval;
  2381. wsptr[DCTSIZE*4] = dcval;
  2382. wsptr[DCTSIZE*5] = dcval;
  2383. wsptr[DCTSIZE*6] = dcval;
  2384. wsptr[DCTSIZE*7] = dcval;
  2385. inptr++; /* advance pointers to next column */
  2386. quantptr++;
  2387. wsptr++;
  2388. continue;
  2389. }
  2390. /* Even part: reverse the even part of the forward DCT. */
  2391. /* The rotator is sqrt(2)*c(-6). */
  2392. z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  2393. z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  2394. z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
  2395. tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865);
  2396. tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065);
  2397. z2 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  2398. z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  2399. z2 <<= CONST_BITS;
  2400. z3 <<= CONST_BITS;
  2401. /* Add fudge factor here for final descale. */
  2402. z2 += ONE << (CONST_BITS-PASS1_BITS-1);
  2403. tmp0 = z2 + z3;
  2404. tmp1 = z2 - z3;
  2405. tmp10 = tmp0 + tmp2;
  2406. tmp13 = tmp0 - tmp2;
  2407. tmp11 = tmp1 + tmp3;
  2408. tmp12 = tmp1 - tmp3;
  2409. /* Odd part per figure 8; the matrix is unitary and hence its
  2410. * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
  2411. */
  2412. tmp0 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  2413. tmp1 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  2414. tmp2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  2415. tmp3 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  2416. z2 = tmp0 + tmp2;
  2417. z3 = tmp1 + tmp3;
  2418. z1 = MULTIPLY(z2 + z3, FIX_1_175875602); /* sqrt(2) * c3 */
  2419. z2 = MULTIPLY(z2, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
  2420. z3 = MULTIPLY(z3, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
  2421. z2 += z1;
  2422. z3 += z1;
  2423. z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
  2424. tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
  2425. tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
  2426. tmp0 += z1 + z2;
  2427. tmp3 += z1 + z3;
  2428. z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
  2429. tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
  2430. tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
  2431. tmp1 += z1 + z3;
  2432. tmp2 += z1 + z2;
  2433. /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
  2434. wsptr[DCTSIZE*0] = (int) RIGHT_SHIFT(tmp10 + tmp3, CONST_BITS-PASS1_BITS);
  2435. wsptr[DCTSIZE*7] = (int) RIGHT_SHIFT(tmp10 - tmp3, CONST_BITS-PASS1_BITS);
  2436. wsptr[DCTSIZE*1] = (int) RIGHT_SHIFT(tmp11 + tmp2, CONST_BITS-PASS1_BITS);
  2437. wsptr[DCTSIZE*6] = (int) RIGHT_SHIFT(tmp11 - tmp2, CONST_BITS-PASS1_BITS);
  2438. wsptr[DCTSIZE*2] = (int) RIGHT_SHIFT(tmp12 + tmp1, CONST_BITS-PASS1_BITS);
  2439. wsptr[DCTSIZE*5] = (int) RIGHT_SHIFT(tmp12 - tmp1, CONST_BITS-PASS1_BITS);
  2440. wsptr[DCTSIZE*3] = (int) RIGHT_SHIFT(tmp13 + tmp0, CONST_BITS-PASS1_BITS);
  2441. wsptr[DCTSIZE*4] = (int) RIGHT_SHIFT(tmp13 - tmp0, CONST_BITS-PASS1_BITS);
  2442. inptr++; /* advance pointers to next column */
  2443. quantptr++;
  2444. wsptr++;
  2445. }
  2446. /* Pass 2: process 8 rows from work array, store into output array.
  2447. * 16-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/32).
  2448. */
  2449. wsptr = workspace;
  2450. for (ctr = 0; ctr < 8; ctr++) {
  2451. outptr = output_buf[ctr] + output_col;
  2452. /* Even part */
  2453. /* Add fudge factor here for final descale. */
  2454. tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  2455. tmp0 <<= CONST_BITS;
  2456. z1 = (INT32) wsptr[4];
  2457. tmp1 = MULTIPLY(z1, FIX(1.306562965)); /* c4[16] = c2[8] */
  2458. tmp2 = MULTIPLY(z1, FIX_0_541196100); /* c12[16] = c6[8] */
  2459. tmp10 = tmp0 + tmp1;
  2460. tmp11 = tmp0 - tmp1;
  2461. tmp12 = tmp0 + tmp2;
  2462. tmp13 = tmp0 - tmp2;
  2463. z1 = (INT32) wsptr[2];
  2464. z2 = (INT32) wsptr[6];
  2465. z3 = z1 - z2;
  2466. z4 = MULTIPLY(z3, FIX(0.275899379)); /* c14[16] = c7[8] */
  2467. z3 = MULTIPLY(z3, FIX(1.387039845)); /* c2[16] = c1[8] */
  2468. tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447); /* (c6+c2)[16] = (c3+c1)[8] */
  2469. tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223); /* (c6-c14)[16] = (c3-c7)[8] */
  2470. tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */
  2471. tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */
  2472. tmp20 = tmp10 + tmp0;
  2473. tmp27 = tmp10 - tmp0;
  2474. tmp21 = tmp12 + tmp1;
  2475. tmp26 = tmp12 - tmp1;
  2476. tmp22 = tmp13 + tmp2;
  2477. tmp25 = tmp13 - tmp2;
  2478. tmp23 = tmp11 + tmp3;
  2479. tmp24 = tmp11 - tmp3;
  2480. /* Odd part */
  2481. z1 = (INT32) wsptr[1];
  2482. z2 = (INT32) wsptr[3];
  2483. z3 = (INT32) wsptr[5];
  2484. z4 = (INT32) wsptr[7];
  2485. tmp11 = z1 + z3;
  2486. tmp1 = MULTIPLY(z1 + z2, FIX(1.353318001)); /* c3 */
  2487. tmp2 = MULTIPLY(tmp11, FIX(1.247225013)); /* c5 */
  2488. tmp3 = MULTIPLY(z1 + z4, FIX(1.093201867)); /* c7 */
  2489. tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586)); /* c9 */
  2490. tmp11 = MULTIPLY(tmp11, FIX(0.666655658)); /* c11 */
  2491. tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528)); /* c13 */
  2492. tmp0 = tmp1 + tmp2 + tmp3 -
  2493. MULTIPLY(z1, FIX(2.286341144)); /* c7+c5+c3-c1 */
  2494. tmp13 = tmp10 + tmp11 + tmp12 -
  2495. MULTIPLY(z1, FIX(1.835730603)); /* c9+c11+c13-c15 */
  2496. z1 = MULTIPLY(z2 + z3, FIX(0.138617169)); /* c15 */
  2497. tmp1 += z1 + MULTIPLY(z2, FIX(0.071888074)); /* c9+c11-c3-c15 */
  2498. tmp2 += z1 - MULTIPLY(z3, FIX(1.125726048)); /* c5+c7+c15-c3 */
  2499. z1 = MULTIPLY(z3 - z2, FIX(1.407403738)); /* c1 */
  2500. tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282)); /* c1+c11-c9-c13 */
  2501. tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411)); /* c1+c5+c13-c7 */
  2502. z2 += z4;
  2503. z1 = MULTIPLY(z2, - FIX(0.666655658)); /* -c11 */
  2504. tmp1 += z1;
  2505. tmp3 += z1 + MULTIPLY(z4, FIX(1.065388962)); /* c3+c11+c15-c7 */
  2506. z2 = MULTIPLY(z2, - FIX(1.247225013)); /* -c5 */
  2507. tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809)); /* c1+c5+c9-c13 */
  2508. tmp12 += z2;
  2509. z2 = MULTIPLY(z3 + z4, - FIX(1.353318001)); /* -c3 */
  2510. tmp2 += z2;
  2511. tmp3 += z2;
  2512. z2 = MULTIPLY(z4 - z3, FIX(0.410524528)); /* c13 */
  2513. tmp10 += z2;
  2514. tmp11 += z2;
  2515. /* Final output stage */
  2516. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp0,
  2517. CONST_BITS+PASS1_BITS+3)
  2518. & RANGE_MASK];
  2519. outptr[15] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp0,
  2520. CONST_BITS+PASS1_BITS+3)
  2521. & RANGE_MASK];
  2522. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp1,
  2523. CONST_BITS+PASS1_BITS+3)
  2524. & RANGE_MASK];
  2525. outptr[14] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp1,
  2526. CONST_BITS+PASS1_BITS+3)
  2527. & RANGE_MASK];
  2528. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp2,
  2529. CONST_BITS+PASS1_BITS+3)
  2530. & RANGE_MASK];
  2531. outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp2,
  2532. CONST_BITS+PASS1_BITS+3)
  2533. & RANGE_MASK];
  2534. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp3,
  2535. CONST_BITS+PASS1_BITS+3)
  2536. & RANGE_MASK];
  2537. outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp3,
  2538. CONST_BITS+PASS1_BITS+3)
  2539. & RANGE_MASK];
  2540. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp10,
  2541. CONST_BITS+PASS1_BITS+3)
  2542. & RANGE_MASK];
  2543. outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp10,
  2544. CONST_BITS+PASS1_BITS+3)
  2545. & RANGE_MASK];
  2546. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp11,
  2547. CONST_BITS+PASS1_BITS+3)
  2548. & RANGE_MASK];
  2549. outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp11,
  2550. CONST_BITS+PASS1_BITS+3)
  2551. & RANGE_MASK];
  2552. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp12,
  2553. CONST_BITS+PASS1_BITS+3)
  2554. & RANGE_MASK];
  2555. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp12,
  2556. CONST_BITS+PASS1_BITS+3)
  2557. & RANGE_MASK];
  2558. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp27 + tmp13,
  2559. CONST_BITS+PASS1_BITS+3)
  2560. & RANGE_MASK];
  2561. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp27 - tmp13,
  2562. CONST_BITS+PASS1_BITS+3)
  2563. & RANGE_MASK];
  2564. wsptr += 8; /* advance pointer to next row */
  2565. }
  2566. }
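/*
 * Illustrative sketch (guarded out of the build, not part of the IJG
 * code): the fixed-point pattern used by every routine in this file.
 * Constants are pre-scaled by 2^CONST_BITS via FIX(), pass 1 keeps
 * PASS1_BITS extra fraction bits, and adding ONE << (shift-1) before a
 * right shift (the "fudge factor") turns truncation into
 * round-to-nearest.  The names below are assumptions for this
 * stand-alone demo only.
 */
#if 0
#include <stdio.h>

#define SK_CONST_BITS 13
#define SK_ONE ((long) 1)
#define SK_FIX(x) ((long) ((x) * (SK_ONE << SK_CONST_BITS) + 0.5))

/* Descale an accumulator with rounding, as the output stages above do. */
static long sk_descale (long acc, int shift)
{
  return (acc + (SK_ONE << (shift - 1))) >> shift;
}

int main (void)
{
  long x = 100;
  /* 100 * 0.541196100 = 54.1, which rounds to 54 */
  printf("%ld\n", sk_descale(x * SK_FIX(0.541196100), SK_CONST_BITS));
  return 0;
}
#endif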
  2567. /*
  2568. * Perform dequantization and inverse DCT on one block of coefficients,
  2569. * producing a 14x7 output block.
  2570. *
  2571. * 7-point IDCT in pass 1 (columns), 14-point in pass 2 (rows).
  2572. */
  2573. GLOBAL(void)
  2574. jpeg_idct_14x7 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  2575. JCOEFPTR coef_block,
  2576. JSAMPARRAY output_buf, JDIMENSION output_col)
  2577. {
  2578. INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  2579. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26;
  2580. INT32 z1, z2, z3, z4;
  2581. JCOEFPTR inptr;
  2582. ISLOW_MULT_TYPE * quantptr;
  2583. int * wsptr;
  2584. JSAMPROW outptr;
  2585. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  2586. int ctr;
  2587. int workspace[8*7]; /* buffers data between passes */
  2588. SHIFT_TEMPS
  2589. /* Pass 1: process columns from input, store into work array.
  2590. * 7-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/14).
  2591. */
  2592. inptr = coef_block;
  2593. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  2594. wsptr = workspace;
  2595. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  2596. /* Even part */
  2597. tmp23 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  2598. tmp23 <<= CONST_BITS;
  2599. /* Add fudge factor here for final descale. */
  2600. tmp23 += ONE << (CONST_BITS-PASS1_BITS-1);
  2601. z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  2602. z2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  2603. z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  2604. tmp20 = MULTIPLY(z2 - z3, FIX(0.881747734)); /* c4 */
  2605. tmp22 = MULTIPLY(z1 - z2, FIX(0.314692123)); /* c6 */
  2606. tmp21 = tmp20 + tmp22 + tmp23 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */
  2607. tmp10 = z1 + z3;
  2608. z2 -= tmp10;
  2609. tmp10 = MULTIPLY(tmp10, FIX(1.274162392)) + tmp23; /* c2 */
  2610. tmp20 += tmp10 - MULTIPLY(z3, FIX(0.077722536)); /* c2-c4-c6 */
  2611. tmp22 += tmp10 - MULTIPLY(z1, FIX(2.470602249)); /* c2+c4+c6 */
  2612. tmp23 += MULTIPLY(z2, FIX(1.414213562)); /* c0 */
  2613. /* Odd part */
  2614. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  2615. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  2616. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  2617. tmp11 = MULTIPLY(z1 + z2, FIX(0.935414347)); /* (c3+c1-c5)/2 */
  2618. tmp12 = MULTIPLY(z1 - z2, FIX(0.170262339)); /* (c3+c5-c1)/2 */
  2619. tmp10 = tmp11 - tmp12;
  2620. tmp11 += tmp12;
  2621. tmp12 = MULTIPLY(z2 + z3, - FIX(1.378756276)); /* -c1 */
  2622. tmp11 += tmp12;
  2623. z2 = MULTIPLY(z1 + z3, FIX(0.613604268)); /* c5 */
  2624. tmp10 += z2;
  2625. tmp12 += z2 + MULTIPLY(z3, FIX(1.870828693)); /* c3+c1-c5 */
  2626. /* Final output stage */
  2627. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
  2628. wsptr[8*6] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
  2629. wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
  2630. wsptr[8*5] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
  2631. wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
  2632. wsptr[8*4] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
  2633. wsptr[8*3] = (int) RIGHT_SHIFT(tmp23, CONST_BITS-PASS1_BITS);
  2634. }
  2635. /* Pass 2: process 7 rows from work array, store into output array.
  2636. * 14-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/28).
  2637. */
  2638. wsptr = workspace;
  2639. for (ctr = 0; ctr < 7; ctr++) {
  2640. outptr = output_buf[ctr] + output_col;
  2641. /* Even part */
  2642. /* Add fudge factor here for final descale. */
  2643. z1 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  2644. z1 <<= CONST_BITS;
  2645. z4 = (INT32) wsptr[4];
  2646. z2 = MULTIPLY(z4, FIX(1.274162392)); /* c4 */
  2647. z3 = MULTIPLY(z4, FIX(0.314692123)); /* c12 */
  2648. z4 = MULTIPLY(z4, FIX(0.881747734)); /* c8 */
  2649. tmp10 = z1 + z2;
  2650. tmp11 = z1 + z3;
  2651. tmp12 = z1 - z4;
  2652. tmp23 = z1 - ((z2 + z3 - z4) << 1); /* c0 = (c4+c12-c8)*2 */
  2653. z1 = (INT32) wsptr[2];
  2654. z2 = (INT32) wsptr[6];
  2655. z3 = MULTIPLY(z1 + z2, FIX(1.105676686)); /* c6 */
  2656. tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */
  2657. tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */
  2658. tmp15 = MULTIPLY(z1, FIX(0.613604268)) - /* c10 */
  2659. MULTIPLY(z2, FIX(1.378756276)); /* c2 */
  2660. tmp20 = tmp10 + tmp13;
  2661. tmp26 = tmp10 - tmp13;
  2662. tmp21 = tmp11 + tmp14;
  2663. tmp25 = tmp11 - tmp14;
  2664. tmp22 = tmp12 + tmp15;
  2665. tmp24 = tmp12 - tmp15;
  2666. /* Odd part */
  2667. z1 = (INT32) wsptr[1];
  2668. z2 = (INT32) wsptr[3];
  2669. z3 = (INT32) wsptr[5];
  2670. z4 = (INT32) wsptr[7];
  2671. z4 <<= CONST_BITS;
  2672. tmp14 = z1 + z3;
  2673. tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */
  2674. tmp12 = MULTIPLY(tmp14, FIX(1.197448846)); /* c5 */
  2675. tmp10 = tmp11 + tmp12 + z4 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */
  2676. tmp14 = MULTIPLY(tmp14, FIX(0.752406978)); /* c9 */
  2677. tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */
  2678. z1 -= z2;
  2679. tmp15 = MULTIPLY(z1, FIX(0.467085129)) - z4; /* c11 */
  2680. tmp16 += tmp15;
  2681. tmp13 = MULTIPLY(z2 + z3, - FIX(0.158341681)) - z4; /* -c13 */
  2682. tmp11 += tmp13 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */
  2683. tmp12 += tmp13 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */
  2684. tmp13 = MULTIPLY(z3 - z2, FIX(1.405321284)); /* c1 */
  2685. tmp14 += tmp13 + z4 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */
  2686. tmp15 += tmp13 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */
  2687. tmp13 = ((z1 - z3) << CONST_BITS) + z4;
  2688. /* Final output stage */
  2689. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  2690. CONST_BITS+PASS1_BITS+3)
  2691. & RANGE_MASK];
  2692. outptr[13] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  2693. CONST_BITS+PASS1_BITS+3)
  2694. & RANGE_MASK];
  2695. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  2696. CONST_BITS+PASS1_BITS+3)
  2697. & RANGE_MASK];
  2698. outptr[12] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  2699. CONST_BITS+PASS1_BITS+3)
  2700. & RANGE_MASK];
  2701. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  2702. CONST_BITS+PASS1_BITS+3)
  2703. & RANGE_MASK];
  2704. outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  2705. CONST_BITS+PASS1_BITS+3)
  2706. & RANGE_MASK];
  2707. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
  2708. CONST_BITS+PASS1_BITS+3)
  2709. & RANGE_MASK];
  2710. outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
  2711. CONST_BITS+PASS1_BITS+3)
  2712. & RANGE_MASK];
  2713. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
  2714. CONST_BITS+PASS1_BITS+3)
  2715. & RANGE_MASK];
  2716. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
  2717. CONST_BITS+PASS1_BITS+3)
  2718. & RANGE_MASK];
  2719. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
  2720. CONST_BITS+PASS1_BITS+3)
  2721. & RANGE_MASK];
  2722. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
  2723. CONST_BITS+PASS1_BITS+3)
  2724. & RANGE_MASK];
  2725. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp26 + tmp16,
  2726. CONST_BITS+PASS1_BITS+3)
  2727. & RANGE_MASK];
  2728. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp26 - tmp16,
  2729. CONST_BITS+PASS1_BITS+3)
  2730. & RANGE_MASK];
  2731. wsptr += 8; /* advance pointer to next row */
  2732. }
  2733. }
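/*
 * Illustrative sketch (guarded out): where the cK literals in the
 * 7-point/14-point kernels above come from.  For an N-point kernel,
 * cK = sqrt(2) * cos(K*pi/(2*N)); the FIX() arguments are these values
 * (or simple sums/differences of them) rounded to nine decimals.
 * Assumed stand-alone check, not used by the decoder:
 */
#if 0
#include <math.h>
#include <stdio.h>

static double sk_ck (int k, int n)
{
  /* cK for an n-point IDCT kernel */
  return sqrt(2.0) * cos(k * 3.14159265358979323846 / (2.0 * n));
}

int main (void)
{
  printf("c2 of 7-point  = %.9f\n", sk_ck(2, 7));   /* ~1.274162392 */
  printf("c4 of 7-point  = %.9f\n", sk_ck(4, 7));   /* ~0.881747734 */
  printf("c6 of 14-point = %.9f\n", sk_ck(6, 14));  /* ~1.105676686 */
  return 0;
}
#endif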
  2734. /*
  2735. * Perform dequantization and inverse DCT on one block of coefficients,
  2736. * producing a 12x6 output block.
  2737. *
  2738. * 6-point IDCT in pass 1 (columns), 12-point in pass 2 (rows).
  2739. */
  2740. GLOBAL(void)
  2741. jpeg_idct_12x6 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  2742. JCOEFPTR coef_block,
  2743. JSAMPARRAY output_buf, JDIMENSION output_col)
  2744. {
  2745. INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  2746. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25;
  2747. INT32 z1, z2, z3, z4;
  2748. JCOEFPTR inptr;
  2749. ISLOW_MULT_TYPE * quantptr;
  2750. int * wsptr;
  2751. JSAMPROW outptr;
  2752. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  2753. int ctr;
  2754. int workspace[8*6]; /* buffers data between passes */
  2755. SHIFT_TEMPS
  2756. /* Pass 1: process columns from input, store into work array.
  2757. * 6-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/12).
  2758. */
  2759. inptr = coef_block;
  2760. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  2761. wsptr = workspace;
  2762. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  2763. /* Even part */
  2764. tmp10 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  2765. tmp10 <<= CONST_BITS;
  2766. /* Add fudge factor here for final descale. */
  2767. tmp10 += ONE << (CONST_BITS-PASS1_BITS-1);
  2768. tmp12 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  2769. tmp20 = MULTIPLY(tmp12, FIX(0.707106781)); /* c4 */
  2770. tmp11 = tmp10 + tmp20;
  2771. tmp21 = RIGHT_SHIFT(tmp10 - tmp20 - tmp20, CONST_BITS-PASS1_BITS);
  2772. tmp20 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  2773. tmp10 = MULTIPLY(tmp20, FIX(1.224744871)); /* c2 */
  2774. tmp20 = tmp11 + tmp10;
  2775. tmp22 = tmp11 - tmp10;
  2776. /* Odd part */
  2777. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  2778. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  2779. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  2780. tmp11 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */
  2781. tmp10 = tmp11 + ((z1 + z2) << CONST_BITS);
  2782. tmp12 = tmp11 + ((z3 - z2) << CONST_BITS);
  2783. tmp11 = (z1 - z2 - z3) << PASS1_BITS;
  2784. /* Final output stage */
  2785. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
  2786. wsptr[8*5] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
  2787. wsptr[8*1] = (int) (tmp21 + tmp11);
  2788. wsptr[8*4] = (int) (tmp21 - tmp11);
  2789. wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
  2790. wsptr[8*3] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
  2791. }
  2792. /* Pass 2: process 6 rows from work array, store into output array.
  2793. * 12-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/24).
  2794. */
  2795. wsptr = workspace;
  2796. for (ctr = 0; ctr < 6; ctr++) {
  2797. outptr = output_buf[ctr] + output_col;
  2798. /* Even part */
  2799. /* Add fudge factor here for final descale. */
  2800. z3 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  2801. z3 <<= CONST_BITS;
  2802. z4 = (INT32) wsptr[4];
  2803. z4 = MULTIPLY(z4, FIX(1.224744871)); /* c4 */
  2804. tmp10 = z3 + z4;
  2805. tmp11 = z3 - z4;
  2806. z1 = (INT32) wsptr[2];
  2807. z4 = MULTIPLY(z1, FIX(1.366025404)); /* c2 */
  2808. z1 <<= CONST_BITS;
  2809. z2 = (INT32) wsptr[6];
  2810. z2 <<= CONST_BITS;
  2811. tmp12 = z1 - z2;
  2812. tmp21 = z3 + tmp12;
  2813. tmp24 = z3 - tmp12;
  2814. tmp12 = z4 + z2;
  2815. tmp20 = tmp10 + tmp12;
  2816. tmp25 = tmp10 - tmp12;
  2817. tmp12 = z4 - z1 - z2;
  2818. tmp22 = tmp11 + tmp12;
  2819. tmp23 = tmp11 - tmp12;
  2820. /* Odd part */
  2821. z1 = (INT32) wsptr[1];
  2822. z2 = (INT32) wsptr[3];
  2823. z3 = (INT32) wsptr[5];
  2824. z4 = (INT32) wsptr[7];
  2825. tmp11 = MULTIPLY(z2, FIX(1.306562965)); /* c3 */
  2826. tmp14 = MULTIPLY(z2, - FIX_0_541196100); /* -c9 */
  2827. tmp10 = z1 + z3;
  2828. tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669)); /* c7 */
  2829. tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384)); /* c5-c7 */
  2830. tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716)); /* c1-c5 */
  2831. tmp13 = MULTIPLY(z3 + z4, - FIX(1.045510580)); /* -(c7+c11) */
  2832. tmp12 += tmp13 + tmp14 - MULTIPLY(z3, FIX(1.478575242)); /* c1+c5-c7-c11 */
  2833. tmp13 += tmp15 - tmp11 + MULTIPLY(z4, FIX(1.586706681)); /* c1+c11 */
  2834. tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) - /* c7-c11 */
  2835. MULTIPLY(z4, FIX(1.982889723)); /* c5+c7 */
  2836. z1 -= z4;
  2837. z2 -= z3;
  2838. z3 = MULTIPLY(z1 + z2, FIX_0_541196100); /* c9 */
  2839. tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865); /* c3-c9 */
  2840. tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065); /* c3+c9 */
  2841. /* Final output stage */
  2842. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  2843. CONST_BITS+PASS1_BITS+3)
  2844. & RANGE_MASK];
  2845. outptr[11] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  2846. CONST_BITS+PASS1_BITS+3)
  2847. & RANGE_MASK];
  2848. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  2849. CONST_BITS+PASS1_BITS+3)
  2850. & RANGE_MASK];
  2851. outptr[10] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  2852. CONST_BITS+PASS1_BITS+3)
  2853. & RANGE_MASK];
  2854. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  2855. CONST_BITS+PASS1_BITS+3)
  2856. & RANGE_MASK];
  2857. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  2858. CONST_BITS+PASS1_BITS+3)
  2859. & RANGE_MASK];
  2860. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
  2861. CONST_BITS+PASS1_BITS+3)
  2862. & RANGE_MASK];
  2863. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
  2864. CONST_BITS+PASS1_BITS+3)
  2865. & RANGE_MASK];
  2866. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
  2867. CONST_BITS+PASS1_BITS+3)
  2868. & RANGE_MASK];
  2869. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
  2870. CONST_BITS+PASS1_BITS+3)
  2871. & RANGE_MASK];
  2872. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp25 + tmp15,
  2873. CONST_BITS+PASS1_BITS+3)
  2874. & RANGE_MASK];
  2875. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp25 - tmp15,
  2876. CONST_BITS+PASS1_BITS+3)
  2877. & RANGE_MASK];
  2878. wsptr += 8; /* advance pointer to next row */
  2879. }
  2880. }
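/*
 * Design note and sketch (guarded out): in the 6-point column kernel
 * above, c4 = sqrt(2)*cos(4*pi/12) = 1/sqrt(2), so 2*c4 = sqrt(2).
 * That identity is why the even part computes "tmp10 - tmp20 - tmp20"
 * (two subtractions) instead of spending another multiply on
 * sqrt(2)*d4.  Minimal numeric check, assuming only <math.h>:
 */
#if 0
#include <math.h>
#include <assert.h>

int main (void)
{
  double c4 = sqrt(2.0) * cos(4.0 * 3.14159265358979323846 / 12.0);
  assert(fabs(2.0 * c4 - sqrt(2.0)) < 1e-12);   /* 2*c4 == sqrt(2) */
  return 0;
}
#endif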
  2881. /*
  2882. * Perform dequantization and inverse DCT on one block of coefficients,
  2883. * producing a 10x5 output block.
  2884. *
  2885. * 5-point IDCT in pass 1 (columns), 10-point in pass 2 (rows).
  2886. */
  2887. GLOBAL(void)
  2888. jpeg_idct_10x5 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  2889. JCOEFPTR coef_block,
  2890. JSAMPARRAY output_buf, JDIMENSION output_col)
  2891. {
  2892. INT32 tmp10, tmp11, tmp12, tmp13, tmp14;
  2893. INT32 tmp20, tmp21, tmp22, tmp23, tmp24;
  2894. INT32 z1, z2, z3, z4;
  2895. JCOEFPTR inptr;
  2896. ISLOW_MULT_TYPE * quantptr;
  2897. int * wsptr;
  2898. JSAMPROW outptr;
  2899. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  2900. int ctr;
  2901. int workspace[8*5]; /* buffers data between passes */
  2902. SHIFT_TEMPS
  2903. /* Pass 1: process columns from input, store into work array.
  2904. * 5-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/10).
  2905. */
  2906. inptr = coef_block;
  2907. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  2908. wsptr = workspace;
  2909. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  2910. /* Even part */
  2911. tmp12 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  2912. tmp12 <<= CONST_BITS;
  2913. /* Add fudge factor here for final descale. */
  2914. tmp12 += ONE << (CONST_BITS-PASS1_BITS-1);
  2915. tmp13 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  2916. tmp14 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  2917. z1 = MULTIPLY(tmp13 + tmp14, FIX(0.790569415)); /* (c2+c4)/2 */
  2918. z2 = MULTIPLY(tmp13 - tmp14, FIX(0.353553391)); /* (c2-c4)/2 */
  2919. z3 = tmp12 + z2;
  2920. tmp10 = z3 + z1;
  2921. tmp11 = z3 - z1;
  2922. tmp12 -= z2 << 2;
  2923. /* Odd part */
  2924. z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  2925. z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  2926. z1 = MULTIPLY(z2 + z3, FIX(0.831253876)); /* c3 */
  2927. tmp13 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c1-c3 */
  2928. tmp14 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c1+c3 */
  2929. /* Final output stage */
  2930. wsptr[8*0] = (int) RIGHT_SHIFT(tmp10 + tmp13, CONST_BITS-PASS1_BITS);
  2931. wsptr[8*4] = (int) RIGHT_SHIFT(tmp10 - tmp13, CONST_BITS-PASS1_BITS);
  2932. wsptr[8*1] = (int) RIGHT_SHIFT(tmp11 + tmp14, CONST_BITS-PASS1_BITS);
  2933. wsptr[8*3] = (int) RIGHT_SHIFT(tmp11 - tmp14, CONST_BITS-PASS1_BITS);
  2934. wsptr[8*2] = (int) RIGHT_SHIFT(tmp12, CONST_BITS-PASS1_BITS);
  2935. }
  2936. /* Pass 2: process 5 rows from work array, store into output array.
  2937. * 10-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/20).
  2938. */
  2939. wsptr = workspace;
  2940. for (ctr = 0; ctr < 5; ctr++) {
  2941. outptr = output_buf[ctr] + output_col;
  2942. /* Even part */
  2943. /* Add fudge factor here for final descale. */
  2944. z3 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  2945. z3 <<= CONST_BITS;
  2946. z4 = (INT32) wsptr[4];
  2947. z1 = MULTIPLY(z4, FIX(1.144122806)); /* c4 */
  2948. z2 = MULTIPLY(z4, FIX(0.437016024)); /* c8 */
  2949. tmp10 = z3 + z1;
  2950. tmp11 = z3 - z2;
  2951. tmp22 = z3 - ((z1 - z2) << 1); /* c0 = (c4-c8)*2 */
  2952. z2 = (INT32) wsptr[2];
  2953. z3 = (INT32) wsptr[6];
  2954. z1 = MULTIPLY(z2 + z3, FIX(0.831253876)); /* c6 */
  2955. tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c2-c6 */
  2956. tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c2+c6 */
  2957. tmp20 = tmp10 + tmp12;
  2958. tmp24 = tmp10 - tmp12;
  2959. tmp21 = tmp11 + tmp13;
  2960. tmp23 = tmp11 - tmp13;
  2961. /* Odd part */
  2962. z1 = (INT32) wsptr[1];
  2963. z2 = (INT32) wsptr[3];
  2964. z3 = (INT32) wsptr[5];
  2965. z3 <<= CONST_BITS;
  2966. z4 = (INT32) wsptr[7];
  2967. tmp11 = z2 + z4;
  2968. tmp13 = z2 - z4;
  2969. tmp12 = MULTIPLY(tmp13, FIX(0.309016994)); /* (c3-c7)/2 */
  2970. z2 = MULTIPLY(tmp11, FIX(0.951056516)); /* (c3+c7)/2 */
  2971. z4 = z3 + tmp12;
  2972. tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */
  2973. tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */
  2974. z2 = MULTIPLY(tmp11, FIX(0.587785252)); /* (c1-c9)/2 */
  2975. z4 = z3 - tmp12 - (tmp13 << (CONST_BITS - 1));
  2976. tmp12 = ((z1 - tmp13) << CONST_BITS) - z3;
  2977. tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */
  2978. tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */
  2979. /* Final output stage */
  2980. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  2981. CONST_BITS+PASS1_BITS+3)
  2982. & RANGE_MASK];
  2983. outptr[9] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  2984. CONST_BITS+PASS1_BITS+3)
  2985. & RANGE_MASK];
  2986. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  2987. CONST_BITS+PASS1_BITS+3)
  2988. & RANGE_MASK];
  2989. outptr[8] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  2990. CONST_BITS+PASS1_BITS+3)
  2991. & RANGE_MASK];
  2992. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  2993. CONST_BITS+PASS1_BITS+3)
  2994. & RANGE_MASK];
  2995. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  2996. CONST_BITS+PASS1_BITS+3)
  2997. & RANGE_MASK];
  2998. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23 + tmp13,
  2999. CONST_BITS+PASS1_BITS+3)
  3000. & RANGE_MASK];
  3001. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp23 - tmp13,
  3002. CONST_BITS+PASS1_BITS+3)
  3003. & RANGE_MASK];
  3004. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp24 + tmp14,
  3005. CONST_BITS+PASS1_BITS+3)
  3006. & RANGE_MASK];
  3007. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp24 - tmp14,
  3008. CONST_BITS+PASS1_BITS+3)
  3009. & RANGE_MASK];
  3010. wsptr += 8; /* advance pointer to next row */
  3011. }
  3012. }
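/*
 * Illustrative sketch (guarded out): the 5-point even part above folds
 * its two cosines into half-sum / half-difference form,
 * (c2+c4)/2 = 0.790569415 and (c2-c4)/2 = 0.353553391 = 1/(2*sqrt(2)),
 * with cK = sqrt(2)*cos(K*pi/10), so one butterfly serves both terms.
 * Assumed stand-alone check:
 */
#if 0
#include <math.h>
#include <stdio.h>

int main (void)
{
  double pi = 3.14159265358979323846;
  double c2 = sqrt(2.0) * cos(2.0 * pi / 10.0);
  double c4 = sqrt(2.0) * cos(4.0 * pi / 10.0);
  printf("(c2+c4)/2 = %.9f\n", (c2 + c4) / 2.0);   /* ~0.790569415 */
  printf("(c2-c4)/2 = %.9f\n", (c2 - c4) / 2.0);   /* ~0.353553391 */
  return 0;
}
#endif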
  3013. /*
  3014. * Perform dequantization and inverse DCT on one block of coefficients,
  3015. * producing an 8x4 output block.
  3016. *
  3017. * 4-point IDCT in pass 1 (columns), 8-point in pass 2 (rows).
  3018. */
  3019. GLOBAL(void)
  3020. jpeg_idct_8x4 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  3021. JCOEFPTR coef_block,
  3022. JSAMPARRAY output_buf, JDIMENSION output_col)
  3023. {
  3024. INT32 tmp0, tmp1, tmp2, tmp3;
  3025. INT32 tmp10, tmp11, tmp12, tmp13;
  3026. INT32 z1, z2, z3;
  3027. JCOEFPTR inptr;
  3028. ISLOW_MULT_TYPE * quantptr;
  3029. int * wsptr;
  3030. JSAMPROW outptr;
  3031. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  3032. int ctr;
  3033. int workspace[8*4]; /* buffers data between passes */
  3034. SHIFT_TEMPS
  3035. /* Pass 1: process columns from input, store into work array.
  3036. * 4-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/16).
  3037. */
  3038. inptr = coef_block;
  3039. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  3040. wsptr = workspace;
  3041. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  3042. /* Even part */
  3043. tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  3044. tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  3045. tmp10 = (tmp0 + tmp2) << PASS1_BITS;
  3046. tmp12 = (tmp0 - tmp2) << PASS1_BITS;
  3047. /* Odd part */
  3048. /* Same rotation as in the even part of the 8x8 LL&M IDCT */
  3049. z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  3050. z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  3051. z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */
  3052. /* Add fudge factor here for final descale. */
  3053. z1 += ONE << (CONST_BITS-PASS1_BITS-1);
  3054. tmp0 = RIGHT_SHIFT(z1 + MULTIPLY(z2, FIX_0_765366865), /* c2-c6 */
  3055. CONST_BITS-PASS1_BITS);
  3056. tmp2 = RIGHT_SHIFT(z1 - MULTIPLY(z3, FIX_1_847759065), /* c2+c6 */
  3057. CONST_BITS-PASS1_BITS);
  3058. /* Final output stage */
  3059. wsptr[8*0] = (int) (tmp10 + tmp0);
  3060. wsptr[8*3] = (int) (tmp10 - tmp0);
  3061. wsptr[8*1] = (int) (tmp12 + tmp2);
  3062. wsptr[8*2] = (int) (tmp12 - tmp2);
  3063. }
  3064. /* Pass 2: process rows from work array, store into output array. */
  3065. /* Note that we must descale the results by a factor of 8 == 2**3, */
  3066. /* and also undo the PASS1_BITS scaling. */
  3067. wsptr = workspace;
  3068. for (ctr = 0; ctr < 4; ctr++) {
  3069. outptr = output_buf[ctr] + output_col;
  3070. /* Even part: reverse the even part of the forward DCT. */
  3071. /* The rotator is sqrt(2)*c(-6). */
  3072. z2 = (INT32) wsptr[2];
  3073. z3 = (INT32) wsptr[6];
  3074. z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
  3075. tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865);
  3076. tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065);
  3077. /* Add fudge factor here for final descale. */
  3078. z2 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  3079. z3 = (INT32) wsptr[4];
  3080. tmp0 = (z2 + z3) << CONST_BITS;
  3081. tmp1 = (z2 - z3) << CONST_BITS;
  3082. tmp10 = tmp0 + tmp2;
  3083. tmp13 = tmp0 - tmp2;
  3084. tmp11 = tmp1 + tmp3;
  3085. tmp12 = tmp1 - tmp3;
  3086. /* Odd part per figure 8; the matrix is unitary and hence its
  3087. * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
  3088. */
  3089. tmp0 = (INT32) wsptr[7];
  3090. tmp1 = (INT32) wsptr[5];
  3091. tmp2 = (INT32) wsptr[3];
  3092. tmp3 = (INT32) wsptr[1];
  3093. z2 = tmp0 + tmp2;
  3094. z3 = tmp1 + tmp3;
  3095. z1 = MULTIPLY(z2 + z3, FIX_1_175875602); /* sqrt(2) * c3 */
  3096. z2 = MULTIPLY(z2, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
  3097. z3 = MULTIPLY(z3, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
  3098. z2 += z1;
  3099. z3 += z1;
  3100. z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
  3101. tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
  3102. tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
  3103. tmp0 += z1 + z2;
  3104. tmp3 += z1 + z3;
  3105. z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
  3106. tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
  3107. tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
  3108. tmp1 += z1 + z3;
  3109. tmp2 += z1 + z2;
  3110. /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
  3111. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp3,
  3112. CONST_BITS+PASS1_BITS+3)
  3113. & RANGE_MASK];
  3114. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp3,
  3115. CONST_BITS+PASS1_BITS+3)
  3116. & RANGE_MASK];
  3117. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp2,
  3118. CONST_BITS+PASS1_BITS+3)
  3119. & RANGE_MASK];
  3120. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp2,
  3121. CONST_BITS+PASS1_BITS+3)
  3122. & RANGE_MASK];
  3123. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp1,
  3124. CONST_BITS+PASS1_BITS+3)
  3125. & RANGE_MASK];
  3126. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp1,
  3127. CONST_BITS+PASS1_BITS+3)
  3128. & RANGE_MASK];
  3129. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13 + tmp0,
  3130. CONST_BITS+PASS1_BITS+3)
  3131. & RANGE_MASK];
  3132. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp13 - tmp0,
  3133. CONST_BITS+PASS1_BITS+3)
  3134. & RANGE_MASK];
  3135. wsptr += DCTSIZE; /* advance pointer to next row */
  3136. }
  3137. }
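/*
 * Illustrative sketch (guarded out): a double-precision mirror of the
 * 4-point column pass above, which reuses the 8-point constants
 * c2 = sqrt(2)*cos(2*pi/16) and c6 = sqrt(2)*cos(6*pi/16).  This is an
 * assumed reference only; it ignores dequantization and the
 * fixed-point scaling and rounding.
 */
#if 0
static void sk_idct4 (const double d[4], double out[4])
{
  double c2 = 1.306562965, c6 = 0.541196100;
  double e0 = d[0] + d[2];              /* even butterfly, no multiply */
  double e1 = d[0] - d[2];
  double o0 = d[1] * c2 + d[3] * c6;    /* same rotation as the 8x8 even part */
  double o1 = d[1] * c6 - d[3] * c2;
  out[0] = e0 + o0;
  out[3] = e0 - o0;
  out[1] = e1 + o1;
  out[2] = e1 - o1;
}
#endif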
  3138. /*
  3139. * Perform dequantization and inverse DCT on one block of coefficients,
  3140. * producing a reduced-size 6x3 output block.
  3141. *
  3142. * 3-point IDCT in pass 1 (columns), 6-point in pass 2 (rows).
  3143. */
  3144. GLOBAL(void)
  3145. jpeg_idct_6x3 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  3146. JCOEFPTR coef_block,
  3147. JSAMPARRAY output_buf, JDIMENSION output_col)
  3148. {
  3149. INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12;
  3150. INT32 z1, z2, z3;
  3151. JCOEFPTR inptr;
  3152. ISLOW_MULT_TYPE * quantptr;
  3153. int * wsptr;
  3154. JSAMPROW outptr;
  3155. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  3156. int ctr;
  3157. int workspace[6*3]; /* buffers data between passes */
  3158. SHIFT_TEMPS
  3159. /* Pass 1: process columns from input, store into work array.
  3160. * 3-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/6).
  3161. */
  3162. inptr = coef_block;
  3163. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  3164. wsptr = workspace;
  3165. for (ctr = 0; ctr < 6; ctr++, inptr++, quantptr++, wsptr++) {
  3166. /* Even part */
  3167. tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  3168. tmp0 <<= CONST_BITS;
  3169. /* Add fudge factor here for final descale. */
  3170. tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);
  3171. tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  3172. tmp12 = MULTIPLY(tmp2, FIX(0.707106781)); /* c2 */
  3173. tmp10 = tmp0 + tmp12;
  3174. tmp2 = tmp0 - tmp12 - tmp12;
  3175. /* Odd part */
  3176. tmp12 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  3177. tmp0 = MULTIPLY(tmp12, FIX(1.224744871)); /* c1 */
  3178. /* Final output stage */
  3179. wsptr[6*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
  3180. wsptr[6*2] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
  3181. wsptr[6*1] = (int) RIGHT_SHIFT(tmp2, CONST_BITS-PASS1_BITS);
  3182. }
  3183. /* Pass 2: process 3 rows from work array, store into output array.
  3184. * 6-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/12).
  3185. */
  3186. wsptr = workspace;
  3187. for (ctr = 0; ctr < 3; ctr++) {
  3188. outptr = output_buf[ctr] + output_col;
  3189. /* Even part */
  3190. /* Add fudge factor here for final descale. */
  3191. tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  3192. tmp0 <<= CONST_BITS;
  3193. tmp2 = (INT32) wsptr[4];
  3194. tmp10 = MULTIPLY(tmp2, FIX(0.707106781)); /* c4 */
  3195. tmp1 = tmp0 + tmp10;
  3196. tmp11 = tmp0 - tmp10 - tmp10;
  3197. tmp10 = (INT32) wsptr[2];
  3198. tmp0 = MULTIPLY(tmp10, FIX(1.224744871)); /* c2 */
  3199. tmp10 = tmp1 + tmp0;
  3200. tmp12 = tmp1 - tmp0;
  3201. /* Odd part */
  3202. z1 = (INT32) wsptr[1];
  3203. z2 = (INT32) wsptr[3];
  3204. z3 = (INT32) wsptr[5];
  3205. tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */
  3206. tmp0 = tmp1 + ((z1 + z2) << CONST_BITS);
  3207. tmp2 = tmp1 + ((z3 - z2) << CONST_BITS);
  3208. tmp1 = (z1 - z2 - z3) << CONST_BITS;
  3209. /* Final output stage */
  3210. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
  3211. CONST_BITS+PASS1_BITS+3)
  3212. & RANGE_MASK];
  3213. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
  3214. CONST_BITS+PASS1_BITS+3)
  3215. & RANGE_MASK];
  3216. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp1,
  3217. CONST_BITS+PASS1_BITS+3)
  3218. & RANGE_MASK];
  3219. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp1,
  3220. CONST_BITS+PASS1_BITS+3)
  3221. & RANGE_MASK];
  3222. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
  3223. CONST_BITS+PASS1_BITS+3)
  3224. & RANGE_MASK];
  3225. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
  3226. CONST_BITS+PASS1_BITS+3)
  3227. & RANGE_MASK];
  3228. wsptr += 6; /* advance pointer to next row */
  3229. }
  3230. }
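/*
 * Illustrative sketch (guarded out): a double-precision mirror of the
 * 3-point column pass above, with c1 = sqrt(2)*cos(pi/6) and
 * c2 = sqrt(2)*cos(2*pi/6).  Assumed reference only; dequantization
 * and fixed-point scaling are omitted.
 */
#if 0
static void sk_idct3 (const double d[3], double out[3])
{
  double c1 = 1.224744871;              /* sqrt(2)*cos(pi/6)   */
  double c2 = 0.707106781;              /* sqrt(2)*cos(2*pi/6) */
  double even = d[0] + c2 * d[2];
  double odd  = c1 * d[1];
  out[0] = even + odd;
  out[2] = even - odd;
  out[1] = d[0] - 2.0 * c2 * d[2];      /* 2*c2 = sqrt(2) */
}
#endif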
  3231. /*
  3232. * Perform dequantization and inverse DCT on one block of coefficients,
  3233. * producing a 4x2 output block.
  3234. *
  3235. * 2-point IDCT in pass 1 (columns), 4-point in pass 2 (rows).
  3236. */
  3237. GLOBAL(void)
  3238. jpeg_idct_4x2 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  3239. JCOEFPTR coef_block,
  3240. JSAMPARRAY output_buf, JDIMENSION output_col)
  3241. {
  3242. INT32 tmp0, tmp2, tmp10, tmp12;
  3243. INT32 z1, z2, z3;
  3244. JCOEFPTR inptr;
  3245. ISLOW_MULT_TYPE * quantptr;
  3246. INT32 * wsptr;
  3247. JSAMPROW outptr;
  3248. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  3249. int ctr;
  3250. INT32 workspace[4*2]; /* buffers data between passes */
  3251. SHIFT_TEMPS
  3252. /* Pass 1: process columns from input, store into work array. */
  3253. inptr = coef_block;
  3254. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  3255. wsptr = workspace;
  3256. for (ctr = 0; ctr < 4; ctr++, inptr++, quantptr++, wsptr++) {
  3257. /* Even part */
  3258. tmp10 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  3259. /* Odd part */
  3260. tmp0 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  3261. /* Final output stage */
  3262. wsptr[4*0] = tmp10 + tmp0;
  3263. wsptr[4*1] = tmp10 - tmp0;
  3264. }
  3265. /* Pass 2: process 2 rows from work array, store into output array.
  3266. * 4-point IDCT kernel,
  3267. * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point IDCT].
  3268. */
  3269. wsptr = workspace;
  3270. for (ctr = 0; ctr < 2; ctr++) {
  3271. outptr = output_buf[ctr] + output_col;
  3272. /* Even part */
  3273. /* Add fudge factor here for final descale. */
  3274. tmp0 = wsptr[0] + (ONE << 2);
  3275. tmp2 = wsptr[2];
  3276. tmp10 = (tmp0 + tmp2) << CONST_BITS;
  3277. tmp12 = (tmp0 - tmp2) << CONST_BITS;
  3278. /* Odd part */
  3279. /* Same rotation as in the even part of the 8x8 LL&M IDCT */
  3280. z2 = wsptr[1];
  3281. z3 = wsptr[3];
  3282. z1 = MULTIPLY(z2 + z3, FIX_0_541196100); /* c6 */
  3283. tmp0 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */
  3284. tmp2 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */
  3285. /* Final output stage */
  3286. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
  3287. CONST_BITS+3)
  3288. & RANGE_MASK];
  3289. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
  3290. CONST_BITS+3)
  3291. & RANGE_MASK];
  3292. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
  3293. CONST_BITS+3)
  3294. & RANGE_MASK];
  3295. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
  3296. CONST_BITS+3)
  3297. & RANGE_MASK];
  3298. wsptr += 4; /* advance pointer to next row */
  3299. }
  3300. }
  3301. /*
  3302. * Perform dequantization and inverse DCT on one block of coefficients,
  3303. * producing a 2x1 output block.
  3304. *
  3305. * 1-point IDCT in pass 1 (columns), 2-point in pass 2 (rows).
  3306. */
  3307. GLOBAL(void)
  3308. jpeg_idct_2x1 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  3309. JCOEFPTR coef_block,
  3310. JSAMPARRAY output_buf, JDIMENSION output_col)
  3311. {
  3312. INT32 tmp0, tmp10;
  3313. ISLOW_MULT_TYPE * quantptr;
  3314. JSAMPROW outptr;
  3315. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  3316. SHIFT_TEMPS
  3317. /* Pass 1: empty. */
  3318. /* Pass 2: process 1 row from input, store into output array. */
  3319. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  3320. outptr = output_buf[0] + output_col;
  3321. /* Even part */
  3322. tmp10 = DEQUANTIZE(coef_block[0], quantptr[0]);
  3323. /* Add fudge factor here for final descale. */
  3324. tmp10 += ONE << 2;
  3325. /* Odd part */
  3326. tmp0 = DEQUANTIZE(coef_block[1], quantptr[1]);
  3327. /* Final output stage */
  3328. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, 3) & RANGE_MASK];
  3329. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, 3) & RANGE_MASK];
  3330. }
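/*
 * Usage-level sketch (guarded out): applications reach these scaled
 * IDCT entry points by requesting reduced-size output from the
 * decompressor; which NxM kernel is chosen for a component depends on
 * the granted scale ratio and the component's sampling factors.  The
 * fragment below is an assumed outline only; error handling, source
 * setup and jpeg_read_header() are omitted.
 */
#if 0
#include "jpeglib.h"

static void sk_request_scaled_output (j_decompress_ptr cinfo)
{
  cinfo->scale_num = 3;                 /* e.g. ask for 3/8-size output */
  cinfo->scale_denom = 8;
  jpeg_start_decompress(cinfo);
  /* jpeg_read_scanlines() now delivers rows produced through the
   * reduced-size IDCT routines in this file. */
}
#endif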
  3331. /*
  3332. * Perform dequantization and inverse DCT on one block of coefficients,
  3333. * producing an 8x16 output block.
  3334. *
  3335. * 16-point IDCT in pass 1 (columns), 8-point in pass 2 (rows).
  3336. */
  3337. GLOBAL(void)
  3338. jpeg_idct_8x16 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  3339. JCOEFPTR coef_block,
  3340. JSAMPARRAY output_buf, JDIMENSION output_col)
  3341. {
  3342. INT32 tmp0, tmp1, tmp2, tmp3, tmp10, tmp11, tmp12, tmp13;
  3343. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26, tmp27;
  3344. INT32 z1, z2, z3, z4;
  3345. JCOEFPTR inptr;
  3346. ISLOW_MULT_TYPE * quantptr;
  3347. int * wsptr;
  3348. JSAMPROW outptr;
  3349. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  3350. int ctr;
  3351. int workspace[8*16]; /* buffers data between passes */
  3352. SHIFT_TEMPS
  3353. /* Pass 1: process columns from input, store into work array.
  3354. * 16-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/32).
  3355. */
  3356. inptr = coef_block;
  3357. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  3358. wsptr = workspace;
  3359. for (ctr = 0; ctr < 8; ctr++, inptr++, quantptr++, wsptr++) {
  3360. /* Even part */
  3361. tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  3362. tmp0 <<= CONST_BITS;
  3363. /* Add fudge factor here for final descale. */
  3364. tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);
  3365. z1 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  3366. tmp1 = MULTIPLY(z1, FIX(1.306562965)); /* c4[16] = c2[8] */
  3367. tmp2 = MULTIPLY(z1, FIX_0_541196100); /* c12[16] = c6[8] */
  3368. tmp10 = tmp0 + tmp1;
  3369. tmp11 = tmp0 - tmp1;
  3370. tmp12 = tmp0 + tmp2;
  3371. tmp13 = tmp0 - tmp2;
  3372. z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  3373. z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  3374. z3 = z1 - z2;
  3375. z4 = MULTIPLY(z3, FIX(0.275899379)); /* c14[16] = c7[8] */
  3376. z3 = MULTIPLY(z3, FIX(1.387039845)); /* c2[16] = c1[8] */
  3377. tmp0 = z3 + MULTIPLY(z2, FIX_2_562915447); /* (c6+c2)[16] = (c3+c1)[8] */
  3378. tmp1 = z4 + MULTIPLY(z1, FIX_0_899976223); /* (c6-c14)[16] = (c3-c7)[8] */
  3379. tmp2 = z3 - MULTIPLY(z1, FIX(0.601344887)); /* (c2-c10)[16] = (c1-c5)[8] */
  3380. tmp3 = z4 - MULTIPLY(z2, FIX(0.509795579)); /* (c10-c14)[16] = (c5-c7)[8] */
  3381. tmp20 = tmp10 + tmp0;
  3382. tmp27 = tmp10 - tmp0;
  3383. tmp21 = tmp12 + tmp1;
  3384. tmp26 = tmp12 - tmp1;
  3385. tmp22 = tmp13 + tmp2;
  3386. tmp25 = tmp13 - tmp2;
  3387. tmp23 = tmp11 + tmp3;
  3388. tmp24 = tmp11 - tmp3;
  3389. /* Odd part */
  3390. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  3391. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  3392. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  3393. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  3394. tmp11 = z1 + z3;
  3395. tmp1 = MULTIPLY(z1 + z2, FIX(1.353318001)); /* c3 */
  3396. tmp2 = MULTIPLY(tmp11, FIX(1.247225013)); /* c5 */
  3397. tmp3 = MULTIPLY(z1 + z4, FIX(1.093201867)); /* c7 */
  3398. tmp10 = MULTIPLY(z1 - z4, FIX(0.897167586)); /* c9 */
  3399. tmp11 = MULTIPLY(tmp11, FIX(0.666655658)); /* c11 */
  3400. tmp12 = MULTIPLY(z1 - z2, FIX(0.410524528)); /* c13 */
  3401. tmp0 = tmp1 + tmp2 + tmp3 -
  3402. MULTIPLY(z1, FIX(2.286341144)); /* c7+c5+c3-c1 */
  3403. tmp13 = tmp10 + tmp11 + tmp12 -
  3404. MULTIPLY(z1, FIX(1.835730603)); /* c9+c11+c13-c15 */
  3405. z1 = MULTIPLY(z2 + z3, FIX(0.138617169)); /* c15 */
  3406. tmp1 += z1 + MULTIPLY(z2, FIX(0.071888074)); /* c9+c11-c3-c15 */
  3407. tmp2 += z1 - MULTIPLY(z3, FIX(1.125726048)); /* c5+c7+c15-c3 */
  3408. z1 = MULTIPLY(z3 - z2, FIX(1.407403738)); /* c1 */
  3409. tmp11 += z1 - MULTIPLY(z3, FIX(0.766367282)); /* c1+c11-c9-c13 */
  3410. tmp12 += z1 + MULTIPLY(z2, FIX(1.971951411)); /* c1+c5+c13-c7 */
  3411. z2 += z4;
  3412. z1 = MULTIPLY(z2, - FIX(0.666655658)); /* -c11 */
  3413. tmp1 += z1;
  3414. tmp3 += z1 + MULTIPLY(z4, FIX(1.065388962)); /* c3+c11+c15-c7 */
  3415. z2 = MULTIPLY(z2, - FIX(1.247225013)); /* -c5 */
  3416. tmp10 += z2 + MULTIPLY(z4, FIX(3.141271809)); /* c1+c5+c9-c13 */
  3417. tmp12 += z2;
  3418. z2 = MULTIPLY(z3 + z4, - FIX(1.353318001)); /* -c3 */
  3419. tmp2 += z2;
  3420. tmp3 += z2;
  3421. z2 = MULTIPLY(z4 - z3, FIX(0.410524528)); /* c13 */
  3422. tmp10 += z2;
  3423. tmp11 += z2;
  3424. /* Final output stage */
  3425. wsptr[8*0] = (int) RIGHT_SHIFT(tmp20 + tmp0, CONST_BITS-PASS1_BITS);
  3426. wsptr[8*15] = (int) RIGHT_SHIFT(tmp20 - tmp0, CONST_BITS-PASS1_BITS);
  3427. wsptr[8*1] = (int) RIGHT_SHIFT(tmp21 + tmp1, CONST_BITS-PASS1_BITS);
  3428. wsptr[8*14] = (int) RIGHT_SHIFT(tmp21 - tmp1, CONST_BITS-PASS1_BITS);
  3429. wsptr[8*2] = (int) RIGHT_SHIFT(tmp22 + tmp2, CONST_BITS-PASS1_BITS);
  3430. wsptr[8*13] = (int) RIGHT_SHIFT(tmp22 - tmp2, CONST_BITS-PASS1_BITS);
  3431. wsptr[8*3] = (int) RIGHT_SHIFT(tmp23 + tmp3, CONST_BITS-PASS1_BITS);
  3432. wsptr[8*12] = (int) RIGHT_SHIFT(tmp23 - tmp3, CONST_BITS-PASS1_BITS);
  3433. wsptr[8*4] = (int) RIGHT_SHIFT(tmp24 + tmp10, CONST_BITS-PASS1_BITS);
  3434. wsptr[8*11] = (int) RIGHT_SHIFT(tmp24 - tmp10, CONST_BITS-PASS1_BITS);
  3435. wsptr[8*5] = (int) RIGHT_SHIFT(tmp25 + tmp11, CONST_BITS-PASS1_BITS);
  3436. wsptr[8*10] = (int) RIGHT_SHIFT(tmp25 - tmp11, CONST_BITS-PASS1_BITS);
  3437. wsptr[8*6] = (int) RIGHT_SHIFT(tmp26 + tmp12, CONST_BITS-PASS1_BITS);
  3438. wsptr[8*9] = (int) RIGHT_SHIFT(tmp26 - tmp12, CONST_BITS-PASS1_BITS);
  3439. wsptr[8*7] = (int) RIGHT_SHIFT(tmp27 + tmp13, CONST_BITS-PASS1_BITS);
  3440. wsptr[8*8] = (int) RIGHT_SHIFT(tmp27 - tmp13, CONST_BITS-PASS1_BITS);
  3441. }
  3442. /* Pass 2: process rows from work array, store into output array. */
  3443. /* Note that we must descale the results by a factor of 8 == 2**3, */
  3444. /* and also undo the PASS1_BITS scaling. */
  3445. wsptr = workspace;
  3446. for (ctr = 0; ctr < 16; ctr++) {
  3447. outptr = output_buf[ctr] + output_col;
  3448. /* Even part: reverse the even part of the forward DCT. */
  3449. /* The rotator is sqrt(2)*c(-6). */
  3450. z2 = (INT32) wsptr[2];
  3451. z3 = (INT32) wsptr[6];
  3452. z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
  3453. tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865);
  3454. tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065);
  3455. /* Add fudge factor here for final descale. */
  3456. z2 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  3457. z3 = (INT32) wsptr[4];
  3458. tmp0 = (z2 + z3) << CONST_BITS;
  3459. tmp1 = (z2 - z3) << CONST_BITS;
  3460. tmp10 = tmp0 + tmp2;
  3461. tmp13 = tmp0 - tmp2;
  3462. tmp11 = tmp1 + tmp3;
  3463. tmp12 = tmp1 - tmp3;
  3464. /* Odd part per figure 8; the matrix is unitary and hence its
  3465. * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
  3466. */
  3467. tmp0 = (INT32) wsptr[7];
  3468. tmp1 = (INT32) wsptr[5];
  3469. tmp2 = (INT32) wsptr[3];
  3470. tmp3 = (INT32) wsptr[1];
  3471. z2 = tmp0 + tmp2;
  3472. z3 = tmp1 + tmp3;
  3473. z1 = MULTIPLY(z2 + z3, FIX_1_175875602); /* sqrt(2) * c3 */
  3474. z2 = MULTIPLY(z2, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
  3475. z3 = MULTIPLY(z3, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
  3476. z2 += z1;
  3477. z3 += z1;
  3478. z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
  3479. tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
  3480. tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
  3481. tmp0 += z1 + z2;
  3482. tmp3 += z1 + z3;
  3483. z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
  3484. tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
  3485. tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
  3486. tmp1 += z1 + z3;
  3487. tmp2 += z1 + z2;
  3488. /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
  3489. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp3,
  3490. CONST_BITS+PASS1_BITS+3)
  3491. & RANGE_MASK];
  3492. outptr[7] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp3,
  3493. CONST_BITS+PASS1_BITS+3)
  3494. & RANGE_MASK];
  3495. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp2,
  3496. CONST_BITS+PASS1_BITS+3)
  3497. & RANGE_MASK];
  3498. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp2,
  3499. CONST_BITS+PASS1_BITS+3)
  3500. & RANGE_MASK];
  3501. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp1,
  3502. CONST_BITS+PASS1_BITS+3)
  3503. & RANGE_MASK];
  3504. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp1,
  3505. CONST_BITS+PASS1_BITS+3)
  3506. & RANGE_MASK];
  3507. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp13 + tmp0,
  3508. CONST_BITS+PASS1_BITS+3)
  3509. & RANGE_MASK];
  3510. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp13 - tmp0,
  3511. CONST_BITS+PASS1_BITS+3)
  3512. & RANGE_MASK];
  3513. wsptr += DCTSIZE; /* advance pointer to next row */
  3514. }
  3515. }
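/*
 * Illustrative sketch (guarded out): how the two passes share the
 * workspace.  Pass 1 walks one input column at a time and stores
 * element r of that column at wsptr[stride*r]; pass 2 then consumes
 * the array one full row (stride entries) per output scanline.  A tiny
 * assumed model of the access pattern, independent of the DCT math:
 */
#if 0
#include <stdio.h>

int main (void)
{
  int ws[8 * 16];                       /* 8 columns by 16 rows, row-major */
  int col, row;
  for (col = 0; col < 8; col++)         /* pass 1: column at a time */
    for (row = 0; row < 16; row++)
      ws[8 * row + col] = col;          /* wsptr[8*row] with wsptr = ws + col */
  for (row = 0; row < 16; row++)        /* pass 2: row at a time */
    printf("row %2d first entry %d\n", row, ws[8 * row]);
  return 0;
}
#endif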
  3516. /*
  3517. * Perform dequantization and inverse DCT on one block of coefficients,
  3518. * producing a 7x14 output block.
  3519. *
  3520. * 14-point IDCT in pass 1 (columns), 7-point in pass 2 (rows).
  3521. */
  3522. GLOBAL(void)
  3523. jpeg_idct_7x14 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
  3524. JCOEFPTR coef_block,
  3525. JSAMPARRAY output_buf, JDIMENSION output_col)
  3526. {
  3527. INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16;
  3528. INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25, tmp26;
  3529. INT32 z1, z2, z3, z4;
  3530. JCOEFPTR inptr;
  3531. ISLOW_MULT_TYPE * quantptr;
  3532. int * wsptr;
  3533. JSAMPROW outptr;
  3534. JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  3535. int ctr;
  3536. int workspace[7*14]; /* buffers data between passes */
  3537. SHIFT_TEMPS
  3538. /* Pass 1: process columns from input, store into work array.
  3539. * 14-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/28).
  3540. */
  3541. inptr = coef_block;
  3542. quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  3543. wsptr = workspace;
  3544. for (ctr = 0; ctr < 7; ctr++, inptr++, quantptr++, wsptr++) {
  3545. /* Even part */
  3546. z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
  3547. z1 <<= CONST_BITS;
  3548. /* Add fudge factor here for final descale. */
  3549. z1 += ONE << (CONST_BITS-PASS1_BITS-1);
  3550. z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
  3551. z2 = MULTIPLY(z4, FIX(1.274162392)); /* c4 */
  3552. z3 = MULTIPLY(z4, FIX(0.314692123)); /* c12 */
  3553. z4 = MULTIPLY(z4, FIX(0.881747734)); /* c8 */
  3554. tmp10 = z1 + z2;
  3555. tmp11 = z1 + z3;
  3556. tmp12 = z1 - z4;
  3557. tmp23 = RIGHT_SHIFT(z1 - ((z2 + z3 - z4) << 1), /* c0 = (c4+c12-c8)*2 */
  3558. CONST_BITS-PASS1_BITS);
  3559. z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
  3560. z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
  3561. z3 = MULTIPLY(z1 + z2, FIX(1.105676686)); /* c6 */
  3562. tmp13 = z3 + MULTIPLY(z1, FIX(0.273079590)); /* c2-c6 */
  3563. tmp14 = z3 - MULTIPLY(z2, FIX(1.719280954)); /* c6+c10 */
  3564. tmp15 = MULTIPLY(z1, FIX(0.613604268)) - /* c10 */
  3565. MULTIPLY(z2, FIX(1.378756276)); /* c2 */
  3566. tmp20 = tmp10 + tmp13;
  3567. tmp26 = tmp10 - tmp13;
  3568. tmp21 = tmp11 + tmp14;
  3569. tmp25 = tmp11 - tmp14;
  3570. tmp22 = tmp12 + tmp15;
  3571. tmp24 = tmp12 - tmp15;
  3572. /* Odd part */
  3573. z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
  3574. z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
  3575. z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
  3576. z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
  3577. tmp13 = z4 << CONST_BITS;
  3578. tmp14 = z1 + z3;
  3579. tmp11 = MULTIPLY(z1 + z2, FIX(1.334852607)); /* c3 */
  3580. tmp12 = MULTIPLY(tmp14, FIX(1.197448846)); /* c5 */
  3581. tmp10 = tmp11 + tmp12 + tmp13 - MULTIPLY(z1, FIX(1.126980169)); /* c3+c5-c1 */
  3582. tmp14 = MULTIPLY(tmp14, FIX(0.752406978)); /* c9 */
  3583. tmp16 = tmp14 - MULTIPLY(z1, FIX(1.061150426)); /* c9+c11-c13 */
  3584. z1 -= z2;
  3585. tmp15 = MULTIPLY(z1, FIX(0.467085129)) - tmp13; /* c11 */
  3586. tmp16 += tmp15;
  3587. z1 += z4;
  3588. z4 = MULTIPLY(z2 + z3, - FIX(0.158341681)) - tmp13; /* -c13 */
  3589. tmp11 += z4 - MULTIPLY(z2, FIX(0.424103948)); /* c3-c9-c13 */
  3590. tmp12 += z4 - MULTIPLY(z3, FIX(2.373959773)); /* c3+c5-c13 */
  3591. z4 = MULTIPLY(z3 - z2, FIX(1.405321284)); /* c1 */
  3592. tmp14 += z4 + tmp13 - MULTIPLY(z3, FIX(1.6906431334)); /* c1+c9-c11 */
  3593. tmp15 += z4 + MULTIPLY(z2, FIX(0.674957567)); /* c1+c11-c5 */
  3594. tmp13 = (z1 - z3) << PASS1_BITS;
  3595. /* Final output stage */
  3596. wsptr[7*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
  3597. wsptr[7*13] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
  3598. wsptr[7*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
  3599. wsptr[7*12] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
  3600. wsptr[7*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
  3601. wsptr[7*11] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
  3602. wsptr[7*3] = (int) (tmp23 + tmp13);
  3603. wsptr[7*10] = (int) (tmp23 - tmp13);
  3604. wsptr[7*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
  3605. wsptr[7*9] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  3606. wsptr[7*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
  3607. wsptr[7*8] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
  3608. wsptr[7*6] = (int) RIGHT_SHIFT(tmp26 + tmp16, CONST_BITS-PASS1_BITS);
  3609. wsptr[7*7] = (int) RIGHT_SHIFT(tmp26 - tmp16, CONST_BITS-PASS1_BITS);
  3610. }
  3611. /* Pass 2: process 14 rows from work array, store into output array.
  3612. * 7-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/14).
  3613. */
  3614. wsptr = workspace;
  3615. for (ctr = 0; ctr < 14; ctr++) {
  3616. outptr = output_buf[ctr] + output_col;
  3617. /* Even part */
  3618. /* Add fudge factor here for final descale. */
  3619. tmp23 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
  3620. tmp23 <<= CONST_BITS;
  3621. z1 = (INT32) wsptr[2];
  3622. z2 = (INT32) wsptr[4];
  3623. z3 = (INT32) wsptr[6];
  3624. tmp20 = MULTIPLY(z2 - z3, FIX(0.881747734)); /* c4 */
  3625. tmp22 = MULTIPLY(z1 - z2, FIX(0.314692123)); /* c6 */
  3626. tmp21 = tmp20 + tmp22 + tmp23 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */
  3627. tmp10 = z1 + z3;
  3628. z2 -= tmp10;
  3629. tmp10 = MULTIPLY(tmp10, FIX(1.274162392)) + tmp23; /* c2 */
  3630. tmp20 += tmp10 - MULTIPLY(z3, FIX(0.077722536)); /* c2-c4-c6 */
  3631. tmp22 += tmp10 - MULTIPLY(z1, FIX(2.470602249)); /* c2+c4+c6 */
  3632. tmp23 += MULTIPLY(z2, FIX(1.414213562)); /* c0 */
  3633. /* Odd part */
  3634. z1 = (INT32) wsptr[1];
  3635. z2 = (INT32) wsptr[3];
  3636. z3 = (INT32) wsptr[5];
  3637. tmp11 = MULTIPLY(z1 + z2, FIX(0.935414347)); /* (c3+c1-c5)/2 */
  3638. tmp12 = MULTIPLY(z1 - z2, FIX(0.170262339)); /* (c3+c5-c1)/2 */
  3639. tmp10 = tmp11 - tmp12;
  3640. tmp11 += tmp12;
  3641. tmp12 = MULTIPLY(z2 + z3, - FIX(1.378756276)); /* -c1 */
  3642. tmp11 += tmp12;
  3643. z2 = MULTIPLY(z1 + z3, FIX(0.613604268)); /* c5 */
  3644. tmp10 += z2;
  3645. tmp12 += z2 + MULTIPLY(z3, FIX(1.870828693)); /* c3+c1-c5 */
  3646. /* Final output stage */
  3647. outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
  3648. CONST_BITS+PASS1_BITS+3)
  3649. & RANGE_MASK];
  3650. outptr[6] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
  3651. CONST_BITS+PASS1_BITS+3)
  3652. & RANGE_MASK];
  3653. outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
  3654. CONST_BITS+PASS1_BITS+3)
  3655. & RANGE_MASK];
  3656. outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
  3657. CONST_BITS+PASS1_BITS+3)
  3658. & RANGE_MASK];
  3659. outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
  3660. CONST_BITS+PASS1_BITS+3)
  3661. & RANGE_MASK];
  3662. outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
  3663. CONST_BITS+PASS1_BITS+3)
  3664. & RANGE_MASK];
  3665. outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp23,
  3666. CONST_BITS+PASS1_BITS+3)
  3667. & RANGE_MASK];
  3668. wsptr += 7; /* advance pointer to next row */
  3669. }
  3670. }

/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 6x12 output block.
 *
 * 12-point IDCT in pass 1 (columns), 6-point in pass 2 (rows).
 */

GLOBAL(void)
jpeg_idct_6x12 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
                JCOEFPTR coef_block,
                JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24, tmp25;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[6*12];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * 12-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/24).
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 6; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z3 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    z3 += ONE << (CONST_BITS-PASS1_BITS-1);

    z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z4 = MULTIPLY(z4, FIX(1.224744871)); /* c4 */

    tmp10 = z3 + z4;
    tmp11 = z3 - z4;

    z1 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z4 = MULTIPLY(z1, FIX(1.366025404)); /* c2 */
    z1 <<= CONST_BITS;
    z2 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);
    z2 <<= CONST_BITS;

    tmp12 = z1 - z2;

    tmp21 = z3 + tmp12;
    tmp24 = z3 - tmp12;

    tmp12 = z4 + z2;

    tmp20 = tmp10 + tmp12;
    tmp25 = tmp10 - tmp12;

    tmp12 = z4 - z1 - z2;

    tmp22 = tmp11 + tmp12;
    tmp23 = tmp11 - tmp12;

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);

    tmp11 = MULTIPLY(z2, FIX(1.306562965));                  /* c3 */
    tmp14 = MULTIPLY(z2, - FIX_0_541196100);                 /* -c9 */

    tmp10 = z1 + z3;
    tmp15 = MULTIPLY(tmp10 + z4, FIX(0.860918669));          /* c7 */
    tmp12 = tmp15 + MULTIPLY(tmp10, FIX(0.261052384));       /* c5-c7 */
    tmp10 = tmp12 + tmp11 + MULTIPLY(z1, FIX(0.280143716));  /* c1-c5 */
    tmp13 = MULTIPLY(z3 + z4, - FIX(1.045510580));           /* -(c7+c11) */
    tmp12 += tmp13 + tmp14 - MULTIPLY(z3, FIX(1.478575242)); /* c1+c5-c7-c11 */
    tmp13 += tmp15 - tmp11 + MULTIPLY(z4, FIX(1.586706681)); /* c1+c11 */
    tmp15 += tmp14 - MULTIPLY(z1, FIX(0.676326758)) -        /* c7-c11 */
             MULTIPLY(z4, FIX(1.982889723));                 /* c5+c7 */

    z1 -= z4;
    z2 -= z3;
    z3 = MULTIPLY(z1 + z2, FIX_0_541196100);                 /* c9 */
    tmp11 = z3 + MULTIPLY(z1, FIX_0_765366865);              /* c3-c9 */
    tmp14 = z3 - MULTIPLY(z2, FIX_1_847759065);              /* c3+c9 */

    /* Final output stage */

    wsptr[6*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[6*11] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[6*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[6*10] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[6*2] = (int) RIGHT_SHIFT(tmp22 + tmp12, CONST_BITS-PASS1_BITS);
    wsptr[6*9] = (int) RIGHT_SHIFT(tmp22 - tmp12, CONST_BITS-PASS1_BITS);
    wsptr[6*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
    wsptr[6*8] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
    wsptr[6*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
    wsptr[6*7] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
    wsptr[6*5] = (int) RIGHT_SHIFT(tmp25 + tmp15, CONST_BITS-PASS1_BITS);
    wsptr[6*6] = (int) RIGHT_SHIFT(tmp25 - tmp15, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 12 rows from work array, store into output array.
   * 6-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/12).
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 12; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp10 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp10 <<= CONST_BITS;
    tmp12 = (INT32) wsptr[4];
    tmp20 = MULTIPLY(tmp12, FIX(0.707106781)); /* c4 */
    tmp11 = tmp10 + tmp20;
    tmp21 = tmp10 - tmp20 - tmp20;
    tmp20 = (INT32) wsptr[2];
    tmp10 = MULTIPLY(tmp20, FIX(1.224744871)); /* c2 */
    tmp20 = tmp11 + tmp10;
    tmp22 = tmp11 - tmp10;

    /* Odd part */

    z1 = (INT32) wsptr[1];
    z2 = (INT32) wsptr[3];
    z3 = (INT32) wsptr[5];
    tmp11 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */
    tmp10 = tmp11 + ((z1 + z2) << CONST_BITS);
    tmp12 = tmp11 + ((z3 - z2) << CONST_BITS);
    tmp11 = (z1 - z2 - z3) << CONST_BITS;

    /* Final output stage */

    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp20 + tmp10,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[5] = range_limit[(int) RIGHT_SHIFT(tmp20 - tmp10,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp21 + tmp11,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp21 - tmp11,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp22 + tmp12,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp22 - tmp12,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];

    wsptr += 6;		/* advance pointer to next row */
  }
}
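
/*
 * Editorial note (illustrative, not part of the original IJG code): the
 * FIX(x) constants used above are x scaled by 2^CONST_BITS and rounded, so
 * MULTIPLY(v, FIX(x)) approximates v*x with CONST_BITS fraction bits.  With
 * the 8-bit-sample settings of this file (CONST_BITS = 13, PASS1_BITS = 2),
 * a worked example:
 *
 *   FIX(1.224744871) = (INT32) (1.224744871 * 8192 + 0.5) = 10033
 *   (100 * 10033) >> 13 = 1003300 >> 13 = 122     -- close to 100 * 1.2247
 *
 * Pass 1 descales by only CONST_BITS-PASS1_BITS, leaving PASS1_BITS extra
 * fraction bits in the int workspace; pass 2 then removes
 * CONST_BITS+PASS1_BITS+3, which also divides out the factor of 8
 * accumulated by the two 1-D passes.
 */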

/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 5x10 output block.
 *
 * 10-point IDCT in pass 1 (columns), 5-point in pass 2 (rows).
 */

GLOBAL(void)
jpeg_idct_5x10 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
                JCOEFPTR coef_block,
                JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp10, tmp11, tmp12, tmp13, tmp14;
  INT32 tmp20, tmp21, tmp22, tmp23, tmp24;
  INT32 z1, z2, z3, z4, z5;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[5*10];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * 10-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/20).
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 5; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z3 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    z3 += ONE << (CONST_BITS-PASS1_BITS-1);
    z4 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z1 = MULTIPLY(z4, FIX(1.144122806)); /* c4 */
    z2 = MULTIPLY(z4, FIX(0.437016024)); /* c8 */
    tmp10 = z3 + z1;
    tmp11 = z3 - z2;

    tmp22 = RIGHT_SHIFT(z3 - ((z1 - z2) << 1),   /* c0 = (c4-c8)*2 */
                        CONST_BITS-PASS1_BITS);

    z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));    /* c6 */
    tmp12 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c2-c6 */
    tmp13 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c2+c6 */

    tmp20 = tmp10 + tmp12;
    tmp24 = tmp10 - tmp12;
    tmp21 = tmp11 + tmp13;
    tmp23 = tmp11 - tmp13;

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);

    tmp11 = z2 + z4;
    tmp13 = z2 - z4;

    tmp12 = MULTIPLY(tmp13, FIX(0.309016994));   /* (c3-c7)/2 */
    z5 = z3 << CONST_BITS;

    z2 = MULTIPLY(tmp11, FIX(0.951056516));      /* (c3+c7)/2 */
    z4 = z5 + tmp12;

    tmp10 = MULTIPLY(z1, FIX(1.396802247)) + z2 + z4; /* c1 */
    tmp14 = MULTIPLY(z1, FIX(0.221231742)) - z2 + z4; /* c9 */

    z2 = MULTIPLY(tmp11, FIX(0.587785252));      /* (c1-c9)/2 */
    z4 = z5 - tmp12 - (tmp13 << (CONST_BITS - 1));

    tmp12 = (z1 - tmp13 - z3) << PASS1_BITS;

    tmp11 = MULTIPLY(z1, FIX(1.260073511)) - z2 - z4; /* c3 */
    tmp13 = MULTIPLY(z1, FIX(0.642039522)) - z2 + z4; /* c7 */

    /* Final output stage */

    wsptr[5*0] = (int) RIGHT_SHIFT(tmp20 + tmp10, CONST_BITS-PASS1_BITS);
    wsptr[5*9] = (int) RIGHT_SHIFT(tmp20 - tmp10, CONST_BITS-PASS1_BITS);
    wsptr[5*1] = (int) RIGHT_SHIFT(tmp21 + tmp11, CONST_BITS-PASS1_BITS);
    wsptr[5*8] = (int) RIGHT_SHIFT(tmp21 - tmp11, CONST_BITS-PASS1_BITS);
    wsptr[5*2] = (int) (tmp22 + tmp12);
    wsptr[5*7] = (int) (tmp22 - tmp12);
    wsptr[5*3] = (int) RIGHT_SHIFT(tmp23 + tmp13, CONST_BITS-PASS1_BITS);
    wsptr[5*6] = (int) RIGHT_SHIFT(tmp23 - tmp13, CONST_BITS-PASS1_BITS);
    wsptr[5*4] = (int) RIGHT_SHIFT(tmp24 + tmp14, CONST_BITS-PASS1_BITS);
    wsptr[5*5] = (int) RIGHT_SHIFT(tmp24 - tmp14, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 10 rows from work array, store into output array.
   * 5-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/10).
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 10; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp12 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp12 <<= CONST_BITS;
    tmp13 = (INT32) wsptr[2];
    tmp14 = (INT32) wsptr[4];
    z1 = MULTIPLY(tmp13 + tmp14, FIX(0.790569415)); /* (c2+c4)/2 */
    z2 = MULTIPLY(tmp13 - tmp14, FIX(0.353553391)); /* (c2-c4)/2 */
    z3 = tmp12 + z2;
    tmp10 = z3 + z1;
    tmp11 = z3 - z1;
    tmp12 -= z2 << 2;

    /* Odd part */

    z2 = (INT32) wsptr[1];
    z3 = (INT32) wsptr[3];

    z1 = MULTIPLY(z2 + z3, FIX(0.831253876));    /* c3 */
    tmp13 = z1 + MULTIPLY(z2, FIX(0.513743148)); /* c1-c3 */
    tmp14 = z1 - MULTIPLY(z3, FIX(2.176250899)); /* c1+c3 */

    /* Final output stage */

    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp13,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[4] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp13,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp11 + tmp14,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp11 - tmp14,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];

    wsptr += 5;		/* advance pointer to next row */
  }
}
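
/*
 * Editorial note (illustrative, not part of the original IJG code):
 * applications never call these scaled routines directly; the IDCT manager
 * (jddctmgr.c) picks one per component from the component's scaled DCT
 * size.  A minimal usage sketch, assuming a libjpeg version whose scaler
 * accepts N/8 output ratios (as the N = 1..16 routines in this file
 * suggest), kept in a comment so it does not affect compilation:
 *
 *   struct jpeg_decompress_struct cinfo;
 *   struct jpeg_error_mgr jerr;
 *
 *   cinfo.err = jpeg_std_error(&jerr);
 *   jpeg_create_decompress(&cinfo);
 *   jpeg_stdio_src(&cinfo, infile);
 *   (void) jpeg_read_header(&cinfo, TRUE);
 *   cinfo.scale_num = 5;             -- request 5/8-scale output
 *   cinfo.scale_denom = 8;
 *   cinfo.dct_method = JDCT_ISLOW;   -- the routines in this file
 *   (void) jpeg_start_decompress(&cinfo);
 *   ... read scanlines, then jpeg_finish_decompress and
 *   jpeg_destroy_decompress as usual ...
 *
 * Which of the NxN, 2NxN, or Nx2N kernels is used for a given component
 * then depends on its sampling factors as well as the requested scale.
 */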

/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 4x8 output block.
 *
 * 8-point IDCT in pass 1 (columns), 4-point in pass 2 (rows).
 */

GLOBAL(void)
jpeg_idct_4x8 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp3;
  INT32 tmp10, tmp11, tmp12, tmp13;
  INT32 z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[4*8];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */
  /* Note results are scaled up by sqrt(8) compared to a true IDCT; */
  /* furthermore, we scale the results by 2**PASS1_BITS. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 4; ctr > 0; ctr--) {
    /* Due to quantization, we will usually find that many of the input
     * coefficients are zero, especially the AC terms.  We can exploit this
     * by short-circuiting the IDCT calculation for any column in which all
     * the AC terms are zero.  In that case each output is equal to the
     * DC coefficient (with scale factor as needed).
     * With typical images and quantization tables, half or more of the
     * column DCT calculations can be simplified this way.
     */
    if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 &&
        inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*4] == 0 &&
        inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*6] == 0 &&
        inptr[DCTSIZE*7] == 0) {
      /* AC terms all zero */
      int dcval = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) << PASS1_BITS;

      wsptr[4*0] = dcval;
      wsptr[4*1] = dcval;
      wsptr[4*2] = dcval;
      wsptr[4*3] = dcval;
      wsptr[4*4] = dcval;
      wsptr[4*5] = dcval;
      wsptr[4*6] = dcval;
      wsptr[4*7] = dcval;

      inptr++;			/* advance pointers to next column */
      quantptr++;
      wsptr++;
      continue;
    }

    /* Even part: reverse the even part of the forward DCT. */
    /* The rotator is sqrt(2)*c(-6). */

    z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
    tmp2 = z1 + MULTIPLY(z2, FIX_0_765366865);
    tmp3 = z1 - MULTIPLY(z3, FIX_1_847759065);

    z2 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    z2 <<= CONST_BITS;
    z3 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    z2 += ONE << (CONST_BITS-PASS1_BITS-1);

    tmp0 = z2 + z3;
    tmp1 = z2 - z3;

    tmp10 = tmp0 + tmp2;
    tmp13 = tmp0 - tmp2;
    tmp11 = tmp1 + tmp3;
    tmp12 = tmp1 - tmp3;

    /* Odd part per figure 8; the matrix is unitary and hence its
     * transpose is its inverse.  i0..i3 are y7,y5,y3,y1 respectively.
     */

    tmp0 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    tmp1 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    tmp3 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);

    z2 = tmp0 + tmp2;
    z3 = tmp1 + tmp3;

    z1 = MULTIPLY(z2 + z3, FIX_1_175875602);       /* sqrt(2) * c3 */
    z2 = MULTIPLY(z2, - FIX_1_961570560);          /* sqrt(2) * (-c3-c5) */
    z3 = MULTIPLY(z3, - FIX_0_390180644);          /* sqrt(2) * (c5-c3) */
    z2 += z1;
    z3 += z1;

    z1 = MULTIPLY(tmp0 + tmp3, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
    tmp0 = MULTIPLY(tmp0, FIX_0_298631336);        /* sqrt(2) * (-c1+c3+c5-c7) */
    tmp3 = MULTIPLY(tmp3, FIX_1_501321110);        /* sqrt(2) * ( c1+c3-c5-c7) */
    tmp0 += z1 + z2;
    tmp3 += z1 + z3;

    z1 = MULTIPLY(tmp1 + tmp2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
    tmp1 = MULTIPLY(tmp1, FIX_2_053119869);        /* sqrt(2) * ( c1+c3-c5+c7) */
    tmp2 = MULTIPLY(tmp2, FIX_3_072711026);        /* sqrt(2) * ( c1+c3+c5-c7) */
    tmp1 += z1 + z3;
    tmp2 += z1 + z2;

    /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */

    wsptr[4*0] = (int) RIGHT_SHIFT(tmp10 + tmp3, CONST_BITS-PASS1_BITS);
    wsptr[4*7] = (int) RIGHT_SHIFT(tmp10 - tmp3, CONST_BITS-PASS1_BITS);
    wsptr[4*1] = (int) RIGHT_SHIFT(tmp11 + tmp2, CONST_BITS-PASS1_BITS);
    wsptr[4*6] = (int) RIGHT_SHIFT(tmp11 - tmp2, CONST_BITS-PASS1_BITS);
    wsptr[4*2] = (int) RIGHT_SHIFT(tmp12 + tmp1, CONST_BITS-PASS1_BITS);
    wsptr[4*5] = (int) RIGHT_SHIFT(tmp12 - tmp1, CONST_BITS-PASS1_BITS);
    wsptr[4*3] = (int) RIGHT_SHIFT(tmp13 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[4*4] = (int) RIGHT_SHIFT(tmp13 - tmp0, CONST_BITS-PASS1_BITS);

    inptr++;			/* advance pointers to next column */
    quantptr++;
    wsptr++;
  }

  /* Pass 2: process 8 rows from work array, store into output array.
   * 4-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/16).
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 8; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp2 = (INT32) wsptr[2];

    tmp10 = (tmp0 + tmp2) << CONST_BITS;
    tmp12 = (tmp0 - tmp2) << CONST_BITS;

    /* Odd part */
    /* Same rotation as in the even part of the 8x8 LL&M IDCT */

    z2 = (INT32) wsptr[1];
    z3 = (INT32) wsptr[3];

    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);   /* c6 */
    tmp0 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */
    tmp2 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */

    /* Final output stage */

    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[3] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp12 + tmp2,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp12 - tmp2,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];

    wsptr += 4;		/* advance pointer to next row */
  }
}
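
/*
 * Editorial note (illustrative, not part of the original IJG code): the
 * zero-AC short cut in pass 1 above relies on the fact that a 1-D IDCT of a
 * vector whose only nonzero entry is the DC term is constant:
 *
 *   IDCT(dc, 0, ..., 0)[n] = dc * s      for every output index n,
 *
 * where s is the kernel's fixed scale factor (absorbed here by simply
 * shifting the dequantized DC left by PASS1_BITS).  So the whole column can
 * be filled with one value and the multiply/add network skipped.  A
 * corresponding row test is not repeated in pass 2, since pass 1 tends to
 * spread nonzero values across the work array and the test would rarely pay
 * off there.
 */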

/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 3x6 output block.
 *
 * 6-point IDCT in pass 1 (columns), 3-point in pass 2 (rows).
 */

GLOBAL(void)
jpeg_idct_3x6 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp1, tmp2, tmp10, tmp11, tmp12;
  INT32 z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[3*6];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * 6-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/12).
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 3; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp0 <<= CONST_BITS;
    /* Add fudge factor here for final descale. */
    tmp0 += ONE << (CONST_BITS-PASS1_BITS-1);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);
    tmp10 = MULTIPLY(tmp2, FIX(0.707106781)); /* c4 */
    tmp1 = tmp0 + tmp10;
    tmp11 = RIGHT_SHIFT(tmp0 - tmp10 - tmp10, CONST_BITS-PASS1_BITS);
    tmp10 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    tmp0 = MULTIPLY(tmp10, FIX(1.224744871)); /* c2 */
    tmp10 = tmp1 + tmp0;
    tmp12 = tmp1 - tmp0;

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    tmp1 = MULTIPLY(z1 + z3, FIX(0.366025404)); /* c5 */
    tmp0 = tmp1 + ((z1 + z2) << CONST_BITS);
    tmp2 = tmp1 + ((z3 - z2) << CONST_BITS);
    tmp1 = (z1 - z2 - z3) << PASS1_BITS;

    /* Final output stage */

    wsptr[3*0] = (int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS-PASS1_BITS);
    wsptr[3*5] = (int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS-PASS1_BITS);
    wsptr[3*1] = (int) (tmp11 + tmp1);
    wsptr[3*4] = (int) (tmp11 - tmp1);
    wsptr[3*2] = (int) RIGHT_SHIFT(tmp12 + tmp2, CONST_BITS-PASS1_BITS);
    wsptr[3*3] = (int) RIGHT_SHIFT(tmp12 - tmp2, CONST_BITS-PASS1_BITS);
  }

  /* Pass 2: process 6 rows from work array, store into output array.
   * 3-point IDCT kernel, cK represents sqrt(2) * cos(K*pi/6).
   */
  wsptr = workspace;
  for (ctr = 0; ctr < 6; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp0 = (INT32) wsptr[0] + (ONE << (PASS1_BITS+2));
    tmp0 <<= CONST_BITS;
    tmp2 = (INT32) wsptr[2];
    tmp12 = MULTIPLY(tmp2, FIX(0.707106781)); /* c2 */
    tmp10 = tmp0 + tmp12;
    tmp2 = tmp0 - tmp12 - tmp12;

    /* Odd part */

    tmp12 = (INT32) wsptr[1];
    tmp0 = MULTIPLY(tmp12, FIX(1.224744871)); /* c1 */

    /* Final output stage */

    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[2] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp2,
                                              CONST_BITS+PASS1_BITS+3)
                            & RANGE_MASK];

    wsptr += 3;		/* advance pointer to next row */
  }
}
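
/*
 * Editorial note (illustrative, not part of the original IJG code): written
 * out as equations, the 3-point row kernel above computes, for row inputs
 * x0, x1, x2 and cK = sqrt(2)*cos(K*pi/6),
 *
 *   y0 = x0 + c2*x2 + c1*x1
 *   y1 = x0 - 2*c2*x2
 *   y2 = x0 + c2*x2 - c1*x1
 *
 * with c1 = FIX(1.224744871) and c2 = FIX(0.707106781) in fixed point.
 * Only two multiplications per row are needed, because y0 and y2 share the
 * common term x0 + c2*x2 and y1 reuses c2*x2 by subtracting it twice.
 */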

/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 2x4 output block.
 *
 * 4-point IDCT in pass 1 (columns), 2-point in pass 2 (rows).
 */

GLOBAL(void)
jpeg_idct_2x4 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp2, tmp10, tmp12;
  INT32 z1, z2, z3;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  INT32 * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  INT32 workspace[2*4];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array.
   * 4-point IDCT kernel,
   * cK represents sqrt(2) * cos(K*pi/16) [refers to 8-point IDCT].
   */
  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = 0; ctr < 2; ctr++, inptr++, quantptr++, wsptr++) {
    /* Even part */

    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);

    tmp10 = (tmp0 + tmp2) << CONST_BITS;
    tmp12 = (tmp0 - tmp2) << CONST_BITS;

    /* Odd part */
    /* Same rotation as in the even part of the 8x8 LL&M IDCT */

    z2 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);

    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);   /* c6 */
    tmp0 = z1 + MULTIPLY(z2, FIX_0_765366865); /* c2-c6 */
    tmp2 = z1 - MULTIPLY(z3, FIX_1_847759065); /* c2+c6 */

    /* Final output stage */

    wsptr[2*0] = tmp10 + tmp0;
    wsptr[2*3] = tmp10 - tmp0;
    wsptr[2*1] = tmp12 + tmp2;
    wsptr[2*2] = tmp12 - tmp2;
  }

  /* Pass 2: process 4 rows from work array, store into output array. */
  wsptr = workspace;
  for (ctr = 0; ctr < 4; ctr++) {
    outptr = output_buf[ctr] + output_col;

    /* Even part */

    /* Add fudge factor here for final descale. */
    tmp10 = wsptr[0] + (ONE << (CONST_BITS+2));

    /* Odd part */

    tmp0 = wsptr[1];

    /* Final output stage */

    outptr[0] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, CONST_BITS+3)
                            & RANGE_MASK];
    outptr[1] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, CONST_BITS+3)
                            & RANGE_MASK];

    wsptr += 2;		/* advance pointer to next row */
  }
}
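
/*
 * Editorial note (illustrative, not part of the original IJG code): unlike
 * the larger kernels, this routine keeps the pass-1 results at full
 * precision in an INT32 workspace with no intermediate descale.  The trivial
 * 2-point row pass has no multiplications, so a single right shift by
 * CONST_BITS+3 at the very end removes all of the fraction bits at once:
 *
 *   out0 = (a + b + (1 << (CONST_BITS+2))) >> (CONST_BITS+3)
 *   out1 = (a - b + (1 << (CONST_BITS+2))) >> (CONST_BITS+3)
 *
 * where a and b are the two workspace entries of the row and the added
 * constant is the rounding bias (half of the final divisor), folded into a
 * in the code above.
 */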

/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a 1x2 output block.
 *
 * 2-point IDCT in pass 1 (columns), 1-point in pass 2 (rows).
 */

GLOBAL(void)
jpeg_idct_1x2 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
               JCOEFPTR coef_block,
               JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp10;
  ISLOW_MULT_TYPE * quantptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  SHIFT_TEMPS

  /* Process 1 column from input, store into output array. */

  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;

  /* Even part */

  tmp10 = DEQUANTIZE(coef_block[DCTSIZE*0], quantptr[DCTSIZE*0]);
  /* Add fudge factor here for final descale. */
  tmp10 += ONE << 2;

  /* Odd part */

  tmp0 = DEQUANTIZE(coef_block[DCTSIZE*1], quantptr[DCTSIZE*1]);

  /* Final output stage */

  output_buf[0][output_col] = range_limit[(int) RIGHT_SHIFT(tmp10 + tmp0, 3)
                                          & RANGE_MASK];
  output_buf[1][output_col] = range_limit[(int) RIGHT_SHIFT(tmp10 - tmp0, 3)
                                          & RANGE_MASK];
}
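
/*
 * Editorial note (illustrative, not part of the original IJG code): in the
 * 1x2 case the whole transform collapses to a single 2-point butterfly on
 * the two lowest-frequency coefficients, followed by a divide-by-8 with
 * rounding.  A worked example with made-up numbers: if the dequantized DC
 * term is 1000 and the dequantized coef_block[DCTSIZE*1] term is 80, then
 *
 *   out0 = (1000 + 80 + 4) >> 3 = 135
 *   out1 = (1000 - 80 + 4) >> 3 = 115
 *
 * and both values are finally clamped through range_limit[] as usual.
 */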

#endif /* IDCT_SCALING_SUPPORTED */
#endif /* DCT_ISLOW_SUPPORTED */