
/project/jni/sdl-1.3/src/video/SDL_blit_N.c

https://github.com/aichunyu/FFPlayer
  1. /*
  2. Simple DirectMedia Layer
  3. Copyright (C) 1997-2012 Sam Lantinga <slouken@libsdl.org>
  4. This software is provided 'as-is', without any express or implied
  5. warranty. In no event will the authors be held liable for any damages
  6. arising from the use of this software.
  7. Permission is granted to anyone to use this software for any purpose,
  8. including commercial applications, and to alter it and redistribute it
  9. freely, subject to the following restrictions:
  10. 1. The origin of this software must not be misrepresented; you must not
  11. claim that you wrote the original software. If you use this software
  12. in a product, an acknowledgment in the product documentation would be
  13. appreciated but is not required.
  14. 2. Altered source versions must be plainly marked as such, and must not be
  15. misrepresented as being the original software.
  16. 3. This notice may not be removed or altered from any source distribution.
  17. */
  18. #include "SDL_config.h"
  19. #include "SDL_video.h"
  20. #include "SDL_endian.h"
  21. #include "SDL_cpuinfo.h"
  22. #include "SDL_blit.h"
  23. #include "SDL_assert.h"
  24. /* Functions to blit from N-bit surfaces to other surfaces */
  25. #if SDL_ALTIVEC_BLITTERS
  26. #ifdef HAVE_ALTIVEC_H
  27. #include <altivec.h>
  28. #endif
  29. #ifdef __MACOSX__
  30. #include <sys/sysctl.h>
  31. static size_t
  32. GetL3CacheSize(void)
  33. {
  34. const char key[] = "hw.l3cachesize";
  35. u_int64_t result = 0;
  36. size_t typeSize = sizeof(result);
  37. int err = sysctlbyname(key, &result, &typeSize, NULL, 0);
  38. if (0 != err)
  39. return 0;
  40. return result;
  41. }
  42. #else
  43. static size_t
  44. GetL3CacheSize(void)
  45. {
  46. /* XXX: Just guess G4 */
  47. return 2097152;
  48. }
  49. #endif /* __MACOSX__ */
  50. #if (defined(__MACOSX__) && (__GNUC__ < 4))
  51. #define VECUINT8_LITERAL(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) \
  52. (vector unsigned char) ( a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p )
  53. #define VECUINT16_LITERAL(a,b,c,d,e,f,g,h) \
  54. (vector unsigned short) ( a,b,c,d,e,f,g,h )
  55. #else
  56. #define VECUINT8_LITERAL(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p) \
  57. (vector unsigned char) { a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p }
  58. #define VECUINT16_LITERAL(a,b,c,d,e,f,g,h) \
  59. (vector unsigned short) { a,b,c,d,e,f,g,h }
  60. #endif
  61. #define UNALIGNED_PTR(x) (((size_t) x) & 0x0000000F)
  62. #define VSWIZZLE32(a,b,c,d) (vector unsigned char) \
  63. ( 0x00+a, 0x00+b, 0x00+c, 0x00+d, \
  64. 0x04+a, 0x04+b, 0x04+c, 0x04+d, \
  65. 0x08+a, 0x08+b, 0x08+c, 0x08+d, \
  66. 0x0C+a, 0x0C+b, 0x0C+c, 0x0C+d )
  67. #define MAKE8888(dstfmt, r, g, b, a) \
  68. ( ((r<<dstfmt->Rshift)&dstfmt->Rmask) | \
  69. ((g<<dstfmt->Gshift)&dstfmt->Gmask) | \
  70. ((b<<dstfmt->Bshift)&dstfmt->Bmask) | \
  71. ((a<<dstfmt->Ashift)&dstfmt->Amask) )
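/*
 * Worked example (illustrative, assuming an ARGB8888 destination format with
 * Rshift=16, Gshift=8, Bshift=0, Ashift=24 and the matching 0xFF masks):
 *   MAKE8888(dstfmt, 0x12, 0x34, 0x56, 0xFF)
 *     == (0x12 << 16) | (0x34 << 8) | 0x56 | (0xFF << 24)
 *     == 0xFF123456
 */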
  72. /*
  73. * Data Stream Touch...Altivec cache prefetching.
  74. *
  75. * Don't use this on a G5...however, the speed boost is very significant
  76. * on a G4.
  77. */
  78. #define DST_CHAN_SRC 1
  79. #define DST_CHAN_DEST 2
  80. /* macro to set DST control word value... */
  81. #define DST_CTRL(size, count, stride) \
  82. (((size) << 24) | ((count) << 16) | (stride))
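/*
 * Worked example (illustrative): DST_CTRL(2, 32, 1024), the value used by the
 * prefetching converter below, packs to (2 << 24) | (32 << 16) | 1024
 * == 0x02200400. Per the PowerPC data-stream-touch encoding, that asks the
 * hardware to fetch 32 blocks of 2 vectors (32 bytes) each, 1024 bytes apart.
 */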
  83. #define VEC_ALIGNER(src) ((UNALIGNED_PTR(src)) \
  84. ? vec_lvsl(0, src) \
  85. : vec_add(vec_lvsl(8, src), vec_splat_u8(8)))
  86. /* Calculate the permute vector used for 32->32 swizzling */
  87. static vector unsigned char
  88. calc_swizzle32(const SDL_PixelFormat * srcfmt, const SDL_PixelFormat * dstfmt)
  89. {
  90. /*
  91. * We have to assume that the bits that aren't used by other
  92. * colors is alpha, and it's one complete byte, since some formats
  93. * leave alpha with a zero mask, but we should still swizzle the bits.
  94. */
  95. /* ARGB */
  96. static const struct SDL_PixelFormat default_pixel_format = {
  97. 0, NULL, 0, 0,
  98. {0, 0},
  99. 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000,
  100. 0, 0, 0, 0,
  101. 16, 8, 0, 24,
  102. 0, NULL
  103. };
  104. if (!srcfmt) {
  105. srcfmt = &default_pixel_format;
  106. }
  107. if (!dstfmt) {
  108. dstfmt = &default_pixel_format;
  109. }
  110. const vector unsigned char plus = VECUINT8_LITERAL(0x00, 0x00, 0x00, 0x00,
  111. 0x04, 0x04, 0x04, 0x04,
  112. 0x08, 0x08, 0x08, 0x08,
  113. 0x0C, 0x0C, 0x0C,
  114. 0x0C);
  115. vector unsigned char vswiz;
  116. vector unsigned int srcvec;
  117. #define RESHIFT(X) (3 - ((X) >> 3))
  118. Uint32 rmask = RESHIFT(srcfmt->Rshift) << (dstfmt->Rshift);
  119. Uint32 gmask = RESHIFT(srcfmt->Gshift) << (dstfmt->Gshift);
  120. Uint32 bmask = RESHIFT(srcfmt->Bshift) << (dstfmt->Bshift);
  121. Uint32 amask;
  122. /* Use zero for alpha if either surface doesn't have alpha */
  123. if (dstfmt->Amask) {
  124. amask =
  125. ((srcfmt->Amask) ? RESHIFT(srcfmt->Ashift) : 0x10)
  126. << (dstfmt->Ashift);
  127. } else {
  128. amask =
  129. 0x10101010 & ((dstfmt->Rmask | dstfmt->Gmask | dstfmt->Bmask) ^
  130. 0xFFFFFFFF);
  131. }
  132. #undef RESHIFT
  133. ((unsigned int *) (char *) &srcvec)[0] = (rmask | gmask | bmask | amask);
  134. vswiz = vec_add(plus, (vector unsigned char) vec_splat(srcvec, 0));
  135. return (vswiz);
  136. }
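/*
 * Worked example (illustrative, assuming both surfaces are ARGB8888 on a
 * big-endian CPU): RESHIFT() maps a channel shift of 0/8/16/24 to that
 * channel's byte index 3/2/1/0 within the 32-bit pixel, so
 *   rmask = 1 << 16, gmask = 2 << 8, bmask = 3, amask = 0 << 24
 * and srcvec holds 0x00010203, the identity byte select. vec_add() with
 * 'plus' then replicates that pattern for all four pixels of the 16-byte
 * vector that callers hand to vec_perm().
 */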
  137. static void Blit_RGB888_RGB565(SDL_BlitInfo * info);
  138. static void
  139. Blit_RGB888_RGB565Altivec(SDL_BlitInfo * info)
  140. {
  141. int height = info->dst_h;
  142. Uint8 *src = (Uint8 *) info->src;
  143. int srcskip = info->src_skip;
  144. Uint8 *dst = (Uint8 *) info->dst;
  145. int dstskip = info->dst_skip;
  146. SDL_PixelFormat *srcfmt = info->src_fmt;
  147. vector unsigned char valpha = vec_splat_u8(0);
  148. vector unsigned char vpermute = calc_swizzle32(srcfmt, NULL);
  149. vector unsigned char vgmerge = VECUINT8_LITERAL(0x00, 0x02, 0x00, 0x06,
  150. 0x00, 0x0a, 0x00, 0x0e,
  151. 0x00, 0x12, 0x00, 0x16,
  152. 0x00, 0x1a, 0x00, 0x1e);
  153. vector unsigned short v1 = vec_splat_u16(1);
  154. vector unsigned short v3 = vec_splat_u16(3);
  155. vector unsigned short v3f =
  156. VECUINT16_LITERAL(0x003f, 0x003f, 0x003f, 0x003f,
  157. 0x003f, 0x003f, 0x003f, 0x003f);
  158. vector unsigned short vfc =
  159. VECUINT16_LITERAL(0x00fc, 0x00fc, 0x00fc, 0x00fc,
  160. 0x00fc, 0x00fc, 0x00fc, 0x00fc);
  161. vector unsigned short vf800 = (vector unsigned short) vec_splat_u8(-7);
  162. vf800 = vec_sl(vf800, vec_splat_u16(8));
  163. while (height--) {
  164. vector unsigned char valigner;
  165. vector unsigned char voverflow;
  166. vector unsigned char vsrc;
  167. int width = info->dst_w;
  168. int extrawidth;
  169. /* do scalar until we can align... */
  170. #define ONE_PIXEL_BLEND(condition, widthvar) \
  171. while (condition) { \
  172. Uint32 Pixel; \
  173. unsigned sR, sG, sB, sA; \
  174. DISEMBLE_RGBA((Uint8 *)src, 4, srcfmt, Pixel, \
  175. sR, sG, sB, sA); \
  176. *(Uint16 *)(dst) = (((sR << 8) & 0x0000F800) | \
  177. ((sG << 3) & 0x000007E0) | \
  178. ((sB >> 3) & 0x0000001F)); \
  179. dst += 2; \
  180. src += 4; \
  181. widthvar--; \
  182. }
  183. ONE_PIXEL_BLEND(((UNALIGNED_PTR(dst)) && (width)), width);
  184. /* After all that work, here's the vector part! */
  185. extrawidth = (width % 8); /* trailing unaligned stores */
  186. width -= extrawidth;
  187. vsrc = vec_ld(0, src);
  188. valigner = VEC_ALIGNER(src);
  189. while (width) {
  190. vector unsigned short vpixel, vrpixel, vgpixel, vbpixel;
  191. vector unsigned int vsrc1, vsrc2;
  192. vector unsigned char vdst;
  193. voverflow = vec_ld(15, src);
  194. vsrc = vec_perm(vsrc, voverflow, valigner);
  195. vsrc1 = (vector unsigned int) vec_perm(vsrc, valpha, vpermute);
  196. src += 16;
  197. vsrc = voverflow;
  198. voverflow = vec_ld(15, src);
  199. vsrc = vec_perm(vsrc, voverflow, valigner);
  200. vsrc2 = (vector unsigned int) vec_perm(vsrc, valpha, vpermute);
  201. /* 1555 */
  202. vpixel = (vector unsigned short) vec_packpx(vsrc1, vsrc2);
  203. vgpixel = (vector unsigned short) vec_perm(vsrc1, vsrc2, vgmerge);
  204. vgpixel = vec_and(vgpixel, vfc);
  205. vgpixel = vec_sl(vgpixel, v3);
  206. vrpixel = vec_sl(vpixel, v1);
  207. vrpixel = vec_and(vrpixel, vf800);
  208. vbpixel = vec_and(vpixel, v3f);
  209. vdst =
  210. vec_or((vector unsigned char) vrpixel,
  211. (vector unsigned char) vgpixel);
  212. /* 565 */
  213. vdst = vec_or(vdst, (vector unsigned char) vbpixel);
  214. vec_st(vdst, 0, dst);
  215. width -= 8;
  216. src += 16;
  217. dst += 16;
  218. vsrc = voverflow;
  219. }
  220. SDL_assert(width == 0);
  221. /* do scalar until we can align... */
  222. ONE_PIXEL_BLEND((extrawidth), extrawidth);
  223. #undef ONE_PIXEL_BLEND
  224. src += srcskip; /* move to next row, accounting for pitch. */
  225. dst += dstskip;
  226. }
  227. }
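/*
 * Reference sketch (illustrative only; Sketch_Pack888To565 is not an SDL
 * function): the plain-C equivalent of the 888 -> 565 pack performed by the
 * ONE_PIXEL_BLEND macro above, once the channels are extracted as 8-bit
 * values. For example sR=0xFF, sG=0x80, sB=0x40 packs to
 * 0xF800 | 0x0400 | 0x0008 == 0xFC08.
 */
static Uint16
Sketch_Pack888To565(unsigned sR, unsigned sG, unsigned sB)
{
    return (Uint16) (((sR << 8) & 0xF800) |     /* top 5 bits of red    */
                     ((sG << 3) & 0x07E0) |     /* top 6 bits of green  */
                     ((sB >> 3) & 0x001F));     /* top 5 bits of blue   */
}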
  228. static void
  229. Blit_RGB565_32Altivec(SDL_BlitInfo * info)
  230. {
  231. int height = info->dst_h;
  232. Uint8 *src = (Uint8 *) info->src;
  233. int srcskip = info->src_skip;
  234. Uint8 *dst = (Uint8 *) info->dst;
  235. int dstskip = info->dst_skip;
  236. SDL_PixelFormat *srcfmt = info->src_fmt;
  237. SDL_PixelFormat *dstfmt = info->dst_fmt;
  238. unsigned alpha;
  239. vector unsigned char valpha;
  240. vector unsigned char vpermute;
  241. vector unsigned short vf800;
  242. vector unsigned int v8 = vec_splat_u32(8);
  243. vector unsigned int v16 = vec_add(v8, v8);
  244. vector unsigned short v2 = vec_splat_u16(2);
  245. vector unsigned short v3 = vec_splat_u16(3);
  246. /*
  247. 0x10 - 0x1f is the alpha
  248. 0x00 - 0x0e evens are the red
  249. 0x01 - 0x0f odds are zero
  250. */
  251. vector unsigned char vredalpha1 = VECUINT8_LITERAL(0x10, 0x00, 0x01, 0x01,
  252. 0x10, 0x02, 0x01, 0x01,
  253. 0x10, 0x04, 0x01, 0x01,
  254. 0x10, 0x06, 0x01,
  255. 0x01);
  256. vector unsigned char vredalpha2 =
  257. (vector unsigned char)
  258. (vec_add((vector unsigned int) vredalpha1, vec_sl(v8, v16))
  259. );
  260. /*
  261. 0x00 - 0x0f is ARxx ARxx ARxx ARxx
  262. 0x11 - 0x1f odds are blue
  263. */
  264. vector unsigned char vblue1 = VECUINT8_LITERAL(0x00, 0x01, 0x02, 0x11,
  265. 0x04, 0x05, 0x06, 0x13,
  266. 0x08, 0x09, 0x0a, 0x15,
  267. 0x0c, 0x0d, 0x0e, 0x17);
  268. vector unsigned char vblue2 =
  269. (vector unsigned char) (vec_add((vector unsigned int) vblue1, v8)
  270. );
  271. /*
  272. 0x00 - 0x0f is ARxB ARxB ARxB ARxB
  273. 0x10 - 0x1e evens are green
  274. */
  275. vector unsigned char vgreen1 = VECUINT8_LITERAL(0x00, 0x01, 0x10, 0x03,
  276. 0x04, 0x05, 0x12, 0x07,
  277. 0x08, 0x09, 0x14, 0x0b,
  278. 0x0c, 0x0d, 0x16, 0x0f);
  279. vector unsigned char vgreen2 =
  280. (vector unsigned char)
  281. (vec_add((vector unsigned int) vgreen1, vec_sl(v8, v8))
  282. );
  283. SDL_assert(srcfmt->BytesPerPixel == 2);
  284. SDL_assert(dstfmt->BytesPerPixel == 4);
  285. vf800 = (vector unsigned short) vec_splat_u8(-7);
  286. vf800 = vec_sl(vf800, vec_splat_u16(8));
  287. if (dstfmt->Amask && info->a) {
  288. ((unsigned char *) &valpha)[0] = alpha = info->a;
  289. valpha = vec_splat(valpha, 0);
  290. } else {
  291. alpha = 0;
  292. valpha = vec_splat_u8(0);
  293. }
  294. vpermute = calc_swizzle32(NULL, dstfmt);
  295. while (height--) {
  296. vector unsigned char valigner;
  297. vector unsigned char voverflow;
  298. vector unsigned char vsrc;
  299. int width = info->dst_w;
  300. int extrawidth;
  301. /* do scalar until we can align... */
  302. #define ONE_PIXEL_BLEND(condition, widthvar) \
  303. while (condition) { \
  304. unsigned sR, sG, sB; \
  305. unsigned short Pixel = *((unsigned short *)src); \
  306. sR = (Pixel >> 8) & 0xf8; \
  307. sG = (Pixel >> 3) & 0xfc; \
  308. sB = (Pixel << 3) & 0xf8; \
  309. ASSEMBLE_RGBA(dst, 4, dstfmt, sR, sG, sB, alpha); \
  310. src += 2; \
  311. dst += 4; \
  312. widthvar--; \
  313. }
  314. ONE_PIXEL_BLEND(((UNALIGNED_PTR(dst)) && (width)), width);
  315. /* After all that work, here's the vector part! */
  316. extrawidth = (width % 8); /* trailing unaligned stores */
  317. width -= extrawidth;
  318. vsrc = vec_ld(0, src);
  319. valigner = VEC_ALIGNER(src);
  320. while (width) {
  321. vector unsigned short vR, vG, vB;
  322. vector unsigned char vdst1, vdst2;
  323. voverflow = vec_ld(15, src);
  324. vsrc = vec_perm(vsrc, voverflow, valigner);
  325. vR = vec_and((vector unsigned short) vsrc, vf800);
  326. vB = vec_sl((vector unsigned short) vsrc, v3);
  327. vG = vec_sl(vB, v2);
  328. vdst1 =
  329. (vector unsigned char) vec_perm((vector unsigned char) vR,
  330. valpha, vredalpha1);
  331. vdst1 = vec_perm(vdst1, (vector unsigned char) vB, vblue1);
  332. vdst1 = vec_perm(vdst1, (vector unsigned char) vG, vgreen1);
  333. vdst1 = vec_perm(vdst1, valpha, vpermute);
  334. vec_st(vdst1, 0, dst);
  335. vdst2 =
  336. (vector unsigned char) vec_perm((vector unsigned char) vR,
  337. valpha, vredalpha2);
  338. vdst2 = vec_perm(vdst2, (vector unsigned char) vB, vblue2);
  339. vdst2 = vec_perm(vdst2, (vector unsigned char) vG, vgreen2);
  340. vdst2 = vec_perm(vdst2, valpha, vpermute);
  341. vec_st(vdst2, 16, dst);
  342. width -= 8;
  343. dst += 32;
  344. src += 16;
  345. vsrc = voverflow;
  346. }
  347. SDL_assert(width == 0);
  348. /* do scalar until we can align... */
  349. ONE_PIXEL_BLEND((extrawidth), extrawidth);
  350. #undef ONE_PIXEL_BLEND
  351. src += srcskip; /* move to next row, accounting for pitch. */
  352. dst += dstskip;
  353. }
  354. }
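/*
 * Reference sketch (illustrative only; Sketch_Unpack565 is not an SDL
 * function): the scalar expansion applied per pixel by the ONE_PIXEL_BLEND
 * macro above. The low bits of each channel are left at zero rather than
 * replicated, so 0xFC08 expands to sR=0xF8, sG=0x80, sB=0x40.
 */
static void
Sketch_Unpack565(Uint16 Pixel, unsigned *sR, unsigned *sG, unsigned *sB)
{
    *sR = (Pixel >> 8) & 0xF8;      /* bits 15-11, red   */
    *sG = (Pixel >> 3) & 0xFC;      /* bits 10-5,  green */
    *sB = (Pixel << 3) & 0xF8;      /* bits 4-0,   blue  */
}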
  355. static void
  356. Blit_RGB555_32Altivec(SDL_BlitInfo * info)
  357. {
  358. int height = info->dst_h;
  359. Uint8 *src = (Uint8 *) info->src;
  360. int srcskip = info->src_skip;
  361. Uint8 *dst = (Uint8 *) info->dst;
  362. int dstskip = info->dst_skip;
  363. SDL_PixelFormat *srcfmt = info->src_fmt;
  364. SDL_PixelFormat *dstfmt = info->dst_fmt;
  365. unsigned alpha;
  366. vector unsigned char valpha;
  367. vector unsigned char vpermute;
  368. vector unsigned short vf800;
  369. vector unsigned int v8 = vec_splat_u32(8);
  370. vector unsigned int v16 = vec_add(v8, v8);
  371. vector unsigned short v1 = vec_splat_u16(1);
  372. vector unsigned short v3 = vec_splat_u16(3);
  373. /*
  374. 0x10 - 0x1f is the alpha
  375. 0x00 - 0x0e evens are the red
  376. 0x01 - 0x0f odds are zero
  377. */
  378. vector unsigned char vredalpha1 = VECUINT8_LITERAL(0x10, 0x00, 0x01, 0x01,
  379. 0x10, 0x02, 0x01, 0x01,
  380. 0x10, 0x04, 0x01, 0x01,
  381. 0x10, 0x06, 0x01,
  382. 0x01);
  383. vector unsigned char vredalpha2 =
  384. (vector unsigned char)
  385. (vec_add((vector unsigned int) vredalpha1, vec_sl(v8, v16))
  386. );
  387. /*
  388. 0x00 - 0x0f is ARxx ARxx ARxx ARxx
  389. 0x11 - 0x1f odds are blue
  390. */
  391. vector unsigned char vblue1 = VECUINT8_LITERAL(0x00, 0x01, 0x02, 0x11,
  392. 0x04, 0x05, 0x06, 0x13,
  393. 0x08, 0x09, 0x0a, 0x15,
  394. 0x0c, 0x0d, 0x0e, 0x17);
  395. vector unsigned char vblue2 =
  396. (vector unsigned char) (vec_add((vector unsigned int) vblue1, v8)
  397. );
  398. /*
  399. 0x00 - 0x0f is ARxB ARxB ARxB ARxB
  400. 0x10 - 0x1e evens are green
  401. */
  402. vector unsigned char vgreen1 = VECUINT8_LITERAL(0x00, 0x01, 0x10, 0x03,
  403. 0x04, 0x05, 0x12, 0x07,
  404. 0x08, 0x09, 0x14, 0x0b,
  405. 0x0c, 0x0d, 0x16, 0x0f);
  406. vector unsigned char vgreen2 =
  407. (vector unsigned char)
  408. (vec_add((vector unsigned int) vgreen1, vec_sl(v8, v8))
  409. );
  410. SDL_assert(srcfmt->BytesPerPixel == 2);
  411. SDL_assert(dstfmt->BytesPerPixel == 4);
  412. vf800 = (vector unsigned short) vec_splat_u8(-7);
  413. vf800 = vec_sl(vf800, vec_splat_u16(8));
  414. if (dstfmt->Amask && info->a) {
  415. ((unsigned char *) &valpha)[0] = alpha = info->a;
  416. valpha = vec_splat(valpha, 0);
  417. } else {
  418. alpha = 0;
  419. valpha = vec_splat_u8(0);
  420. }
  421. vpermute = calc_swizzle32(NULL, dstfmt);
  422. while (height--) {
  423. vector unsigned char valigner;
  424. vector unsigned char voverflow;
  425. vector unsigned char vsrc;
  426. int width = info->dst_w;
  427. int extrawidth;
  428. /* do scalar until we can align... */
  429. #define ONE_PIXEL_BLEND(condition, widthvar) \
  430. while (condition) { \
  431. unsigned sR, sG, sB; \
  432. unsigned short Pixel = *((unsigned short *)src); \
  433. sR = (Pixel >> 7) & 0xf8; \
  434. sG = (Pixel >> 2) & 0xf8; \
  435. sB = (Pixel << 3) & 0xf8; \
  436. ASSEMBLE_RGBA(dst, 4, dstfmt, sR, sG, sB, alpha); \
  437. src += 2; \
  438. dst += 4; \
  439. widthvar--; \
  440. }
  441. ONE_PIXEL_BLEND(((UNALIGNED_PTR(dst)) && (width)), width);
  442. /* After all that work, here's the vector part! */
  443. extrawidth = (width % 8); /* trailing unaligned stores */
  444. width -= extrawidth;
  445. vsrc = vec_ld(0, src);
  446. valigner = VEC_ALIGNER(src);
  447. while (width) {
  448. vector unsigned short vR, vG, vB;
  449. vector unsigned char vdst1, vdst2;
  450. voverflow = vec_ld(15, src);
  451. vsrc = vec_perm(vsrc, voverflow, valigner);
  452. vR = vec_and(vec_sl((vector unsigned short) vsrc, v1), vf800);
  453. vB = vec_sl((vector unsigned short) vsrc, v3);
  454. vG = vec_sl(vB, v3);
  455. vdst1 =
  456. (vector unsigned char) vec_perm((vector unsigned char) vR,
  457. valpha, vredalpha1);
  458. vdst1 = vec_perm(vdst1, (vector unsigned char) vB, vblue1);
  459. vdst1 = vec_perm(vdst1, (vector unsigned char) vG, vgreen1);
  460. vdst1 = vec_perm(vdst1, valpha, vpermute);
  461. vec_st(vdst1, 0, dst);
  462. vdst2 =
  463. (vector unsigned char) vec_perm((vector unsigned char) vR,
  464. valpha, vredalpha2);
  465. vdst2 = vec_perm(vdst2, (vector unsigned char) vB, vblue2);
  466. vdst2 = vec_perm(vdst2, (vector unsigned char) vG, vgreen2);
  467. vdst2 = vec_perm(vdst2, valpha, vpermute);
  468. vec_st(vdst2, 16, dst);
  469. width -= 8;
  470. dst += 32;
  471. src += 16;
  472. vsrc = voverflow;
  473. }
  474. SDL_assert(width == 0);
  475. /* do scalar until we can align... */
  476. ONE_PIXEL_BLEND((extrawidth), extrawidth);
  477. #undef ONE_PIXEL_BLEND
  478. src += srcskip; /* move to next row, accounting for pitch. */
  479. dst += dstskip;
  480. }
  481. }
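/*
 * Worked example (illustrative) for the 555 variant above: only the shifts for
 * red and green differ from the 565 case, so Pixel = 0x7E08 gives
 *   sR = (0x7E08 >> 7) & 0xF8 = 0xF8
 *   sG = (0x7E08 >> 2) & 0xF8 = 0x80
 *   sB = (0x7E08 << 3) & 0xF8 = 0x40
 * with the low bits again left at zero.
 */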
  482. static void BlitNtoNKey(SDL_BlitInfo * info);
  483. static void BlitNtoNKeyCopyAlpha(SDL_BlitInfo * info);
  484. static void
  485. Blit32to32KeyAltivec(SDL_BlitInfo * info)
  486. {
  487. int height = info->dst_h;
  488. Uint32 *srcp = (Uint32 *) info->src;
  489. int srcskip = info->src_skip / 4;
  490. Uint32 *dstp = (Uint32 *) info->dst;
  491. int dstskip = info->dst_skip / 4;
  492. SDL_PixelFormat *srcfmt = info->src_fmt;
  493. int srcbpp = srcfmt->BytesPerPixel;
  494. SDL_PixelFormat *dstfmt = info->dst_fmt;
  495. int dstbpp = dstfmt->BytesPerPixel;
  496. int copy_alpha = (srcfmt->Amask && dstfmt->Amask);
  497. unsigned alpha = dstfmt->Amask ? info->a : 0;
  498. Uint32 rgbmask = srcfmt->Rmask | srcfmt->Gmask | srcfmt->Bmask;
  499. Uint32 ckey = info->colorkey;
  500. vector unsigned int valpha;
  501. vector unsigned char vpermute;
  502. vector unsigned char vzero;
  503. vector unsigned int vckey;
  504. vector unsigned int vrgbmask;
  505. vpermute = calc_swizzle32(srcfmt, dstfmt);
  506. if (info->dst_w < 16) {
  507. if (copy_alpha) {
  508. BlitNtoNKeyCopyAlpha(info);
  509. } else {
  510. BlitNtoNKey(info);
  511. }
  512. return;
  513. }
  514. vzero = vec_splat_u8(0);
  515. if (alpha) {
  516. ((unsigned char *) &valpha)[0] = (unsigned char) alpha;
  517. valpha =
  518. (vector unsigned int) vec_splat((vector unsigned char) valpha, 0);
  519. } else {
  520. valpha = (vector unsigned int) vzero;
  521. }
  522. ckey &= rgbmask;
  523. ((unsigned int *) (char *) &vckey)[0] = ckey;
  524. vckey = vec_splat(vckey, 0);
  525. ((unsigned int *) (char *) &vrgbmask)[0] = rgbmask;
  526. vrgbmask = vec_splat(vrgbmask, 0);
  527. while (height--) {
  528. #define ONE_PIXEL_BLEND(condition, widthvar) \
  529. if (copy_alpha) { \
  530. while (condition) { \
  531. Uint32 Pixel; \
  532. unsigned sR, sG, sB, sA; \
  533. DISEMBLE_RGBA((Uint8 *)srcp, srcbpp, srcfmt, Pixel, \
  534. sR, sG, sB, sA); \
  535. if ( (Pixel & rgbmask) != ckey ) { \
  536. ASSEMBLE_RGBA((Uint8 *)dstp, dstbpp, dstfmt, \
  537. sR, sG, sB, sA); \
  538. } \
  539. dstp = (Uint32 *) (((Uint8 *) dstp) + dstbpp); \
  540. srcp = (Uint32 *) (((Uint8 *) srcp) + srcbpp); \
  541. widthvar--; \
  542. } \
  543. } else { \
  544. while (condition) { \
  545. Uint32 Pixel; \
  546. unsigned sR, sG, sB; \
  547. RETRIEVE_RGB_PIXEL((Uint8 *)srcp, srcbpp, Pixel); \
  548. if ( Pixel != ckey ) { \
  549. RGB_FROM_PIXEL(Pixel, srcfmt, sR, sG, sB); \
  550. ASSEMBLE_RGBA((Uint8 *)dstp, dstbpp, dstfmt, \
  551. sR, sG, sB, alpha); \
  552. } \
  553. dstp = (Uint32 *) (((Uint8 *)dstp) + dstbpp); \
  554. srcp = (Uint32 *) (((Uint8 *)srcp) + srcbpp); \
  555. widthvar--; \
  556. } \
  557. }
  558. int width = info->dst_w;
  559. ONE_PIXEL_BLEND((UNALIGNED_PTR(dstp)) && (width), width);
  560. SDL_assert(width > 0);
  561. if (width > 0) {
  562. int extrawidth = (width % 4);
  563. vector unsigned char valigner = VEC_ALIGNER(srcp);
  564. vector unsigned int vs = vec_ld(0, srcp);
  565. width -= extrawidth;
  566. SDL_assert(width >= 4);
  567. while (width) {
  568. vector unsigned char vsel;
  569. vector unsigned int vd;
  570. vector unsigned int voverflow = vec_ld(15, srcp);
  571. /* load the source vec */
  572. vs = vec_perm(vs, voverflow, valigner);
  573. /* vsel is set for items that match the key */
  574. vsel = (vector unsigned char) vec_and(vs, vrgbmask);
  575. vsel = (vector unsigned char) vec_cmpeq(vs, vckey);
  576. /* permute the src vec to the dest format */
  577. vs = vec_perm(vs, valpha, vpermute);
  578. /* load the destination vec */
  579. vd = vec_ld(0, dstp);
  580. /* select the source and dest into vs */
  581. vd = (vector unsigned int) vec_sel((vector unsigned char) vs,
  582. (vector unsigned char) vd,
  583. vsel);
  584. vec_st(vd, 0, dstp);
  585. srcp += 4;
  586. width -= 4;
  587. dstp += 4;
  588. vs = voverflow;
  589. }
  590. ONE_PIXEL_BLEND((extrawidth), extrawidth);
  591. #undef ONE_PIXEL_BLEND
  592. srcp += srcskip;
  593. dstp += dstskip;
  594. }
  595. }
  596. }
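/*
 * Reference sketch (illustrative only; Sketch_ColorkeyRow is not an SDL
 * function): the per-pixel decision that the vec_sel() above makes, written in
 * plain C and assuming source and destination already share the same 32-bit
 * layout (the real loop additionally swizzles the source through vpermute).
 */
static void
Sketch_ColorkeyRow(Uint32 *dstp, const Uint32 *srcp, int width,
                   Uint32 ckey, Uint32 rgbmask)
{
    while (width--) {
        Uint32 p = *srcp++;
        if ((p & rgbmask) != ckey) {    /* pixels matching the key keep the destination */
            *dstp = p;
        }
        ++dstp;
    }
}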
  597. /* Altivec code to swizzle one 32-bit surface to a different 32-bit format. */
  598. /* Use this on a G5 */
  599. static void
  600. ConvertAltivec32to32_noprefetch(SDL_BlitInfo * info)
  601. {
  602. int height = info->dst_h;
  603. Uint32 *src = (Uint32 *) info->src;
  604. int srcskip = info->src_skip / 4;
  605. Uint32 *dst = (Uint32 *) info->dst;
  606. int dstskip = info->dst_skip / 4;
  607. SDL_PixelFormat *srcfmt = info->src_fmt;
  608. SDL_PixelFormat *dstfmt = info->dst_fmt;
  609. vector unsigned int vzero = vec_splat_u32(0);
  610. vector unsigned char vpermute = calc_swizzle32(srcfmt, dstfmt);
  611. if (dstfmt->Amask && !srcfmt->Amask) {
  612. if (info->a) {
  613. vector unsigned char valpha;
  614. ((unsigned char *) &valpha)[0] = info->a;
  615. vzero = (vector unsigned int) vec_splat(valpha, 0);
  616. }
  617. }
  618. SDL_assert(srcfmt->BytesPerPixel == 4);
  619. SDL_assert(dstfmt->BytesPerPixel == 4);
  620. while (height--) {
  621. vector unsigned char valigner;
  622. vector unsigned int vbits;
  623. vector unsigned int voverflow;
  624. Uint32 bits;
  625. Uint8 r, g, b, a;
  626. int width = info->dst_w;
  627. int extrawidth;
  628. /* do scalar until we can align... */
  629. while ((UNALIGNED_PTR(dst)) && (width)) {
  630. bits = *(src++);
  631. RGBA_FROM_8888(bits, srcfmt, r, g, b, a);
  632. if(!srcfmt->Amask)
  633. a = info->a;
  634. *(dst++) = MAKE8888(dstfmt, r, g, b, a);
  635. width--;
  636. }
  637. /* After all that work, here's the vector part! */
  638. extrawidth = (width % 4);
  639. width -= extrawidth;
  640. valigner = VEC_ALIGNER(src);
  641. vbits = vec_ld(0, src);
  642. while (width) {
  643. voverflow = vec_ld(15, src);
  644. src += 4;
  645. width -= 4;
  646. vbits = vec_perm(vbits, voverflow, valigner); /* src is ready. */
  647. vbits = vec_perm(vbits, vzero, vpermute); /* swizzle it. */
  648. vec_st(vbits, 0, dst); /* store it back out. */
  649. dst += 4;
  650. vbits = voverflow;
  651. }
  652. SDL_assert(width == 0);
  653. /* cover pixels at the end of the row that didn't fit in 16 bytes. */
  654. while (extrawidth) {
  655. bits = *(src++); /* max 7 pixels, don't bother with prefetch. */
  656. RGBA_FROM_8888(bits, srcfmt, r, g, b, a);
  657. if(!srcfmt->Amask)
  658. a = info->a;
  659. *(dst++) = MAKE8888(dstfmt, r, g, b, a);
  660. extrawidth--;
  661. }
  662. src += srcskip;
  663. dst += dstskip;
  664. }
  665. }
  666. /* Altivec code to swizzle one 32-bit surface to a different 32-bit format. */
  667. /* Use this on a G4 */
  668. static void
  669. ConvertAltivec32to32_prefetch(SDL_BlitInfo * info)
  670. {
  671. const int scalar_dst_lead = sizeof(Uint32) * 4;
  672. const int vector_dst_lead = sizeof(Uint32) * 16;
  673. int height = info->dst_h;
  674. Uint32 *src = (Uint32 *) info->src;
  675. int srcskip = info->src_skip / 4;
  676. Uint32 *dst = (Uint32 *) info->dst;
  677. int dstskip = info->dst_skip / 4;
  678. SDL_PixelFormat *srcfmt = info->src_fmt;
  679. SDL_PixelFormat *dstfmt = info->dst_fmt;
  680. vector unsigned int vzero = vec_splat_u32(0);
  681. vector unsigned char vpermute = calc_swizzle32(srcfmt, dstfmt);
  682. if (dstfmt->Amask && !srcfmt->Amask) {
  683. if (info->a) {
  684. vector unsigned char valpha;
  685. ((unsigned char *) &valpha)[0] = info->a;
  686. vzero = (vector unsigned int) vec_splat(valpha, 0);
  687. }
  688. }
  689. SDL_assert(srcfmt->BytesPerPixel == 4);
  690. SDL_assert(dstfmt->BytesPerPixel == 4);
  691. while (height--) {
  692. vector unsigned char valigner;
  693. vector unsigned int vbits;
  694. vector unsigned int voverflow;
  695. Uint32 bits;
  696. Uint8 r, g, b, a;
  697. int width = info->dst_w;
  698. int extrawidth;
  699. /* do scalar until we can align... */
  700. while ((UNALIGNED_PTR(dst)) && (width)) {
  701. vec_dstt(src + scalar_dst_lead, DST_CTRL(2, 32, 1024),
  702. DST_CHAN_SRC);
  703. vec_dstst(dst + scalar_dst_lead, DST_CTRL(2, 32, 1024),
  704. DST_CHAN_DEST);
  705. bits = *(src++);
  706. RGBA_FROM_8888(bits, srcfmt, r, g, b, a);
  707. if(!srcfmt->Amask)
  708. a = info->a;
  709. *(dst++) = MAKE8888(dstfmt, r, g, b, a);
  710. width--;
  711. }
  712. /* After all that work, here's the vector part! */
  713. extrawidth = (width % 4);
  714. width -= extrawidth;
  715. valigner = VEC_ALIGNER(src);
  716. vbits = vec_ld(0, src);
  717. while (width) {
  718. vec_dstt(src + vector_dst_lead, DST_CTRL(2, 32, 1024),
  719. DST_CHAN_SRC);
  720. vec_dstst(dst + vector_dst_lead, DST_CTRL(2, 32, 1024),
  721. DST_CHAN_DEST);
  722. voverflow = vec_ld(15, src);
  723. src += 4;
  724. width -= 4;
  725. vbits = vec_perm(vbits, voverflow, valigner); /* src is ready. */
  726. vbits = vec_perm(vbits, vzero, vpermute); /* swizzle it. */
  727. vec_st(vbits, 0, dst); /* store it back out. */
  728. dst += 4;
  729. vbits = voverflow;
  730. }
  731. SDL_assert(width == 0);
  732. /* cover pixels at the end of the row that didn't fit in 16 bytes. */
  733. while (extrawidth) {
  734. bits = *(src++); /* max 7 pixels, don't bother with prefetch. */
  735. RGBA_FROM_8888(bits, srcfmt, r, g, b, a);
  736. if(!srcfmt->Amask)
  737. a = info->a;
  738. *(dst++) = MAKE8888(dstfmt, r, g, b, a);
  739. extrawidth--;
  740. }
  741. src += srcskip;
  742. dst += dstskip;
  743. }
  744. vec_dss(DST_CHAN_SRC);
  745. vec_dss(DST_CHAN_DEST);
  746. }
  747. static Uint32
  748. GetBlitFeatures(void)
  749. {
  750. static Uint32 features = 0xffffffff;
  751. if (features == 0xffffffff) {
  752. /* Provide an override for testing .. */
  753. char *override = SDL_getenv("SDL_ALTIVEC_BLIT_FEATURES");
  754. if (override) {
  755. features = 0;
  756. SDL_sscanf(override, "%u", &features);
  757. } else {
  758. features = (0
  759. /* Feature 1 is has-MMX */
  760. | ((SDL_HasMMX())? 1 : 0)
  761. /* Feature 2 is has-AltiVec */
  762. | ((SDL_HasAltiVec())? 2 : 0)
  763. /* Feature 4 is dont-use-prefetch */
  764. /* !!!! FIXME: Check for G5 or later, not the cache size! Always prefetch on a G4. */
  765. | ((GetL3CacheSize() == 0) ? 4 : 0)
  766. );
  767. }
  768. }
  769. return features;
  770. }
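/*
 * Annotation (illustrative): decoded, the feature word above means
 *   bit 0 (1) - MMX available
 *   bit 1 (2) - AltiVec available
 *   bit 2 (4) - avoid vec_dst prefetching; GetL3CacheSize() == 0 stands in for
 *               "probably a G5", per the FIXME above
 * Setting SDL_ALTIVEC_BLIT_FEATURES=2 in the environment, for example, reports
 * AltiVec without MMX and with prefetching allowed, which is useful for testing.
 */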
  771. #if __MWERKS__
  772. #pragma altivec_model off
  773. #endif
  774. #else
  775. /* Feature 1 is has-MMX */
  776. #define GetBlitFeatures() ((Uint32)(SDL_HasMMX() ? 1 : 0))
  777. #endif
  778. /* This is now endian dependent */
  779. #if SDL_BYTEORDER == SDL_LIL_ENDIAN
  780. #define HI 1
  781. #define LO 0
  782. #else /* SDL_BYTEORDER == SDL_BIG_ENDIAN */
  783. #define HI 0
  784. #define LO 1
  785. #endif
  786. /* Special optimized blit for RGB 8-8-8 --> RGB 3-3-2 */
  787. #define RGB888_RGB332(dst, src) { \
  788. dst = (Uint8)((((src)&0x00E00000)>>16)| \
  789. (((src)&0x0000E000)>>11)| \
  790. (((src)&0x000000C0)>>6)); \
  791. }
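/*
 * Worked example (illustrative): RGB888_RGB332 keeps the top 3 bits of red,
 * the top 3 of green and the top 2 of blue. For src = 0x00FF8040
 * (R=0xFF, G=0x80, B=0x40):
 *   (0x00E00000 >> 16) | (0x00008000 >> 11) | (0x00000040 >> 6)
 *     == 0xE0 | 0x10 | 0x01 == 0xF1
 */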
  792. static void
  793. Blit_RGB888_index8(SDL_BlitInfo * info)
  794. {
  795. #ifndef USE_DUFFS_LOOP
  796. int c;
  797. #endif
  798. int width, height;
  799. Uint32 *src;
  800. const Uint8 *map;
  801. Uint8 *dst;
  802. int srcskip, dstskip;
  803. /* Set up some basic variables */
  804. width = info->dst_w;
  805. height = info->dst_h;
  806. src = (Uint32 *) info->src;
  807. srcskip = info->src_skip / 4;
  808. dst = info->dst;
  809. dstskip = info->dst_skip;
  810. map = info->table;
  811. if (map == NULL) {
  812. while (height--) {
  813. #ifdef USE_DUFFS_LOOP
  814. /* *INDENT-OFF* */
  815. DUFFS_LOOP(
  816. RGB888_RGB332(*dst++, *src);
  817. , width);
  818. /* *INDENT-ON* */
  819. #else
  820. for (c = width / 4; c; --c) {
  821. /* Pack RGB into 8bit pixel */
  822. ++src;
  823. RGB888_RGB332(*dst++, *src);
  824. ++src;
  825. RGB888_RGB332(*dst++, *src);
  826. ++src;
  827. RGB888_RGB332(*dst++, *src);
  828. ++src;
  829. }
  830. switch (width & 3) {
  831. case 3:
  832. RGB888_RGB332(*dst++, *src);
  833. ++src;
  834. case 2:
  835. RGB888_RGB332(*dst++, *src);
  836. ++src;
  837. case 1:
  838. RGB888_RGB332(*dst++, *src);
  839. ++src;
  840. }
  841. #endif /* USE_DUFFS_LOOP */
  842. src += srcskip;
  843. dst += dstskip;
  844. }
  845. } else {
  846. int Pixel;
  847. while (height--) {
  848. #ifdef USE_DUFFS_LOOP
  849. /* *INDENT-OFF* */
  850. DUFFS_LOOP(
  851. RGB888_RGB332(Pixel, *src);
  852. *dst++ = map[Pixel];
  853. ++src;
  854. , width);
  855. /* *INDENT-ON* */
  856. #else
  857. for (c = width / 4; c; --c) {
  858. /* Pack RGB into 8bit pixel */
  859. RGB888_RGB332(Pixel, *src);
  860. *dst++ = map[Pixel];
  861. ++src;
  862. RGB888_RGB332(Pixel, *src);
  863. *dst++ = map[Pixel];
  864. ++src;
  865. RGB888_RGB332(Pixel, *src);
  866. *dst++ = map[Pixel];
  867. ++src;
  868. RGB888_RGB332(Pixel, *src);
  869. *dst++ = map[Pixel];
  870. ++src;
  871. }
  872. switch (width & 3) {
  873. case 3:
  874. RGB888_RGB332(Pixel, *src);
  875. *dst++ = map[Pixel];
  876. ++src;
  877. case 2:
  878. RGB888_RGB332(Pixel, *src);
  879. *dst++ = map[Pixel];
  880. ++src;
  881. case 1:
  882. RGB888_RGB332(Pixel, *src);
  883. *dst++ = map[Pixel];
  884. ++src;
  885. }
  886. #endif /* USE_DUFFS_LOOP */
  887. src += srcskip;
  888. dst += dstskip;
  889. }
  890. }
  891. }
  892. /* Special optimized blit for RGB 8-8-8 --> RGB 5-5-5 */
  893. #define RGB888_RGB555(dst, src) { \
  894. *(Uint16 *)(dst) = (Uint16)((((*src)&0x00F80000)>>9)| \
  895. (((*src)&0x0000F800)>>6)| \
  896. (((*src)&0x000000F8)>>3)); \
  897. }
  898. #define RGB888_RGB555_TWO(dst, src) { \
  899. *(Uint32 *)(dst) = (((((src[HI])&0x00F80000)>>9)| \
  900. (((src[HI])&0x0000F800)>>6)| \
  901. (((src[HI])&0x000000F8)>>3))<<16)| \
  902. (((src[LO])&0x00F80000)>>9)| \
  903. (((src[LO])&0x0000F800)>>6)| \
  904. (((src[LO])&0x000000F8)>>3); \
  905. }
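/*
 * Worked example (illustrative): for *src = 0x00FF8040 the single-pixel macro
 * yields (0x00F80000 >> 9) | (0x00008000 >> 6) | (0x00000040 >> 3)
 *   == 0x7C00 | 0x0200 | 0x0008 == 0x7E08   (xRRRRRGGGGGBBBBB).
 * RGB888_RGB555_TWO packs two such results into one aligned 32-bit store,
 * using HI/LO to place the halfwords according to the machine's endianness.
 */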
  906. static void
  907. Blit_RGB888_RGB555(SDL_BlitInfo * info)
  908. {
  909. #ifndef USE_DUFFS_LOOP
  910. int c;
  911. #endif
  912. int width, height;
  913. Uint32 *src;
  914. Uint16 *dst;
  915. int srcskip, dstskip;
  916. /* Set up some basic variables */
  917. width = info->dst_w;
  918. height = info->dst_h;
  919. src = (Uint32 *) info->src;
  920. srcskip = info->src_skip / 4;
  921. dst = (Uint16 *) info->dst;
  922. dstskip = info->dst_skip / 2;
  923. #ifdef USE_DUFFS_LOOP
  924. while (height--) {
  925. /* *INDENT-OFF* */
  926. DUFFS_LOOP(
  927. RGB888_RGB555(dst, src);
  928. ++src;
  929. ++dst;
  930. , width);
  931. /* *INDENT-ON* */
  932. src += srcskip;
  933. dst += dstskip;
  934. }
  935. #else
  936. /* Memory align at 4-byte boundary, if necessary */
  937. if ((long) dst & 0x03) {
  938. /* Don't do anything if width is 0 */
  939. if (width == 0) {
  940. return;
  941. }
  942. --width;
  943. while (height--) {
  944. /* Perform copy alignment */
  945. RGB888_RGB555(dst, src);
  946. ++src;
  947. ++dst;
  948. /* Copy in 4 pixel chunks */
  949. for (c = width / 4; c; --c) {
  950. RGB888_RGB555_TWO(dst, src);
  951. src += 2;
  952. dst += 2;
  953. RGB888_RGB555_TWO(dst, src);
  954. src += 2;
  955. dst += 2;
  956. }
  957. /* Get any leftovers */
  958. switch (width & 3) {
  959. case 3:
  960. RGB888_RGB555(dst, src);
  961. ++src;
  962. ++dst;
  963. case 2:
  964. RGB888_RGB555_TWO(dst, src);
  965. src += 2;
  966. dst += 2;
  967. break;
  968. case 1:
  969. RGB888_RGB555(dst, src);
  970. ++src;
  971. ++dst;
  972. break;
  973. }
  974. src += srcskip;
  975. dst += dstskip;
  976. }
  977. } else {
  978. while (height--) {
  979. /* Copy in 4 pixel chunks */
  980. for (c = width / 4; c; --c) {
  981. RGB888_RGB555_TWO(dst, src);
  982. src += 2;
  983. dst += 2;
  984. RGB888_RGB555_TWO(dst, src);
  985. src += 2;
  986. dst += 2;
  987. }
  988. /* Get any leftovers */
  989. switch (width & 3) {
  990. case 3:
  991. RGB888_RGB555(dst, src);
  992. ++src;
  993. ++dst;
  994. case 2:
  995. RGB888_RGB555_TWO(dst, src);
  996. src += 2;
  997. dst += 2;
  998. break;
  999. case 1:
  1000. RGB888_RGB555(dst, src);
  1001. ++src;
  1002. ++dst;
  1003. break;
  1004. }
  1005. src += srcskip;
  1006. dst += dstskip;
  1007. }
  1008. }
  1009. #endif /* USE_DUFFS_LOOP */
  1010. }
  1011. /* Special optimized blit for RGB 8-8-8 --> RGB 5-6-5 */
  1012. #define RGB888_RGB565(dst, src) { \
  1013. *(Uint16 *)(dst) = (Uint16)((((*src)&0x00F80000)>>8)| \
  1014. (((*src)&0x0000FC00)>>5)| \
  1015. (((*src)&0x000000F8)>>3)); \
  1016. }
  1017. #define RGB888_RGB565_TWO(dst, src) { \
  1018. *(Uint32 *)(dst) = (((((src[HI])&0x00F80000)>>8)| \
  1019. (((src[HI])&0x0000FC00)>>5)| \
  1020. (((src[HI])&0x000000F8)>>3))<<16)| \
  1021. (((src[LO])&0x00F80000)>>8)| \
  1022. (((src[LO])&0x0000FC00)>>5)| \
  1023. (((src[LO])&0x000000F8)>>3); \
  1024. }
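/*
 * Worked example (illustrative): for *src = 0x00FF8040 the 565 macro yields
 * (0x00F80000 >> 8) | (0x00008000 >> 5) | (0x00000040 >> 3)
 *   == 0xF800 | 0x0400 | 0x0008 == 0xFC08   (RRRRRGGGGGGBBBBB),
 * matching the scalar path of the AltiVec 888 -> 565 blitter earlier in this
 * file.
 */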
  1025. static void
  1026. Blit_RGB888_RGB565(SDL_BlitInfo * info)
  1027. {
  1028. #ifndef USE_DUFFS_LOOP
  1029. int c;
  1030. #endif
  1031. int width, height;
  1032. Uint32 *src;
  1033. Uint16 *dst;
  1034. int srcskip, dstskip;
  1035. /* Set up some basic variables */
  1036. width = info->dst_w;
  1037. height = info->dst_h;
  1038. src = (Uint32 *) info->src;
  1039. srcskip = info->src_skip / 4;
  1040. dst = (Uint16 *) info->dst;
  1041. dstskip = info->dst_skip / 2;
  1042. #ifdef USE_DUFFS_LOOP
  1043. while (height--) {
  1044. /* *INDENT-OFF* */
  1045. DUFFS_LOOP(
  1046. RGB888_RGB565(dst, src);
  1047. ++src;
  1048. ++dst;
  1049. , width);
  1050. /* *INDENT-ON* */
  1051. src += srcskip;
  1052. dst += dstskip;
  1053. }
  1054. #else
  1055. /* Memory align at 4-byte boundary, if necessary */
  1056. if ((long) dst & 0x03) {
  1057. /* Don't do anything if width is 0 */
  1058. if (width == 0) {
  1059. return;
  1060. }
  1061. --width;
  1062. while (height--) {
  1063. /* Perform copy alignment */
  1064. RGB888_RGB565(dst, src);
  1065. ++src;
  1066. ++dst;
  1067. /* Copy in 4 pixel chunks */
  1068. for (c = width / 4; c; --c) {
  1069. RGB888_RGB565_TWO(dst, src);
  1070. src += 2;
  1071. dst += 2;
  1072. RGB888_RGB565_TWO(dst, src);
  1073. src += 2;
  1074. dst += 2;
  1075. }
  1076. /* Get any leftovers */
  1077. switch (width & 3) {
  1078. case 3:
  1079. RGB888_RGB565(dst, src);
  1080. ++src;
  1081. ++dst;
  1082. case 2:
  1083. RGB888_RGB565_TWO(dst, src);
  1084. src += 2;
  1085. dst += 2;
  1086. break;
  1087. case 1:
  1088. RGB888_RGB565(dst, src);
  1089. ++src;
  1090. ++dst;
  1091. break;
  1092. }
  1093. src += srcskip;
  1094. dst += dstskip;
  1095. }
  1096. } else {
  1097. while (height--) {
  1098. /* Copy in 4 pixel chunks */
  1099. for (c = width / 4; c; --c) {
  1100. RGB888_RGB565_TWO(dst, src);
  1101. src += 2;
  1102. dst += 2;
  1103. RGB888_RGB565_TWO(dst, src);
  1104. src += 2;
  1105. dst += 2;
  1106. }
  1107. /* Get any leftovers */
  1108. switch (width & 3) {
  1109. case 3:
  1110. RGB888_RGB565(dst, src);
  1111. ++src;
  1112. ++dst;
  1113. case 2:
  1114. RGB888_RGB565_TWO(dst, src);
  1115. src += 2;
  1116. dst += 2;
  1117. break;
  1118. case 1:
  1119. RGB888_RGB565(dst, src);
  1120. ++src;
  1121. ++dst;
  1122. break;
  1123. }
  1124. src += srcskip;
  1125. dst += dstskip;
  1126. }
  1127. }
  1128. #endif /* USE_DUFFS_LOOP */
  1129. }
  1130. /* Special optimized blit for RGB 5-6-5 --> 32-bit RGB surfaces */
  1131. #define RGB565_32(dst, src, map) (map[src[LO]*2] + map[src[HI]*2+1])
  1132. static void
  1133. Blit_RGB565_32(SDL_BlitInfo * info, const Uint32 * map)
  1134. {
  1135. #ifndef USE_DUFFS_LOOP
  1136. int c;
  1137. #endif
  1138. int width, height;
  1139. Uint8 *src;
  1140. Uint32 *dst;
  1141. int srcskip, dstskip;
  1142. /* Set up some basic variables */
  1143. width = info->dst_w;
  1144. height = info->dst_h;
  1145. src = (Uint8 *) info->src;
  1146. srcskip = info->src_skip;
  1147. dst = (Uint32 *) info->dst;
  1148. dstskip = info->dst_skip / 4;
  1149. #ifdef USE_DUFFS_LOOP
  1150. while (height--) {
  1151. /* *INDENT-OFF* */
  1152. DUFFS_LOOP(
  1153. {
  1154. *dst++ = RGB565_32(dst, src, map);
  1155. src += 2;
  1156. },
  1157. width);
  1158. /* *INDENT-ON* */
  1159. src += srcskip;
  1160. dst += dstskip;
  1161. }
  1162. #else
  1163. while (height--) {
  1164. /* Copy in 4 pixel chunks */
  1165. for (c = width / 4; c; --c) {
  1166. *dst++ = RGB565_32(dst, src, map);
  1167. src += 2;
  1168. *dst++ = RGB565_32(dst, src, map);
  1169. src += 2;
  1170. *dst++ = RGB565_32(dst, src, map);
  1171. src += 2;
  1172. *dst++ = RGB565_32(dst, src, map);
  1173. src += 2;
  1174. }
  1175. /* Get any leftovers */
  1176. switch (width & 3) {
  1177. case 3:
  1178. *dst++ = RGB565_32(dst, src, map);
  1179. src += 2;
  1180. case 2:
  1181. *dst++ = RGB565_32(dst, src, map);
  1182. src += 2;
  1183. case 1:
  1184. *dst++ = RGB565_32(dst, src, map);
  1185. src += 2;
  1186. break;
  1187. }
  1188. src += srcskip;
  1189. dst += dstskip;
  1190. }
  1191. #endif /* USE_DUFFS_LOOP */
  1192. }
  1193. /* Special optimized blit for RGB 5-6-5 --> ARGB 8-8-8-8 */
  1194. static const Uint32 RGB565_ARGB8888_LUT[512] = {
  1195. 0x00000000, 0xff000000, 0x00000008, 0xff002000,
  1196. 0x00000010, 0xff004000, 0x00000018, 0xff006100,
  1197. 0x00000020, 0xff008100, 0x00000029, 0xff00a100,
  1198. 0x00000031, 0xff00c200, 0x00000039, 0xff00e200,
  1199. 0x00000041, 0xff080000, 0x0000004a, 0xff082000,
  1200. 0x00000052, 0xff084000, 0x0000005a, 0xff086100,
  1201. 0x00000062, 0xff088100, 0x0000006a, 0xff08a100,
  1202. 0x00000073, 0xff08c200, 0x0000007b, 0xff08e200,
  1203. 0x00000083, 0xff100000, 0x0000008b, 0xff102000,
  1204. 0x00000094, 0xff104000, 0x0000009c, 0xff106100,
  1205. 0x000000a4, 0xff108100, 0x000000ac, 0xff10a100,
  1206. 0x000000b4, 0xff10c200, 0x000000bd, 0xff10e200,
  1207. 0x000000c5, 0xff180000, 0x000000cd, 0xff182000,
  1208. 0x000000d5, 0xff184000, 0x000000de, 0xff186100,
  1209. 0x000000e6, 0xff188100, 0x000000ee, 0xff18a100,
  1210. 0x000000f6, 0xff18c200, 0x000000ff, 0xff18e200,
  1211. 0x00000400, 0xff200000, 0x00000408, 0xff202000,
  1212. 0x00000410, 0xff204000, 0x00000418, 0xff206100,
  1213. 0x00000420, 0xff208100, 0x00000429, 0xff20a100,
  1214. 0x00000431, 0xff20c200, 0x00000439, 0xff20e200,
  1215. 0x00000441, 0xff290000, 0x0000044a, 0xff292000,
  1216. 0x00000452, 0xff294000, 0x0000045a, 0xff296100,
  1217. 0x00000462, 0xff298100, 0x0000046a, 0xff29a100,
  1218. 0x00000473, 0xff29c200, 0x0000047b, 0xff29e200,
  1219. 0x00000483, 0xff310000, 0x0000048b, 0xff312000,
  1220. 0x00000494, 0xff314000, 0x0000049c, 0xff316100,
  1221. 0x000004a4, 0xff318100, 0x000004ac, 0xff31a100,
  1222. 0x000004b4, 0xff31c200, 0x000004bd, 0xff31e200,
  1223. 0x000004c5, 0xff390000, 0x000004cd, 0xff392000,
  1224. 0x000004d5, 0xff394000, 0x000004de, 0xff396100,
  1225. 0x000004e6, 0xff398100, 0x000004ee, 0xff39a100,
  1226. 0x000004f6, 0xff39c200, 0x000004ff, 0xff39e200,
  1227. 0x00000800, 0xff410000, 0x00000808, 0xff412000,
  1228. 0x00000810, 0xff414000, 0x00000818, 0xff416100,
  1229. 0x00000820, 0xff418100, 0x00000829, 0xff41a100,
  1230. 0x00000831, 0xff41c200, 0x00000839, 0xff41e200,
  1231. 0x00000841, 0xff4a0000, 0x0000084a, 0xff4a2000,
  1232. 0x00000852, 0xff4a4000, 0x0000085a, 0xff4a6100,
  1233. 0x00000862, 0xff4a8100, 0x0000086a, 0xff4aa100,
  1234. 0x00000873, 0xff4ac200, 0x0000087b, 0xff4ae200,
  1235. 0x00000883, 0xff520000, 0x0000088b, 0xff522000,
  1236. 0x00000894, 0xff524000, 0x0000089c, 0xff526100,
  1237. 0x000008a4, 0xff528100, 0x000008ac, 0xff52a100,
  1238. 0x000008b4, 0xff52c200, 0x000008bd, 0xff52e200,
  1239. 0x000008c5, 0xff5a0000, 0x000008cd, 0xff5a2000,
  1240. 0x000008d5, 0xff5a4000, 0x000008de, 0xff5a6100,
  1241. 0x000008e6, 0xff5a8100, 0x000008ee, 0xff5aa100,
  1242. 0x000008f6, 0xff5ac200, 0x000008ff, 0xff5ae200,
  1243. 0x00000c00, 0xff620000, 0x00000c08, 0xff622000,
  1244. 0x00000c10, 0xff624000, 0x00000c18, 0xff626100,
  1245. 0x00000c20, 0xff628100, 0x00000c29, 0xff62a100,
  1246. 0x00000c31, 0xff62c200, 0x00000c39, 0xff62e200,
  1247. 0x00000c41, 0xff6a0000, 0x00000c4a, 0xff6a2000,
  1248. 0x00000c52, 0xff6a4000, 0x00000c5a, 0xff6a6100,
  1249. 0x00000c62, 0xff6a8100, 0x00000c6a, 0xff6aa100,
  1250. 0x00000c73, 0xff6ac200, 0x00000c7b, 0xff6ae200,
  1251. 0x00000c83, 0xff730000, 0x00000c8b, 0xff732000,
  1252. 0x00000c94, 0xff734000, 0x00000c9c, 0xff736100,
  1253. 0x00000ca4, 0xff738100, 0x00000cac, 0xff73a100,
  1254. 0x00000cb4, 0xff73c200, 0x00000cbd, 0xff73e200,
  1255. 0x00000cc5, 0xff7b0000, 0x00000ccd, 0xff7b2000,
  1256. 0x00000cd5, 0xff7b4000, 0x00000cde, 0xff7b6100,
  1257. 0x00000ce6, 0xff7b8100, 0x00000cee, 0xff7ba100,
  1258. 0x00000cf6, 0xff7bc200, 0x00000cff, 0xff7be200,
  1259. 0x00001000, 0xff830000, 0x00001008, 0xff832000,
  1260. 0x00001010, 0xff834000, 0x00001018, 0xff836100,
  1261. 0x00001020, 0xff838100, 0x00001029, 0xff83a100,
  1262. 0x00001031, 0xff83c200, 0x00001039, 0xff83e200,
  1263. 0x00001041, 0xff8b0000, 0x0000104a, 0xff8b2000,
  1264. 0x00001052, 0xff8b4000, 0x0000105a, 0xff8b6100,
  1265. 0x00001062, 0xff8b8100, 0x0000106a, 0xff8ba100,
  1266. 0x00001073, 0xff8bc200, 0x0000107b, 0xff8be200,
  1267. 0x00001083, 0xff940000, 0x0000108b, 0xff942000,
  1268. 0x00001094, 0xff944000, 0x0000109c, 0xff946100,
  1269. 0x000010a4, 0xff948100, 0x000010ac, 0xff94a100,
  1270. 0x000010b4, 0xff94c200, 0x000010bd, 0xff94e200,
  1271. 0x000010c5, 0xff9c0000, 0x000010cd, 0xff9c2000,
  1272. 0x000010d5, 0xff9c4000, 0x000010de, 0xff9c6100,
  1273. 0x000010e6, 0xff9c8100, 0x000010ee, 0xff9ca100,
  1274. 0x000010f6, 0xff9cc200, 0x000010ff, 0xff9ce200,
  1275. 0x00001400, 0xffa40000, 0x00001408, 0xffa42000,
  1276. 0x00001410, 0xffa44000, 0x00001418, 0xffa46100,
  1277. 0x00001420, 0xffa48100, 0x00001429, 0xffa4a100,
  1278. 0x00001431, 0xffa4c200, 0x00001439, 0xffa4e200,
  1279. 0x00001441, 0xffac0000, 0x0000144a, 0xffac2000,
  1280. 0x00001452, 0xffac4000, 0x0000145a, 0xffac6100,
  1281. 0x00001462, 0xffac8100, 0x0000146a, 0xffaca100,
  1282. 0x00001473, 0xffacc200, 0x0000147b, 0xfface200,
  1283. 0x00001483, 0xffb40000, 0x0000148b, 0xffb42000,
  1284. 0x00001494, 0xffb44000, 0x0000149c, 0xffb46100,
  1285. 0x000014a4, 0xffb48100, 0x000014ac, 0xffb4a100,
  1286. 0x000014b4, 0xffb4c200, 0x000014bd, 0xffb4e200,
  1287. 0x000014c5, 0xffbd0000, 0x000014cd, 0xffbd2000,
  1288. 0x000014d5, 0xffbd4000, 0x000014de, 0xffbd6100,
  1289. 0x000014e6, 0xffbd8100, 0x000014ee, 0xffbda100,
  1290. 0x000014f6, 0xffbdc200, 0x000014ff, 0xffbde200,
  1291. 0x00001800, 0xffc50000, 0x00001808, 0xffc52000,
  1292. 0x00001810, 0xffc54000, 0x00001818, 0xffc56100,
  1293. 0x00001820, 0xffc58100, 0x00001829, 0xffc5a100,
  1294. 0x00001831, 0xffc5c200, 0x00001839, 0xffc5e200,
  1295. 0x00001841, 0xffcd0000, 0x0000184a, 0xffcd2000,
  1296. 0x00001852, 0xffcd4000, 0x0000185a, 0xffcd6100,
  1297. 0x00001862, 0xffcd8100, 0x0000186a, 0xffcda100,
  1298. 0x00001873, 0xffcdc200, 0x0000187b, 0xffcde200,
  1299. 0x00001883, 0xffd50000, 0x0000188b, 0xffd52000,
  1300. 0x00001894, 0xffd54000, 0x0000189c, 0xffd56100,
  1301. 0x000018a4, 0xffd58100, 0x000018ac, 0xffd5a100,
  1302. 0x000018b4, 0xffd5c200, 0x000018bd, 0xffd5e200,
  1303. 0x000018c5, 0xffde0000, 0x000018cd, 0xffde2000,
  1304. 0x000018d5, 0xffde4000, 0x000018de, 0xffde6100,
  1305. 0x000018e6, 0xffde8100, 0x000018ee, 0xffdea100,
  1306. 0x000018f6, 0xffdec200, 0x000018ff, 0xffdee200,
  1307. 0x00001c00, 0xffe60000, 0x00001c08, 0xffe62000,
  1308. 0x00001c10, 0xffe64000, 0x00001c18, 0xffe66100,
  1309. 0x00001c20, 0xffe68100, 0x00001c29, 0xffe6a100,
  1310. 0x00001c31, 0xffe6c200, 0x00001c39, 0xffe6e200,
  1311. 0x00001c41, 0xffee0000, 0x00001c4a, 0xffee2000,
  1312. 0x00001c52, 0xffee4000, 0x00001c5a, 0xffee6100,
  1313. 0x00001c62, 0xffee8100, 0x00001c6a, 0xffeea100,
  1314. 0x00001c73, 0xffeec200, 0x00001c7b, 0xffeee200,
  1315. 0x00001c83, 0xfff60000, 0x00001c8b, 0xfff62000,
  1316. 0x00001c94, 0xfff64000, 0x00001c9c, 0xfff66100,
  1317. 0x00001ca4, 0xfff68100, 0x00001cac, 0xfff6a100,
  1318. 0x00001cb4, 0xfff6c200, 0x00001cbd, 0xfff6e200,
  1319. 0x00001cc5, 0xffff0000, 0x00001ccd, 0xffff2000,
  1320. 0x00001cd5, 0xffff4000, 0x00001cde, 0xffff6100,
  1321. 0x00001ce6, 0xffff8100, 0x00001cee, 0xffffa100,
  1322. 0x00001cf6, 0xffffc200, 0x00001cff, 0xffffe200
  1323. };
  1324. static void
  1325. Blit_RGB565_ARGB8888(SDL_BlitInfo * info)
  1326. {
  1327. Blit_RGB565_32(info, RGB565_ARGB8888_LUT);
  1328. }
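/*
 * Worked example (illustrative): RGB565_32 looks each 16-bit pixel up one byte
 * at a time. Even LUT entries hold the low byte's contribution (blue plus the
 * low green bits), odd entries hold the high byte's contribution (red, the
 * high green bits and the 0xFF alpha), and the two partial results are added.
 * For the little-endian pixel 0xFC08 (src[LO] = 0x08, src[HI] = 0xFC):
 *   RGB565_ARGB8888_LUT[0x08 * 2]     == 0x00000041
 *   RGB565_ARGB8888_LUT[0xFC * 2 + 1] == 0xffff8100
 * and their sum, 0xffff8141, is the expanded ARGB pixel (R=0xFF, G=0x81, B=0x41).
 */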
  1329. /* Special optimized blit for RGB 5-6-5 --> ABGR 8-8-8-8 */
  1330. static const Uint32 RGB565_ABGR8888_LUT[512] = {
  1331. 0xff000000, 0x00000000, 0xff080000, 0x00002000,
  1332. 0xff100000, 0x00004000, 0xff180000, 0x00006100,
  1333. 0xff200000, 0x00008100, 0xff290000, 0x0000a100,
  1334. 0xff310000, 0x0000c200, 0xff390000, 0x0000e200,
  1335. 0xff410000, 0x00000008, 0xff4a0000, 0x00002008,
  1336. 0xff520000, 0x00004008, 0xff5a0000, 0x00006108,
  1337. 0xff620000, 0x00008108, 0xff6a0000, 0x0000a108,
  1338. 0xff730000, 0x0000c208, 0xff7b0000, 0x0000e208,
  1339. 0xff830000, 0x00000010, 0xff8b0000, 0x00002010,
  1340. 0xff940000, 0x00004010, 0xff9c0000, 0x00006110,
  1341. 0xffa40000, 0x00008110, 0xffac0000, 0x0000a110,
  1342. 0xffb40000, 0x0000c210, 0xffbd0000, 0x0000e210,
  1343. 0xffc50000, 0x00000018, 0xffcd0000, 0x00002018,
  1344. 0xffd50000, 0x00004018, 0xffde0000, 0x00006118,
  1345. 0xffe60000, 0x00008118, 0xffee0000, 0x0000a118,
  1346. 0xfff60000, 0x0000c218, 0xffff0000, 0x0000e218,
  1347. 0xff000400, 0x00000020, 0xff080400, 0x00002020,
  1348. 0xff100400, 0x00004020, 0xff180400, 0x00006120,
  1349. 0xff200400, 0x00008120, 0xff290400, 0x0000a120,
  1350. 0xff310400, 0x0000c220, 0xff390400, 0x0000e220,
  1351. 0xff410400, 0x00000029, 0xff4a0400, 0x00002029,
  1352. 0xff520400, 0x00004029, 0xff5a0400, 0x00006129,
  1353. 0xff620400, 0x00008129, 0xff6a0400, 0x0000a129,
  1354. 0xff730400, 0x0000c229, 0xff7b0400, 0x0000e229,
  1355. 0xff830400, 0x00000031, 0xff8b0400, 0x00002031,
  1356. 0xff940400, 0x00004031, 0xff9c0400, 0x00006131,
  1357. 0xffa40400, 0x00008131, 0xffac0400, 0x0000a131,
  1358. 0xffb40400, 0x0000c231, 0xffbd0400, 0x0000e231,
  1359. 0xffc50400, 0x00000039, 0xffcd0400, 0x00002039,
  1360. 0xffd50400, 0x00004039, 0xffde0400, 0x00006139,
  1361. 0xffe60400, 0x00008139, 0xffee0400, 0x0000a139,
  1362. 0xfff60400, 0x0000c239, 0xffff0400, 0x0000e239,
  1363. 0xff000800, 0x00000041, 0xff080800, 0x00002041,
  1364. 0xff100800, 0x00004041, 0xff180800, 0x00006141,
  1365. 0xff200800, 0x00008141, 0xff290800, 0x0000a141,
  1366. 0xff310800, 0x0000c241, 0xff390800, 0x0000e241,
  1367. 0xff410800, 0x0000004a, 0xff4a0800, 0x0000204a,
  1368. 0xff520800, 0x0000404a, 0xff5a0800, 0x0000614a,
  1369. 0xff620800, 0x0000814a, 0xff6a0800, 0x0000a14a,
  1370. 0xff730800, 0x0000c24a, 0xff7b0800, 0x0000e24a,
  1371. 0xff830800, 0x00000052, 0xff8b0800, 0x00002052,
  1372. 0xff940800, 0x00004052, 0xff9c0800, 0x00006152,
  1373. 0xffa40800, 0x00008152, 0xffac0800, 0x0000a152,
  1374. 0xffb40800, 0x0000c252, 0xffbd0800, 0x0000e252,
  1375. 0xffc50800, 0x0000005a, 0xffcd0800, 0x0000205a,
  1376. 0xffd50800, 0x0000405a, 0xffde0800, 0x0000615a,
  1377. 0xffe60800, 0x0000815a, 0xffee0800, 0x0000a15a,
  1378. 0xfff60800, 0x0000c25a, 0xffff0800, 0x0000e25a,
  1379. 0xff000c00, 0x00000062, 0xff080c00, 0x00002062,
  1380. 0xff100c00, 0x00004062, 0xff180c00, 0x00006162,
  1381. 0xff200c00, 0x00008162, 0xff290c00, 0x0000a162,
  1382. 0xff310c00, 0x0000c262, 0xff390c00, 0x0000e262,
  1383. 0xff410c00, 0x0000006a, 0xff4a0c00, 0x0000206a,
  1384. 0xff520c00, 0x0000406a, 0xff5a0c00, 0x0000616a,
  1385. 0xff620c00, 0x0000816a, 0xff6a0c00, 0x0000a16a,
  1386. 0xff730c00, 0x0000c26a, 0xff7b0c00, 0x0000e26a,
  1387. 0xff830c00, 0x00000073, 0xff8b0c00, 0x00002073,
  1388. 0xff940c00, 0x00004073, 0xff9c0c00, 0x00006173,
  1389. 0xffa40c00, 0x00008173, 0xffac0c00, 0x0000a173,
  1390. 0xffb40c00, 0x0000c273, 0xffbd0c00, 0x0000e273,
  1391. 0xffc50c00, 0x0000007b, 0xffcd0c00, 0x0000207b,
  1392. 0xffd50c00, 0x0000407b, 0xffde0c00, 0x0000617b,
  1393. 0xffe60c00, 0x0000817b, 0xffee0c00, 0x0000a17b,
  1394. 0xfff60c00, 0x0000c27b, 0xffff0c00, 0x0000e27b,
  1395. 0xff001000, 0x00000083, 0xff081000, 0x00002083,
  1396. 0xff101000, 0x00004083, 0xff181000, 0x00006183,
  1397. 0xff201000, 0x00008183, 0xff291000, 0x0000a183,
  1398. 0xff311000, 0x0000c283, 0xff391000, 0x0000e283,
  1399. 0xff411000, 0x0000008b, 0xff4a1000, 0x0000208b,
  1400. 0xff521000, 0x0000408b, 0xff5a1000, 0x0000618b,
  1401. 0xff621000, 0x0000818b, 0xff6a1000, 0x0000a18b,
  1402. 0xff731000, 0x0000c28b, 0xff7b1000, 0x0000e28b,
  1403. 0xff831000, 0x00000094, 0xff8b1000, 0x00002094,
  1404. 0xff941000, 0x00004094, 0xff9c1000, 0x00006194,
  1405. 0xffa41000, 0x00008194, 0xffac1000, 0x0000a194,
  1406. 0xffb41000, 0x0000c294, 0xffbd1000, 0x0000e294,
  1407. 0xffc51000, 0x0000009c, 0xffcd1000, 0x0000209c,
  1408. 0xffd51000, 0x0000409c, 0xffde1000, 0x0000619c,
  1409. 0xffe61000, 0x0000819c, 0xffee1000, 0x0000a19c,
  1410. 0xfff61000, 0x0000c29c, 0xffff1000, 0x0000e29c,
  1411. 0xff001400, 0x000000a4, 0xff081400, 0x000020a4,
  1412. 0xff101400, 0x000040a4, 0xff181400, 0x000061a4,
  1413. 0xff201400, 0x000081a4, 0xff291400, 0x0000a1a4,
  1414. 0xff311400, 0x0000c2a4, 0xff391400, 0x0000e2a4,
  1415. 0xff411400, 0x000000ac, 0xff4a1400, 0x000020ac,
  1416. 0xff521400, 0x000040ac, 0xff5a1400, 0x000061ac,
  1417. 0xff621400, 0x000081ac, 0xff6a1400, 0x0000a1ac,
  1418. 0xff731400, 0x0000c2ac, 0xff7b1400, 0x0000e2ac,
  1419. 0xff831400, 0x000000b4, 0xff8b1400, 0x000020b4,
  1420. 0xff941400, 0x000040b4, 0xff9c1400, 0x000061b4,
  1421. 0xffa41400, 0x000081b4, 0xffac1400, 0x0000a1b4,
  1422. 0xffb41400, 0x0000c2b4, 0xffbd1400, 0x0000e2b4,
  1423. 0xffc51400, 0x000000bd, 0xffcd1400, 0x000020bd,
  1424. 0xffd51400, 0x000040bd, 0xffde1400, 0x000061bd,
  1425. 0xffe61400, 0x000081bd, 0xffee1400, 0x0000a1bd,
  1426. 0xfff61400, 0x0000c2bd, 0xffff1400, 0x0000e2bd,
  1427. 0xff001800, 0x000000c5, 0xff081800, 0x000020c5,
  1428. 0xff101800, 0x000040c5, 0xff181800, 0x000061c5,
  1429. 0xff201800, 0x000081c5, 0xff291800, 0x0000a1c5,
  1430. 0xff311800, 0x0000c2c5, 0xff391800, 0x0000e2c5,
  1431. 0xff411800, 0x000000cd, 0xff4a1800, 0x000020cd,
  1432. 0xff521800, 0x000040cd, 0xff5a1800, 0x000061cd,
  1433. 0xff621800, 0x000081cd, 0xff6a1800, 0x0000a1cd,
  1434. 0xff731800, 0x0000c2cd, 0xff7b1800, 0x0000e2cd,
  1435. 0xff831800, 0x000000d5, 0xff8b1800, 0x000020d5,
  1436. 0xff941800, 0x000040d5, 0xff9c1800, 0x000061d5,
  1437. 0xffa41800, 0x000081d5, 0xffac1800, 0x0000a1d5,
  1438. 0xffb41800, 0x0000c2d5, 0xffbd1800, 0x0000e2d5,
  1439. 0xffc51800, 0x000000de, 0xffcd1800, 0x000020de,
  1440. 0xffd51800, 0x000040de, 0xffde1800, 0x000061de,
  1441. 0xffe61800, 0x000081de, 0xffee1800, 0x0000a1de,
  1442. 0xfff61800, 0x0000c2de, 0xffff1800, 0x0000e2de,
  1443. 0xff001c00, 0x000000e6, 0xff081c00, 0x000020e6,
  1444. 0xff101c00, 0x000040e6, 0xff181c00, 0x000061e6,
  1445. 0xff201c00, 0x000081e6, 0xff291c00, 0x0000a1e6,
  1446. 0xff311c00, 0x0000c2e6, 0xff391c00, 0x0000e2e6,
  1447. 0xff411c00, 0x000000ee, 0xff4a1c00, 0x000020ee,
  1448. 0xff521c00, 0x000040ee, 0xff5a1c00, 0x000061ee,
  1449. 0xff621c00, 0x000081ee, 0xff6a1c00, 0x0000a1ee,
  1450. 0xff731c00, 0x0000c2ee, 0xff7b1c00, 0x0000e2ee,
  1451. 0xff831c00, 0x000000f6, 0xff8b1c00, 0x000020f6,
  1452. 0xff941c00, 0x000040f6, 0xff9c1c00, 0x000061f6,
  1453. 0xffa41c00, 0x000081f6, 0xffac1c00, 0x0000a1f6,
  1454. 0xffb41c00, 0x0000c2f6, 0xffbd1c00, 0x0000e2f6,
  1455. 0xffc51c00, 0x000000ff, 0xffcd1c00, 0x000020ff,
  1456. 0xffd51c00, 0x000040ff, 0xffde1c00, 0x000061ff,
  1457. 0xffe61c00, 0x000081ff, 0xffee1c00, 0x0000a1ff,
  1458. 0xfff61c00, 0x0000c2ff, 0xffff1c00, 0x0000e2ff
  1459. };
  1460. static void
  1461. Blit_RGB565_ABGR8888(SDL_BlitInfo * info)
  1462. {
  1463. Blit_RGB565_32(info, RGB565_ABGR8888_LUT);
  1464. }
  1465. /* Special optimized blit for RGB 5-6-5 --> RGBA 8-8-8-8 */
  1466. static const Uint32 RGB565_RGBA8888_LUT[512] = {
  1467. 0x000000ff, 0x00000000, 0x000008ff, 0x00200000,
  1468. 0x000010ff, 0x00400000, 0x000018ff, 0x00610000,
  1469. 0x000020ff, 0x00810000, 0x000029ff, 0x00a10000,
  1470. 0x000031ff, 0x00c20000, 0x000039ff, 0x00e20000,
  1471. 0x000041ff, 0x08000000, 0x00004aff, 0x08200000,
  1472. 0x000052ff, 0x08400000, 0x00005aff, 0x08610000,
  1473. 0x000062ff, 0x08810000, 0x00006aff, 0x08a10000,
  1474. 0x000073ff, 0x08c20000, 0x00007bff, 0x08e20000,
  1475. 0x000083ff, 0x10000000, 0x00008bff, 0x10200000,
  1476. 0x000094ff, 0x10400000, 0x00009cff, 0x10610000,
  1477. 0x0000a4ff, 0x10810000, 0x0000acff, 0x10a10000,
  1478. 0x0000b4ff, 0x10c20000, 0x0000bdff, 0x10e20000,
  1479. 0x0000c5ff, 0x18000000, 0x0000cdff, 0x18200000,
  1480. 0x0000d5ff, 0x18400000, 0x0000deff, 0x18610000,
  1481. 0x0000e6ff, 0x18810000, 0x0000eeff, 0x18a10000,
  1482. 0x0000f6ff, 0x18c20000, 0x0000ffff, 0x18e20000,
  1483. 0x000400ff, 0x20000000, 0x000408ff, 0x20200000,
  1484. 0x000410ff, 0x20400000, 0x000418ff, 0x20610000,
  1485. 0x000420ff, 0x20810000, 0x000429ff, 0x20a10000,
  1486. 0x000431ff, 0x20c20000, 0x000439ff, 0x20e20000,
  1487. 0x000441ff, 0x29000000, 0x00044aff, 0x29200000,
  1488. 0x000452ff, 0x29400000, 0x00045aff, 0x29610000,
  1489. 0x000462ff, 0x29810000, 0x00046aff, 0x29a10000,
  1490. 0x000473ff, 0x29c20000, 0x00047bff, 0x29e20000,
  1491. 0x000483ff, 0x31000000, 0x00048bff, 0x31200000,
  1492. 0x000494ff, 0x31400000, 0x00049cff, 0x31610000,
  1493. 0x0004a4ff, 0x31810000, 0x0004acff, 0x31a10000,
  1494. 0x0004b4ff, 0x31c20000, 0x0004bdff, 0x31e20000,
  1495. 0x0004c5ff, 0x39000000, 0x0004cdff, 0x39200000,
  1496. 0x0004d5ff, 0x39400000, 0x0004deff, 0x39610000,
  1497. 0x0004e6ff, 0x39810000, 0x0004eeff, 0x39a10000,
  1498. 0x0004f6ff, 0x39c20000, 0x0004ffff, 0x39e20000,
  1499. 0x000800ff, 0x41000000, 0x000808ff, 0x41200000,
  1500. 0x000810ff, 0x41400000, 0x000818ff, 0x41610000,
  1501. 0x000820ff, 0x41810000, 0x000829ff, 0x41a10000,
  1502. 0x000831ff, 0x41c20000, 0x000839ff, 0x41e20000,
  1503. 0x000841ff, 0x4a000000, 0x00084aff, 0x4a200000,
  1504. 0x000852ff, 0x4a400000, 0x00085aff, 0x4a610000,
  1505. 0x000862ff, 0x4a810000, 0x00086aff, 0x4aa10000,
  1506. 0x000873ff, 0x4ac20000, 0x00087bff, 0x4ae20000,
  1507. 0x000883ff, 0x52000000, 0x00088bff, 0x52200000,
  1508. 0x000894ff, 0x52400000, 0x00089cff, 0x52610000,
  1509. 0x0008a4ff, 0x52810000, 0x0008acff, 0x52a10000,
  1510. 0x0008b4ff, 0x52c20000, 0x0008bdff, 0x52e20000,
  1511. 0x0008c5ff, 0x5a000000, 0x0008cdff, 0x5a200000,
  1512. 0x0008d5ff, 0x5a400000, 0x0008deff, 0x5a610000,
  1513. 0x0008e6ff, 0x5a810000, 0x0008eeff, 0x5aa10000,
  1514. 0x0008f6ff, 0x5ac20000, 0x0008ffff, 0x5ae20000,
  1515. 0x000c00ff, 0x62000000, 0x000c08ff, 0x62200000,
  1516. 0x000c10ff, 0x62400000, 0x000c18ff, 0x62610000,
  1517. 0x000c20ff, 0x62810000, 0x000c29ff, 0x62a10000,
  1518. 0x000c31ff, 0x62c20000, 0x000c39ff, 0x62e20000,
  1519. 0x000c41ff, 0x6a000000, 0x000c4aff, 0x6a200000,
  1520. 0x000c52ff, 0x6a400000, 0x000c5aff, 0x6a610000,
  1521. 0x000c62ff, 0x6a810000, 0x000c6aff, 0x6aa10000,
  1522. 0x000c73ff, 0x6ac20000, 0x000c7bff, 0x6ae20000,
  1523. 0x000c83ff, 0x73000000, 0x000c8bff, 0x73200000,
  1524. 0x000c94ff, 0x73400000, 0x000c9cff, 0x73610000,
  1525. 0x000ca4ff, 0x73810000, 0x000cacff, 0x73a10000,
  1526. 0x000cb4ff, 0x73c20000, 0x000cbdff, 0x73e20000,
  1527. 0x000cc5ff, 0x7b000000, 0x000ccdff, 0x7b200000,
  1528. 0x000cd5ff, 0x7b400000, 0x000cdeff, 0x7b610000,
  1529. 0x000ce6ff, 0x7b810000, 0x000ceeff, 0x7ba10000,
  1530. 0x000cf6ff, 0x7bc20000, 0x000cffff, 0x7be20000,
  1531. 0x001000ff, 0x83000000, 0x001008ff, 0x83200000,
  1532. 0x001010ff, 0x83400000, 0x001018ff, 0x83610000,
  1533. 0x001020ff, 0x83810000, 0x001029ff, 0x83a10000,
  1534. 0x001031ff, 0x83c20000, 0x001039ff, 0x83e20000,
  1535. 0x001041ff, 0x8b000000, 0x00104aff, 0x8b200000,
  1536. 0x001052ff, 0x8b400000, 0x00105aff, 0x8b610000,
  1537. 0x001062ff, 0x8b810000, 0x00106aff, 0x8ba10000,
  1538. 0x001073ff, 0x8bc20000, 0x00107bff, 0x8be20000,
  1539. 0x001083ff, 0x94000000, 0x00108bff, 0x94200000,
  1540. 0x001094ff, 0x94400000, 0x00109cff, 0x94610000,
  1541. 0x0010a4ff, 0x94810000, 0x0010acff, 0x94a10000,
  1542. 0x0010b4ff, 0x94c20000, 0x0010bdff, 0x94e20000,
  1543. 0x0010c5ff, 0x9c000000, 0x0010cdff, 0x9c200000,
  1544. 0x0010d5ff, 0x9c400000, 0x0010deff, 0x9c610000,
  1545. 0x0010e6ff, 0x9c810000, 0x0010eeff, 0x9ca10000,
  1546. 0x0010f6ff, 0x9cc20000, 0x0010ffff, 0x9ce20000,
  1547. 0x001400ff, 0xa4000000, 0x001408ff, 0xa4200000,
  1548. 0x001410ff, 0xa4400000, 0x001418ff, 0xa4610000,
  1549. 0x001420ff, 0xa4810000, 0x001429ff, 0xa4a10000,
  1550. 0x001431ff, 0xa4c20000, 0x001439ff, 0xa4e20000,
  1551. 0x001441ff, 0xac000000, 0x00144aff, 0xac200000,
  1552. 0x001452ff, 0xac400000, 0x00145aff, 0xac610000,
  1553. 0x001462ff, 0xac810000, 0x00146aff, 0xaca10000,
  1554. 0x001473ff, 0xacc20000, 0x00147bff, 0xace20000,
  1555. 0x001483ff, 0xb4000000, 0x00148bff, 0xb4200000,
  1556. 0x001494ff, 0xb4400000, 0x00149cff, 0xb4610000,
  1557. 0x0014a4ff, 0xb4810000, 0x0014acff, 0xb4a10000,
  1558. 0x0014b4ff, 0xb4c20000, 0x0014bdff, 0xb4e20000,
  1559. 0x0014c5ff, 0xbd000000, 0x0014cdff, 0xbd200000,
  1560. 0x0014d5ff, 0xbd400000, 0x0014deff, 0xbd610000,
  1561. 0x0014e6ff, 0xbd810000, 0x0014eeff, 0xbda10000,
  1562. 0x0014f6ff, 0xbdc20000, 0x0014ffff, 0xbde20000,
  1563. 0x001800ff, 0xc5000000, 0x001808ff, 0xc5200000,
  1564. 0x001810ff, 0xc5400000, 0x001818ff, 0xc5610000,
  1565. 0x001820ff, 0xc5810000, 0x001829ff, 0xc5a10000,
  1566. 0x001831ff, 0xc5c20000, 0x001839ff, 0xc5e20000,
  1567. 0x001841ff, 0xcd000000, 0x00184aff, 0xcd200000,
  1568. 0x001852ff, 0xcd400000, 0x00185aff, 0xcd610000,
  1569. 0x001862ff, 0xcd810000, 0x00186aff, 0xcda10000,
  1570. 0x001873ff, 0xcdc20000, 0x00187bff, 0xcde20000,
  1571. 0x001883ff, 0xd5000000, 0x00188bff, 0xd5200000,
  1572. 0x001894ff, 0xd5400000, 0x00189cff, 0xd5610000,
  1573. 0x0018a4ff, 0xd5810000, 0x0018acff, 0xd5a10000,
  1574. 0x0018b4ff, 0xd5c20000, 0x0018bdff, 0xd5e20000,
  1575. 0x0018c5ff, 0xde000000, 0x0018cdff, 0xde200000,
  1576. 0x0018d5ff, 0xde400000, 0x0018deff, 0xde610000,
  1577. 0x0018e6ff, 0xde810000, 0x0018eeff, 0xdea10000,
  1578. 0x0018f6ff, 0xdec20000, 0x0018ffff, 0xdee20000,
  1579. 0x001c00ff, 0xe6000000, 0x001c08ff, 0xe6200000,
  1580. 0x001c10ff, 0xe6400000, 0x001c18ff, 0xe6610000,
  1581. 0x001c20ff, 0xe6810000, 0x001c29ff, 0xe6a10000,
  1582. 0x001c31ff, 0xe6c20000, 0x001c39ff, 0xe6e20000,
  1583. 0x001c41ff, 0xee000000, 0x001c4aff, 0xee200000,
  1584. 0x001c52ff, 0xee400000, 0x001c5aff, 0xee610000,
  1585. 0x001c62ff, 0xee810000, 0x001c6aff, 0xeea10000,
  1586. 0x001c73ff, 0xeec20000, 0x001c7bff, 0xeee20000,
  1587. 0x001c83ff, 0xf6000000, 0x001c8bff, 0xf6200000,
  1588. 0x001c94ff, 0xf6400000, 0x001c9cff, 0xf6610000,
  1589. 0x001ca4ff, 0xf6810000, 0x001cacff, 0xf6a10000,
  1590. 0x001cb4ff, 0xf6c20000, 0x001cbdff, 0xf6e20000,
  1591. 0x001cc5ff, 0xff000000, 0x001ccdff, 0xff200000,
  1592. 0x001cd5ff, 0xff400000, 0x001cdeff, 0xff610000,
  1593. 0x001ce6ff, 0xff810000, 0x001ceeff, 0xffa10000,
  1594. 0x001cf6ff, 0xffc20000, 0x001cffff, 0xffe20000,
  1595. };
  1596. static void
  1597. Blit_RGB565_RGBA8888(SDL_BlitInfo * info)
  1598. {
  1599. Blit_RGB565_32(info, RGB565_RGBA8888_LUT);
  1600. }
  1601. /* Special optimized blit for RGB 5-6-5 --> BGRA 8-8-8-8 */
  1602. static const Uint32 RGB565_BGRA8888_LUT[512] = {
  1603. 0x00000000, 0x000000ff, 0x08000000, 0x002000ff,
  1604. 0x10000000, 0x004000ff, 0x18000000, 0x006100ff,
  1605. 0x20000000, 0x008100ff, 0x29000000, 0x00a100ff,
  1606. 0x31000000, 0x00c200ff, 0x39000000, 0x00e200ff,
  1607. 0x41000000, 0x000008ff, 0x4a000000, 0x002008ff,
  1608. 0x52000000, 0x004008ff, 0x5a000000, 0x006108ff,
  1609. 0x62000000, 0x008108ff, 0x6a000000, 0x00a108ff,
  1610. 0x73000000, 0x00c208ff, 0x7b000000, 0x00e208ff,
  1611. 0x83000000, 0x000010ff, 0x8b000000, 0x002010ff,
  1612. 0x94000000, 0x004010ff, 0x9c000000, 0x006110ff,
  1613. 0xa4000000, 0x008110ff, 0xac000000, 0x00a110ff,
  1614. 0xb4000000, 0x00c210ff, 0xbd000000, 0x00e210ff,
  1615. 0xc5000000, 0x000018ff, 0xcd000000, 0x002018ff,
  1616. 0xd5000000, 0x004018ff, 0xde000000, 0x006118ff,
  1617. 0xe6000000, 0x008118ff, 0xee000000, 0x00a118ff,
  1618. 0xf6000000, 0x00c218ff, 0xff000000, 0x00e218ff,
  1619. 0x00040000, 0x000020ff, 0x08040000, 0x002020ff,
  1620. 0x10040000, 0x004020ff, 0x18040000, 0x006120ff,
  1621. 0x20040000, 0x008120ff, 0x29040000, 0x00a120ff,
  1622. 0x31040000, 0x00c220ff, 0x39040000, 0x00e220ff,
  1623. 0x41040000, 0x000029ff, 0x4a040000, 0x002029ff,
  1624. 0x52040000, 0x004029ff, 0x5a040000, 0x006129ff,
  1625. 0x62040000, 0x008129ff, 0x6a040000, 0x00a129ff,
  1626. 0x73040000, 0x00c229ff, 0x7b040000, 0x00e229ff,
  1627. 0x83040000, 0x000031ff, 0x8b040000, 0x002031ff,
  1628. 0x94040000, 0x004031ff, 0x9c040000, 0x006131ff,
  1629. 0xa4040000, 0x008131ff, 0xac040000, 0x00a131ff,
  1630. 0xb4040000, 0x00c231ff, 0xbd040000, 0x00e231ff,
  1631. 0xc5040000, 0x000039ff, 0xcd040000, 0x002039ff,
  1632. 0xd5040000, 0x004039ff, 0xde040000, 0x006139ff,
  1633. 0xe6040000, 0x008139ff, 0xee040000, 0x00a139ff,
  1634. 0xf6040000, 0x00c239ff, 0xff040000, 0x00e239ff,
  1635. 0x00080000, 0x000041ff, 0x08080000, 0x002041ff,
  1636. 0x10080000, 0x004041ff, 0x18080000, 0x006141ff,
  1637. 0x20080000, 0x008141ff, 0x29080000, 0x00a141ff,
  1638. 0x31080000, 0x00c241ff, 0x39080000, 0x00e241ff,
  1639. 0x41080000, 0x00004aff, 0x4a080000, 0x00204aff,
  1640. 0x52080000, 0x00404aff, 0x5a080000, 0x00614aff,
  1641. 0x62080000, 0x00814aff, 0x6a080000, 0x00a14aff,
  1642. 0x73080000, 0x00c24aff, 0x7b080000, 0x00e24aff,
  1643. 0x83080000, 0x000052ff, 0x8b080000, 0x002052ff,
  1644. 0x94080000, 0x004052ff, 0x9c080000, 0x006152ff,
  1645. 0xa4080000, 0x008152ff, 0xac080000, 0x00a152ff,
  1646. 0xb4080000, 0x00c252ff, 0xbd080000, 0x00e252ff,
  1647. 0xc5080000, 0x00005aff, 0xcd080000, 0x00205aff,
  1648. 0xd5080000, 0x00405aff, 0xde080000, 0x00615aff,
  1649. 0xe6080000, 0x00815aff, 0xee080000, 0x00a15aff,
  1650. 0xf6080000, 0x00c25aff, 0xff080000, 0x00e25aff,
  1651. 0x000c0000, 0x000062ff, 0x080c0000, 0x002062ff,
  1652. 0x100c0000, 0x004062ff, 0x180c0000, 0x006162ff,
  1653. 0x200c0000, 0x008162ff, 0x290c0000, 0x00a162ff,
  1654. 0x310c0000, 0x00c262ff, 0x390c0000, 0x00e262ff,
  1655. 0x410c0000, 0x00006aff, 0x4a0c0000, 0x00206aff,
  1656. 0x520c0000, 0x00406aff, 0x5a0c0000, 0x00616aff,
  1657. 0x620c0000, 0x00816aff, 0x6a0c0000, 0x00a16aff,
  1658. 0x730c0000, 0x00c26aff, 0x7b0c0000, 0x00e26aff,
  1659. 0x830c0000, 0x000073ff, 0x8b0c0000, 0x002073ff,
  1660. 0x940c0000, 0x004073ff, 0x9c0c0000, 0x006173ff,
  1661. 0xa40c0000, 0x008173ff, 0xac0c0000, 0x00a173ff,
  1662. 0xb40c0000, 0x00c273ff, 0xbd0c0000, 0x00e273ff,
  1663. 0xc50c0000, 0x00007bff, 0xcd0c0000, 0x00207bff,
  1664. 0xd50c0000, 0x00407bff, 0xde0c0000, 0x00617bff,
  1665. 0xe60c0000, 0x00817bff, 0xee0c0000, 0x00a17bff,
  1666. 0xf60c0000, 0x00c27bff, 0xff0c0000, 0x00e27bff,
  1667. 0x00100000, 0x000083ff, 0x08100000, 0x002083ff,
  1668. 0x10100000, 0x004083ff, 0x18100000, 0x006183ff,
  1669. 0x20100000, 0x008183ff, 0x29100000, 0x00a183ff,
  1670. 0x31100000, 0x00c283ff, 0x39100000, 0x00e283ff,
  1671. 0x41100000, 0x00008bff, 0x4a100000, 0x00208bff,
  1672. 0x52100000, 0x00408bff, 0x5a100000, 0x00618bff,
  1673. 0x62100000, 0x00818bff, 0x6a100000, 0x00a18bff,
  1674. 0x73100000, 0x00c28bff, 0x7b100000, 0x00e28bff,
  1675. 0x83100000, 0x000094ff, 0x8b100000, 0x002094ff,
  1676. 0x94100000, 0x004094ff, 0x9c100000, 0x006194ff,
  1677. 0xa4100000, 0x008194ff, 0xac100000, 0x00a194ff,
  1678. 0xb4100000, 0x00c294ff, 0xbd100000, 0x00e294ff,
  1679. 0xc5100000, 0x00009cff, 0xcd100000, 0x00209cff,
  1680. 0xd5100000, 0x00409cff, 0xde100000, 0x00619cff,
  1681. 0xe6100000, 0x00819cff, 0xee100000, 0x00a19cff,
  1682. 0xf6100000, 0x00c29cff, 0xff100000, 0x00e29cff,
  1683. 0x00140000, 0x0000a4ff, 0x08140000, 0x0020a4ff,
  1684. 0x10140000, 0x0040a4ff, 0x18140000, 0x0061a4ff,
  1685. 0x20140000, 0x0081a4ff, 0x29140000, 0x00a1a4ff,
  1686. 0x31140000, 0x00c2a4ff, 0x39140000, 0x00e2a4ff,
  1687. 0x41140000, 0x0000acff, 0x4a140000, 0x0020acff,
  1688. 0x52140000, 0x0040acff, 0x5a140000, 0x0061acff,
  1689. 0x62140000, 0x0081acff, 0x6a140000, 0x00a1acff,
  1690. 0x73140000, 0x00c2acff, 0x7b140000, 0x00e2acff,
  1691. 0x83140000, 0x0000b4ff, 0x8b140000, 0x0020b4ff,
  1692. 0x94140000, 0x0040b4ff, 0x9c140000, 0x0061b4ff,
  1693. 0xa4140000, 0x0081b4ff, 0xac140000, 0x00a1b4ff,
  1694. 0xb4140000, 0x00c2b4ff, 0xbd140000, 0x00e2b4ff,
  1695. 0xc5140000, 0x0000bdff, 0xcd140000, 0x0020bdff,
  1696. 0xd5140000, 0x0040bdff, 0xde140000, 0x0061bdff,
  1697. 0xe6140000, 0x0081bdff, 0xee140000, 0x00a1bdff,
  1698. 0xf6140000, 0x00c2bdff, 0xff140000, 0x00e2bdff,
  1699. 0x00180000, 0x0000c5ff, 0x08180000, 0x0020c5ff,
  1700. 0x10180000, 0x0040c5ff, 0x18180000, 0x0061c5ff,
  1701. 0x20180000, 0x0081c5ff, 0x29180000, 0x00a1c5ff,
  1702. 0x31180000, 0x00c2c5ff, 0x39180000, 0x00e2c5ff,
  1703. 0x41180000, 0x0000cdff, 0x4a180000, 0x0020cdff,
  1704. 0x52180000, 0x0040cdff, 0x5a180000, 0x0061cdff,
  1705. 0x62180000, 0x0081cdff, 0x6a180000, 0x00a1cdff,
  1706. 0x73180000, 0x00c2cdff, 0x7b180000, 0x00e2cdff,
  1707. 0x83180000, 0x0000d5ff, 0x8b180000, 0x0020d5ff,
  1708. 0x94180000, 0x0040d5ff, 0x9c180000, 0x0061d5ff,
  1709. 0xa4180000, 0x0081d5ff, 0xac180000, 0x00a1d5ff,
  1710. 0xb4180000, 0x00c2d5ff, 0xbd180000, 0x00e2d5ff,
  1711. 0xc5180000, 0x0000deff, 0xcd180000, 0x0020deff,
  1712. 0xd5180000, 0x0040deff, 0xde180000, 0x0061deff,
  1713. 0xe6180000, 0x0081deff, 0xee180000, 0x00a1deff,
  1714. 0xf6180000, 0x00c2deff, 0xff180000, 0x00e2deff,
  1715. 0x001c0000, 0x0000e6ff, 0x081c0000, 0x0020e6ff,
  1716. 0x101c0000, 0x0040e6ff, 0x181c0000, 0x0061e6ff,
  1717. 0x201c0000, 0x0081e6ff, 0x291c0000, 0x00a1e6ff,
  1718. 0x311c0000, 0x00c2e6ff, 0x391c0000, 0x00e2e6ff,
  1719. 0x411c0000, 0x0000eeff, 0x4a1c0000, 0x0020eeff,
  1720. 0x521c0000, 0x0040eeff, 0x5a1c0000, 0x0061eeff,
  1721. 0x621c0000, 0x0081eeff, 0x6a1c0000, 0x00a1eeff,
  1722. 0x731c0000, 0x00c2eeff, 0x7b1c0000, 0x00e2eeff,
  1723. 0x831c0000, 0x0000f6ff, 0x8b1c0000, 0x0020f6ff,
  1724. 0x941c0000, 0x0040f6ff, 0x9c1c0000, 0x0061f6ff,
  1725. 0xa41c0000, 0x0081f6ff, 0xac1c0000, 0x00a1f6ff,
  1726. 0xb41c0000, 0x00c2f6ff, 0xbd1c0000, 0x00e2f6ff,
  1727. 0xc51c0000, 0x0000ffff, 0xcd1c0000, 0x0020ffff,
  1728. 0xd51c0000, 0x0040ffff, 0xde1c0000, 0x0061ffff,
  1729. 0xe61c0000, 0x0081ffff, 0xee1c0000, 0x00a1ffff,
  1730. 0xf61c0000, 0x00c2ffff, 0xff1c0000, 0x00e2ffff
  1731. };
  1732. static void
  1733. Blit_RGB565_BGRA8888(SDL_BlitInfo * info)
  1734. {
  1735. Blit_RGB565_32(info, RGB565_BGRA8888_LUT);
  1736. }
  1737. /* Special optimized blit for RGB 8-8-8 --> RGB 3-3-2 */
  1738. #ifndef RGB888_RGB332
  1739. #define RGB888_RGB332(dst, src) { \
  1740. dst = (((src)&0x00E00000)>>16)| \
  1741. (((src)&0x0000E000)>>11)| \
  1742. (((src)&0x000000C0)>>6); \
  1743. }
  1744. #endif
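/*
 * Worked example (illustrative only): RGB888_RGB332 keeps the top 3 bits of
 * red and green and the top 2 bits of blue. With src = 0x00FF8040
 * (R=0xFF, G=0x80, B=0x40):
 *     red:   (src & 0x00E00000) >> 16 = 0xE0
 *     green: (src & 0x0000E000) >> 11 = 0x10
 *     blue:  (src & 0x000000C0) >>  6 = 0x01
 * giving dst = 0xE0 | 0x10 | 0x01 = 0xF1.
 */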
  1745. static void
  1746. Blit_RGB888_index8_map(SDL_BlitInfo * info)
  1747. {
  1748. #ifndef USE_DUFFS_LOOP
  1749. int c;
  1750. #endif
  1751. int Pixel;
  1752. int width, height;
  1753. Uint32 *src;
  1754. const Uint8 *map;
  1755. Uint8 *dst;
  1756. int srcskip, dstskip;
  1757. /* Set up some basic variables */
  1758. width = info->dst_w;
  1759. height = info->dst_h;
  1760. src = (Uint32 *) info->src;
  1761. srcskip = info->src_skip / 4;
  1762. dst = info->dst;
  1763. dstskip = info->dst_skip;
  1764. map = info->table;
  1765. #ifdef USE_DUFFS_LOOP
  1766. while (height--) {
  1767. /* *INDENT-OFF* */
  1768. DUFFS_LOOP(
  1769. RGB888_RGB332(Pixel, *src);
  1770. *dst++ = map[Pixel];
  1771. ++src;
  1772. , width);
  1773. /* *INDENT-ON* */
  1774. src += srcskip;
  1775. dst += dstskip;
  1776. }
  1777. #else
  1778. while (height--) {
  1779. for (c = width / 4; c; --c) {
  1780. /* Pack RGB into 8bit pixel */
  1781. RGB888_RGB332(Pixel, *src);
  1782. *dst++ = map[Pixel];
  1783. ++src;
  1784. RGB888_RGB332(Pixel, *src);
  1785. *dst++ = map[Pixel];
  1786. ++src;
  1787. RGB888_RGB332(Pixel, *src);
  1788. *dst++ = map[Pixel];
  1789. ++src;
  1790. RGB888_RGB332(Pixel, *src);
  1791. *dst++ = map[Pixel];
  1792. ++src;
  1793. }
  1794. switch (width & 3) {
  1795. case 3:
  1796. RGB888_RGB332(Pixel, *src);
  1797. *dst++ = map[Pixel];
1798. ++src; /* fall through */
  1799. case 2:
  1800. RGB888_RGB332(Pixel, *src);
  1801. *dst++ = map[Pixel];
1802. ++src; /* fall through */
  1803. case 1:
  1804. RGB888_RGB332(Pixel, *src);
  1805. *dst++ = map[Pixel];
  1806. ++src;
  1807. }
  1808. src += srcskip;
  1809. dst += dstskip;
  1810. }
  1811. #endif /* USE_DUFFS_LOOP */
  1812. }
  1813. static void
  1814. BlitNto1(SDL_BlitInfo * info)
  1815. {
  1816. #ifndef USE_DUFFS_LOOP
  1817. int c;
  1818. #endif
  1819. int width, height;
  1820. Uint8 *src;
  1821. const Uint8 *map;
  1822. Uint8 *dst;
  1823. int srcskip, dstskip;
  1824. int srcbpp;
  1825. Uint32 Pixel;
  1826. int sR, sG, sB;
  1827. SDL_PixelFormat *srcfmt;
  1828. /* Set up some basic variables */
  1829. width = info->dst_w;
  1830. height = info->dst_h;
  1831. src = info->src;
  1832. srcskip = info->src_skip;
  1833. dst = info->dst;
  1834. dstskip = info->dst_skip;
  1835. map = info->table;
  1836. srcfmt = info->src_fmt;
  1837. srcbpp = srcfmt->BytesPerPixel;
  1838. if (map == NULL) {
  1839. while (height--) {
  1840. #ifdef USE_DUFFS_LOOP
  1841. /* *INDENT-OFF* */
  1842. DUFFS_LOOP(
  1843. DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel,
  1844. sR, sG, sB);
  1845. if ( 1 ) {
  1846. /* Pack RGB into 8bit pixel */
  1847. *dst = ((sR>>5)<<(3+2))|
  1848. ((sG>>5)<<(2)) |
  1849. ((sB>>6)<<(0)) ;
  1850. }
  1851. dst++;
  1852. src += srcbpp;
  1853. , width);
  1854. /* *INDENT-ON* */
  1855. #else
  1856. for (c = width; c; --c) {
  1857. DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, sR, sG, sB);
  1858. if (1) {
  1859. /* Pack RGB into 8bit pixel */
  1860. *dst = ((sR >> 5) << (3 + 2)) |
  1861. ((sG >> 5) << (2)) | ((sB >> 6) << (0));
  1862. }
  1863. dst++;
  1864. src += srcbpp;
  1865. }
  1866. #endif
  1867. src += srcskip;
  1868. dst += dstskip;
  1869. }
  1870. } else {
  1871. while (height--) {
  1872. #ifdef USE_DUFFS_LOOP
  1873. /* *INDENT-OFF* */
  1874. DUFFS_LOOP(
  1875. DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel,
  1876. sR, sG, sB);
  1877. if ( 1 ) {
  1878. /* Pack RGB into 8bit pixel */
  1879. *dst = map[((sR>>5)<<(3+2))|
  1880. ((sG>>5)<<(2)) |
  1881. ((sB>>6)<<(0)) ];
  1882. }
  1883. dst++;
  1884. src += srcbpp;
  1885. , width);
  1886. /* *INDENT-ON* */
  1887. #else
  1888. for (c = width; c; --c) {
  1889. DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, sR, sG, sB);
  1890. if (1) {
  1891. /* Pack RGB into 8bit pixel */
  1892. *dst = map[((sR >> 5) << (3 + 2)) |
  1893. ((sG >> 5) << (2)) | ((sB >> 6) << (0))];
  1894. }
  1895. dst++;
  1896. src += srcbpp;
  1897. }
  1898. #endif /* USE_DUFFS_LOOP */
  1899. src += srcskip;
  1900. dst += dstskip;
  1901. }
  1902. }
  1903. }
  1904. /* blits 32 bit RGB<->RGBA with both surfaces having the same R,G,B fields */
  1905. static void
  1906. Blit4to4MaskAlpha(SDL_BlitInfo * info)
  1907. {
  1908. int width = info->dst_w;
  1909. int height = info->dst_h;
  1910. Uint32 *src = (Uint32 *) info->src;
  1911. int srcskip = info->src_skip;
  1912. Uint32 *dst = (Uint32 *) info->dst;
  1913. int dstskip = info->dst_skip;
  1914. SDL_PixelFormat *srcfmt = info->src_fmt;
  1915. SDL_PixelFormat *dstfmt = info->dst_fmt;
  1916. if (dstfmt->Amask) {
  1917. /* RGB->RGBA, SET_ALPHA */
  1918. Uint32 mask = (info->a >> dstfmt->Aloss) << dstfmt->Ashift;
  1919. while (height--) {
  1920. /* *INDENT-OFF* */
  1921. DUFFS_LOOP(
  1922. {
  1923. *dst = *src | mask;
  1924. ++dst;
  1925. ++src;
  1926. },
  1927. width);
  1928. /* *INDENT-ON* */
  1929. src = (Uint32 *) ((Uint8 *) src + srcskip);
  1930. dst = (Uint32 *) ((Uint8 *) dst + dstskip);
  1931. }
  1932. } else {
  1933. /* RGBA->RGB, NO_ALPHA */
  1934. Uint32 mask = srcfmt->Rmask | srcfmt->Gmask | srcfmt->Bmask;
  1935. while (height--) {
  1936. /* *INDENT-OFF* */
  1937. DUFFS_LOOP(
  1938. {
  1939. *dst = *src & mask;
  1940. ++dst;
  1941. ++src;
  1942. },
  1943. width);
  1944. /* *INDENT-ON* */
  1945. src = (Uint32 *) ((Uint8 *) src + srcskip);
  1946. dst = (Uint32 *) ((Uint8 *) dst + dstskip);
  1947. }
  1948. }
  1949. }
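/*
 * For illustration: because both formats share their R,G,B layout, no
 * per-channel work is needed above. Blitting a 32-bit RGB surface (no Amask)
 * to ARGB8888 with info->a = 0xFF just ORs 0xFF000000 into each pixel, while
 * the reverse direction masks each pixel with Rmask|Gmask|Bmask = 0x00FFFFFF
 * to drop the alpha byte.
 */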
  1950. static void
  1951. BlitNtoN(SDL_BlitInfo * info)
  1952. {
  1953. int width = info->dst_w;
  1954. int height = info->dst_h;
  1955. Uint8 *src = info->src;
  1956. int srcskip = info->src_skip;
  1957. Uint8 *dst = info->dst;
  1958. int dstskip = info->dst_skip;
  1959. SDL_PixelFormat *srcfmt = info->src_fmt;
  1960. int srcbpp = srcfmt->BytesPerPixel;
  1961. SDL_PixelFormat *dstfmt = info->dst_fmt;
  1962. int dstbpp = dstfmt->BytesPerPixel;
  1963. unsigned alpha = dstfmt->Amask ? info->a : 0;
  1964. while (height--) {
  1965. /* *INDENT-OFF* */
  1966. DUFFS_LOOP(
  1967. {
  1968. Uint32 Pixel;
  1969. unsigned sR;
  1970. unsigned sG;
  1971. unsigned sB;
  1972. DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel, sR, sG, sB);
  1973. ASSEMBLE_RGBA(dst, dstbpp, dstfmt, sR, sG, sB, alpha);
  1974. dst += dstbpp;
  1975. src += srcbpp;
  1976. },
  1977. width);
  1978. /* *INDENT-ON* */
  1979. src += srcskip;
  1980. dst += dstskip;
  1981. }
  1982. }
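/*
 * Rough sketch of what the generic path above does per pixel; the real
 * DISEMBLE_RGB / ASSEMBLE_RGBA macros live in SDL_blit.h and differ in
 * detail (loss expansion, 3-byte formats, etc.). Disabled, for reference:
 */
#if 0
static Uint32
BlitNtoN_pixel_sketch(Uint32 px, const SDL_PixelFormat * sf,
                      const SDL_PixelFormat * df, Uint32 a)
{
    /* unpack with the source format's mask/shift/loss fields */
    Uint8 r = (Uint8) (((px & sf->Rmask) >> sf->Rshift) << sf->Rloss);
    Uint8 g = (Uint8) (((px & sf->Gmask) >> sf->Gshift) << sf->Gloss);
    Uint8 b = (Uint8) (((px & sf->Bmask) >> sf->Bshift) << sf->Bloss);
    /* repack with the destination format's fields */
    return (((Uint32) (r >> df->Rloss) << df->Rshift) & df->Rmask) |
           (((Uint32) (g >> df->Gloss) << df->Gshift) & df->Gmask) |
           (((Uint32) (b >> df->Bloss) << df->Bshift) & df->Bmask) |
           (((Uint32) (a >> df->Aloss) << df->Ashift) & df->Amask);
}
#endif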
  1983. static void
  1984. BlitNtoNCopyAlpha(SDL_BlitInfo * info)
  1985. {
  1986. int width = info->dst_w;
  1987. int height = info->dst_h;
  1988. Uint8 *src = info->src;
  1989. int srcskip = info->src_skip;
  1990. Uint8 *dst = info->dst;
  1991. int dstskip = info->dst_skip;
  1992. SDL_PixelFormat *srcfmt = info->src_fmt;
  1993. int srcbpp = srcfmt->BytesPerPixel;
  1994. SDL_PixelFormat *dstfmt = info->dst_fmt;
  1995. int dstbpp = dstfmt->BytesPerPixel;
  1996. int c;
  1997. while (height--) {
  1998. for (c = width; c; --c) {
  1999. Uint32 Pixel;
  2000. unsigned sR, sG, sB, sA;
  2001. DISEMBLE_RGBA(src, srcbpp, srcfmt, Pixel, sR, sG, sB, sA);
  2002. ASSEMBLE_RGBA(dst, dstbpp, dstfmt, sR, sG, sB, sA);
  2003. dst += dstbpp;
  2004. src += srcbpp;
  2005. }
  2006. src += srcskip;
  2007. dst += dstskip;
  2008. }
  2009. }
  2010. static void
  2011. BlitNto1Key(SDL_BlitInfo * info)
  2012. {
  2013. int width = info->dst_w;
  2014. int height = info->dst_h;
  2015. Uint8 *src = info->src;
  2016. int srcskip = info->src_skip;
  2017. Uint8 *dst = info->dst;
  2018. int dstskip = info->dst_skip;
  2019. SDL_PixelFormat *srcfmt = info->src_fmt;
  2020. const Uint8 *palmap = info->table;
  2021. Uint32 ckey = info->colorkey;
  2022. Uint32 rgbmask = ~srcfmt->Amask;
  2023. int srcbpp;
  2024. Uint32 Pixel;
  2025. unsigned sR, sG, sB;
  2026. /* Set up some basic variables */
  2027. srcbpp = srcfmt->BytesPerPixel;
  2028. ckey &= rgbmask;
  2029. if (palmap == NULL) {
  2030. while (height--) {
  2031. /* *INDENT-OFF* */
  2032. DUFFS_LOOP(
  2033. {
  2034. DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel,
  2035. sR, sG, sB);
  2036. if ( (Pixel & rgbmask) != ckey ) {
  2037. /* Pack RGB into 8bit pixel */
  2038. *dst = (Uint8)(((sR>>5)<<(3+2))|
  2039. ((sG>>5)<<(2)) |
  2040. ((sB>>6)<<(0)));
  2041. }
  2042. dst++;
  2043. src += srcbpp;
  2044. },
  2045. width);
  2046. /* *INDENT-ON* */
  2047. src += srcskip;
  2048. dst += dstskip;
  2049. }
  2050. } else {
  2051. while (height--) {
  2052. /* *INDENT-OFF* */
  2053. DUFFS_LOOP(
  2054. {
  2055. DISEMBLE_RGB(src, srcbpp, srcfmt, Pixel,
  2056. sR, sG, sB);
  2057. if ( (Pixel & rgbmask) != ckey ) {
  2058. /* Pack RGB into 8bit pixel */
  2059. *dst = (Uint8)palmap[((sR>>5)<<(3+2))|
  2060. ((sG>>5)<<(2)) |
  2061. ((sB>>6)<<(0)) ];
  2062. }
  2063. dst++;
  2064. src += srcbpp;
  2065. },
  2066. width);
  2067. /* *INDENT-ON* */
  2068. src += srcskip;
  2069. dst += dstskip;
  2070. }
  2071. }
  2072. }
  2073. static void
  2074. Blit2to2Key(SDL_BlitInfo * info)
  2075. {
  2076. int width = info->dst_w;
  2077. int height = info->dst_h;
  2078. Uint16 *srcp = (Uint16 *) info->src;
  2079. int srcskip = info->src_skip;
  2080. Uint16 *dstp = (Uint16 *) info->dst;
  2081. int dstskip = info->dst_skip;
  2082. Uint32 ckey = info->colorkey;
  2083. Uint32 rgbmask = ~info->src_fmt->Amask;
  2084. /* Set up some basic variables */
  2085. srcskip /= 2;
  2086. dstskip /= 2;
  2087. ckey &= rgbmask;
  2088. while (height--) {
  2089. /* *INDENT-OFF* */
  2090. DUFFS_LOOP(
  2091. {
  2092. if ( (*srcp & rgbmask) != ckey ) {
  2093. *dstp = *srcp;
  2094. }
  2095. dstp++;
  2096. srcp++;
  2097. },
  2098. width);
  2099. /* *INDENT-ON* */
  2100. srcp += srcskip;
  2101. dstp += dstskip;
  2102. }
  2103. }
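/*
 * For illustration: the keyed blitters here compare pixels with the alpha
 * bits masked off (rgbmask = ~Amask), so a pixel matches the colorkey no
 * matter what its alpha is. With an ARGB1555 source and ckey = 0x03E0
 * (pure green), both 0x03E0 and 0x83E0 are skipped and the destination
 * pixel is left untouched.
 */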
  2104. static void
  2105. BlitNtoNKey(SDL_BlitInfo * info)
  2106. {
  2107. int width = info->dst_w;
  2108. int height = info->dst_h;
  2109. Uint8 *src = info->src;
  2110. int srcskip = info->src_skip;
  2111. Uint8 *dst = info->dst;
  2112. int dstskip = info->dst_skip;
  2113. Uint32 ckey = info->colorkey;
  2114. SDL_PixelFormat *srcfmt = info->src_fmt;
  2115. SDL_PixelFormat *dstfmt = info->dst_fmt;
  2116. int srcbpp = srcfmt->BytesPerPixel;
  2117. int dstbpp = dstfmt->BytesPerPixel;
  2118. unsigned alpha = dstfmt->Amask ? info->a : 0;
  2119. Uint32 rgbmask = ~srcfmt->Amask;
  2120. /* Set up some basic variables */
  2121. ckey &= rgbmask;
  2122. while (height--) {
  2123. /* *INDENT-OFF* */
  2124. DUFFS_LOOP(
  2125. {
  2126. Uint32 Pixel;
  2127. unsigned sR;
  2128. unsigned sG;
  2129. unsigned sB;
  2130. RETRIEVE_RGB_PIXEL(src, srcbpp, Pixel);
  2131. if ( (Pixel & rgbmask) != ckey ) {
  2132. RGB_FROM_PIXEL(Pixel, srcfmt, sR, sG, sB);
  2133. ASSEMBLE_RGBA(dst, dstbpp, dstfmt, sR, sG, sB, alpha);
  2134. }
  2135. dst += dstbpp;
  2136. src += srcbpp;
  2137. },
  2138. width);
  2139. /* *INDENT-ON* */
  2140. src += srcskip;
  2141. dst += dstskip;
  2142. }
  2143. }
  2144. static void
  2145. BlitNtoNKeyCopyAlpha(SDL_BlitInfo * info)
  2146. {
  2147. int width = info->dst_w;
  2148. int height = info->dst_h;
  2149. Uint8 *src = info->src;
  2150. int srcskip = info->src_skip;
  2151. Uint8 *dst = info->dst;
  2152. int dstskip = info->dst_skip;
  2153. Uint32 ckey = info->colorkey;
  2154. SDL_PixelFormat *srcfmt = info->src_fmt;
  2155. SDL_PixelFormat *dstfmt = info->dst_fmt;
  2156. Uint32 rgbmask = ~srcfmt->Amask;
  2157. Uint8 srcbpp;
  2158. Uint8 dstbpp;
  2159. Uint32 Pixel;
  2160. unsigned sR, sG, sB, sA;
  2161. /* Set up some basic variables */
  2162. srcbpp = srcfmt->BytesPerPixel;
  2163. dstbpp = dstfmt->BytesPerPixel;
  2164. ckey &= rgbmask;
  2165. while (height--) {
  2166. /* *INDENT-OFF* */
  2167. DUFFS_LOOP(
  2168. {
  2169. DISEMBLE_RGBA(src, srcbpp, srcfmt, Pixel, sR, sG, sB, sA);
  2170. if ( (Pixel & rgbmask) != ckey ) {
  2171. ASSEMBLE_RGBA(dst, dstbpp, dstfmt, sR, sG, sB, sA);
  2172. }
  2173. dst += dstbpp;
  2174. src += srcbpp;
  2175. },
  2176. width);
  2177. /* *INDENT-ON* */
  2178. src += srcskip;
  2179. dst += dstskip;
  2180. }
  2181. }
  2182. /* Normal N to N optimized blitters */
  2183. struct blit_table
  2184. {
  2185. Uint32 srcR, srcG, srcB;
  2186. int dstbpp;
  2187. Uint32 dstR, dstG, dstB;
  2188. Uint32 blit_features;
  2189. SDL_BlitFunc blitfunc;
  2190. enum
  2191. { NO_ALPHA = 1, SET_ALPHA = 2, COPY_ALPHA = 4 } alpha;
  2192. };
  2193. static const struct blit_table normal_blit_1[] = {
  2194. /* Default for 8-bit RGB source, an invalid combination */
  2195. {0, 0, 0, 0, 0, 0, 0, 0, NULL},
  2196. };
  2197. static const struct blit_table normal_blit_2[] = {
  2198. #if SDL_ALTIVEC_BLITTERS
  2199. /* has-altivec */
  2200. {0x0000F800, 0x000007E0, 0x0000001F, 4, 0x00000000, 0x00000000,
  2201. 0x00000000,
  2202. 2, Blit_RGB565_32Altivec, NO_ALPHA | COPY_ALPHA | SET_ALPHA},
  2203. {0x00007C00, 0x000003E0, 0x0000001F, 4, 0x00000000, 0x00000000,
  2204. 0x00000000,
  2205. 2, Blit_RGB555_32Altivec, NO_ALPHA | COPY_ALPHA | SET_ALPHA},
  2206. #endif
  2207. {0x0000F800, 0x000007E0, 0x0000001F, 4, 0x00FF0000, 0x0000FF00,
  2208. 0x000000FF,
  2209. 0, Blit_RGB565_ARGB8888, SET_ALPHA},
  2210. {0x0000F800, 0x000007E0, 0x0000001F, 4, 0x000000FF, 0x0000FF00,
  2211. 0x00FF0000,
  2212. 0, Blit_RGB565_ABGR8888, SET_ALPHA},
  2213. {0x0000F800, 0x000007E0, 0x0000001F, 4, 0xFF000000, 0x00FF0000,
  2214. 0x0000FF00,
  2215. 0, Blit_RGB565_RGBA8888, SET_ALPHA},
  2216. {0x0000F800, 0x000007E0, 0x0000001F, 4, 0x0000FF00, 0x00FF0000,
  2217. 0xFF000000,
  2218. 0, Blit_RGB565_BGRA8888, SET_ALPHA},
  2219. /* Default for 16-bit RGB source, used if no other blitter matches */
  2220. {0, 0, 0, 0, 0, 0, 0, 0, BlitNtoN, 0}
  2221. };
  2222. static const struct blit_table normal_blit_3[] = {
  2223. /* Default for 24-bit RGB source, never optimized */
  2224. {0, 0, 0, 0, 0, 0, 0, 0, BlitNtoN, 0}
  2225. };
  2226. static const struct blit_table normal_blit_4[] = {
  2227. #if SDL_ALTIVEC_BLITTERS
  2228. /* has-altivec | dont-use-prefetch */
  2229. {0x00000000, 0x00000000, 0x00000000, 4, 0x00000000, 0x00000000,
  2230. 0x00000000,
  2231. 6, ConvertAltivec32to32_noprefetch,
  2232. NO_ALPHA | COPY_ALPHA | SET_ALPHA},
  2233. /* has-altivec */
  2234. {0x00000000, 0x00000000, 0x00000000, 4, 0x00000000, 0x00000000,
  2235. 0x00000000,
  2236. 2, ConvertAltivec32to32_prefetch,
  2237. NO_ALPHA | COPY_ALPHA | SET_ALPHA},
  2238. /* has-altivec */
  2239. {0x00000000, 0x00000000, 0x00000000, 2, 0x0000F800, 0x000007E0,
  2240. 0x0000001F,
  2241. 2, Blit_RGB888_RGB565Altivec, NO_ALPHA},
  2242. #endif
  2243. {0x00FF0000, 0x0000FF00, 0x000000FF, 2, 0x0000F800, 0x000007E0,
  2244. 0x0000001F,
  2245. 0, Blit_RGB888_RGB565, NO_ALPHA},
  2246. {0x00FF0000, 0x0000FF00, 0x000000FF, 2, 0x00007C00, 0x000003E0,
  2247. 0x0000001F,
  2248. 0, Blit_RGB888_RGB555, NO_ALPHA},
  2249. /* Default for 32-bit RGB source, used if no other blitter matches */
  2250. {0, 0, 0, 0, 0, 0, 0, 0, BlitNtoN, 0}
  2251. };
  2252. static const struct blit_table *const normal_blit[] = {
  2253. normal_blit_1, normal_blit_2, normal_blit_3, normal_blit_4
  2254. };
  2255. /* Mask matches table, or table entry is zero */
  2256. #define MASKOK(x, y) (((x) == (y)) || ((y) == 0x00000000))
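/*
 * For illustration: a zero mask in a table entry acts as a wildcard, so
 * MASKOK(0x00FF0000, 0x00000000) is true -- this is how the AltiVec 32->32
 * entries above can match any 4-byte R,G,B layout -- while
 * MASKOK(0x00FF0000, 0x0000F800) is false.
 */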
  2257. SDL_BlitFunc
  2258. SDL_CalculateBlitN(SDL_Surface * surface)
  2259. {
  2260. SDL_PixelFormat *srcfmt;
  2261. SDL_PixelFormat *dstfmt;
  2262. const struct blit_table *table;
  2263. int which;
  2264. SDL_BlitFunc blitfun;
  2265. /* Set up data for choosing the blit */
  2266. srcfmt = surface->format;
  2267. dstfmt = surface->map->dst->format;
2268. /* We don't support destinations with fewer than 8 bits per pixel */
  2269. if (dstfmt->BitsPerPixel < 8) {
  2270. return (NULL);
  2271. }
  2272. switch (surface->map->info.flags & ~SDL_COPY_RLE_MASK) {
  2273. case 0:
  2274. blitfun = NULL;
  2275. if (dstfmt->BitsPerPixel == 8) {
  2276. /* We assume 8-bit destinations are palettized */
  2277. if ((srcfmt->BytesPerPixel == 4) &&
  2278. (srcfmt->Rmask == 0x00FF0000) &&
  2279. (srcfmt->Gmask == 0x0000FF00) &&
  2280. (srcfmt->Bmask == 0x000000FF)) {
  2281. if (surface->map->info.table) {
  2282. blitfun = Blit_RGB888_index8_map;
  2283. } else {
  2284. blitfun = Blit_RGB888_index8;
  2285. }
  2286. } else {
  2287. blitfun = BlitNto1;
  2288. }
  2289. } else {
  2290. /* Now the meat, choose the blitter we want */
  2291. int a_need = NO_ALPHA;
  2292. if (dstfmt->Amask)
  2293. a_need = srcfmt->Amask ? COPY_ALPHA : SET_ALPHA;
  2294. table = normal_blit[srcfmt->BytesPerPixel - 1];
  2295. for (which = 0; table[which].dstbpp; ++which) {
  2296. if (MASKOK(srcfmt->Rmask, table[which].srcR) &&
  2297. MASKOK(srcfmt->Gmask, table[which].srcG) &&
  2298. MASKOK(srcfmt->Bmask, table[which].srcB) &&
  2299. MASKOK(dstfmt->Rmask, table[which].dstR) &&
  2300. MASKOK(dstfmt->Gmask, table[which].dstG) &&
  2301. MASKOK(dstfmt->Bmask, table[which].dstB) &&
  2302. dstfmt->BytesPerPixel == table[which].dstbpp &&
  2303. (a_need & table[which].alpha) == a_need &&
  2304. ((table[which].blit_features & GetBlitFeatures()) ==
  2305. table[which].blit_features))
  2306. break;
  2307. }
  2308. blitfun = table[which].blitfunc;
  2309. if (blitfun == BlitNtoN) { /* default C fallback catch-all. Slow! */
  2310. /* Fastpath C fallback: 32bit RGB<->RGBA blit with matching RGB */
  2311. if (srcfmt->BytesPerPixel == 4 && dstfmt->BytesPerPixel == 4
  2312. && srcfmt->Rmask == dstfmt->Rmask
  2313. && srcfmt->Gmask == dstfmt->Gmask
  2314. && srcfmt->Bmask == dstfmt->Bmask) {
  2315. blitfun = Blit4to4MaskAlpha;
  2316. } else if (a_need == COPY_ALPHA) {
  2317. blitfun = BlitNtoNCopyAlpha;
  2318. }
  2319. }
  2320. }
  2321. return (blitfun);
  2322. case SDL_COPY_COLORKEY:
  2323. /* colorkey blit: Here we don't have too many options, mostly
  2324. because RLE is the preferred fast way to deal with this.
  2325. If a particular case turns out to be useful we'll add it. */
  2326. if (srcfmt->BytesPerPixel == 2 && surface->map->identity)
  2327. return Blit2to2Key;
  2328. else if (dstfmt->BytesPerPixel == 1)
  2329. return BlitNto1Key;
  2330. else {
  2331. #if SDL_ALTIVEC_BLITTERS
  2332. if ((srcfmt->BytesPerPixel == 4) && (dstfmt->BytesPerPixel == 4)
  2333. && SDL_HasAltiVec()) {
  2334. return Blit32to32KeyAltivec;
  2335. } else
  2336. #endif
  2337. if (srcfmt->Amask && dstfmt->Amask) {
  2338. return BlitNtoNKeyCopyAlpha;
  2339. } else {
  2340. return BlitNtoNKey;
  2341. }
  2342. }
  2343. }
  2344. return NULL;
  2345. }
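/*
 * For illustration: with an RGB565 source and an ARGB8888 destination
 * (no source alpha, so a_need == SET_ALPHA), and assuming the AltiVec
 * entries above don't match first, the normal_blit_2 table matches its
 * 0x0000F800/0x000007E0/0x0000001F -> 0x00FF0000/0x0000FF00/0x000000FF
 * entry and Blit_RGB565_ARGB8888 is returned.
 */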
  2346. /* vi: set ts=4 sw=4 expandtab: */