/*
 * arch/x86/crypto/twofish-x86_64-asm_64.S
 *
 * Twofish block cipher, x86_64 assembly implementation.
 * (Mirrored from bitbucket.org/ndreys/linux-sunxi; 324 lines in the
 * original file: 226 code, 36 blank, 62 comment.)
 */
  1. /***************************************************************************
  2. * Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de> *
  3. * *
  4. * This program is free software; you can redistribute it and/or modify *
  5. * it under the terms of the GNU General Public License as published by *
  6. * the Free Software Foundation; either version 2 of the License, or *
  7. * (at your option) any later version. *
  8. * *
  9. * This program is distributed in the hope that it will be useful, *
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of *
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
  12. * GNU General Public License for more details. *
  13. * *
  14. * You should have received a copy of the GNU General Public License *
  15. * along with this program; if not, write to the *
  16. * Free Software Foundation, Inc., *
  17. * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
  18. ***************************************************************************/
  19. .file "twofish-x86_64-asm.S"
  20. .text
  21. #include <asm/asm-offsets.h>
  22. #define a_offset 0
  23. #define b_offset 4
  24. #define c_offset 8
  25. #define d_offset 12
  26. /* Structure of the crypto context struct*/
  27. #define s0 0 /* S0 Array 256 Words each */
  28. #define s1 1024 /* S1 Array */
  29. #define s2 2048 /* S2 Array */
  30. #define s3 3072 /* S3 Array */
  31. #define w 4096 /* 8 whitening keys (word) */
  32. #define k 4128 /* key 1-32 ( word ) */
  33. /* define a few register aliases to allow macro substitution */
  34. #define R0 %rax
  35. #define R0D %eax
  36. #define R0B %al
  37. #define R0H %ah
  38. #define R1 %rbx
  39. #define R1D %ebx
  40. #define R1B %bl
  41. #define R1H %bh
  42. #define R2 %rcx
  43. #define R2D %ecx
  44. #define R2B %cl
  45. #define R2H %ch
  46. #define R3 %rdx
  47. #define R3D %edx
  48. #define R3B %dl
  49. #define R3H %dh
  50. /* performs input whitening */
  51. #define input_whitening(src,context,offset)\
  52. xor w+offset(context), src;
  53. /* performs input whitening */
  54. #define output_whitening(src,context,offset)\
  55. xor w+16+offset(context), src;
  56. /*
  57. * a input register containing a (rotated 16)
  58. * b input register containing b
  59. * c input register containing c
  60. * d input register containing d (already rol $1)
  61. * operations on a and b are interleaved to increase performance
  62. */
  63. #define encrypt_round(a,b,c,d,round)\
  64. movzx b ## B, %edi;\
  65. mov s1(%r11,%rdi,4),%r8d;\
  66. movzx a ## B, %edi;\
  67. mov s2(%r11,%rdi,4),%r9d;\
  68. movzx b ## H, %edi;\
  69. ror $16, b ## D;\
  70. xor s2(%r11,%rdi,4),%r8d;\
  71. movzx a ## H, %edi;\
  72. ror $16, a ## D;\
  73. xor s3(%r11,%rdi,4),%r9d;\
  74. movzx b ## B, %edi;\
  75. xor s3(%r11,%rdi,4),%r8d;\
  76. movzx a ## B, %edi;\
  77. xor (%r11,%rdi,4), %r9d;\
  78. movzx b ## H, %edi;\
  79. ror $15, b ## D;\
  80. xor (%r11,%rdi,4), %r8d;\
  81. movzx a ## H, %edi;\
  82. xor s1(%r11,%rdi,4),%r9d;\
  83. add %r8d, %r9d;\
  84. add %r9d, %r8d;\
  85. add k+round(%r11), %r9d;\
  86. xor %r9d, c ## D;\
  87. rol $15, c ## D;\
  88. add k+4+round(%r11),%r8d;\
  89. xor %r8d, d ## D;
  90. /*
  91. * a input register containing a(rotated 16)
  92. * b input register containing b
  93. * c input register containing c
  94. * d input register containing d (already rol $1)
  95. * operations on a and b are interleaved to increase performance
  96. * during the round a and b are prepared for the output whitening
  97. */
  98. #define encrypt_last_round(a,b,c,d,round)\
  99. mov b ## D, %r10d;\
  100. shl $32, %r10;\
  101. movzx b ## B, %edi;\
  102. mov s1(%r11,%rdi,4),%r8d;\
  103. movzx a ## B, %edi;\
  104. mov s2(%r11,%rdi,4),%r9d;\
  105. movzx b ## H, %edi;\
  106. ror $16, b ## D;\
  107. xor s2(%r11,%rdi,4),%r8d;\
  108. movzx a ## H, %edi;\
  109. ror $16, a ## D;\
  110. xor s3(%r11,%rdi,4),%r9d;\
  111. movzx b ## B, %edi;\
  112. xor s3(%r11,%rdi,4),%r8d;\
  113. movzx a ## B, %edi;\
  114. xor (%r11,%rdi,4), %r9d;\
  115. xor a, %r10;\
  116. movzx b ## H, %edi;\
  117. xor (%r11,%rdi,4), %r8d;\
  118. movzx a ## H, %edi;\
  119. xor s1(%r11,%rdi,4),%r9d;\
  120. add %r8d, %r9d;\
  121. add %r9d, %r8d;\
  122. add k+round(%r11), %r9d;\
  123. xor %r9d, c ## D;\
  124. ror $1, c ## D;\
  125. add k+4+round(%r11),%r8d;\
  126. xor %r8d, d ## D
  127. /*
  128. * a input register containing a
  129. * b input register containing b (rotated 16)
  130. * c input register containing c (already rol $1)
  131. * d input register containing d
  132. * operations on a and b are interleaved to increase performance
  133. */
  134. #define decrypt_round(a,b,c,d,round)\
  135. movzx a ## B, %edi;\
  136. mov (%r11,%rdi,4), %r9d;\
  137. movzx b ## B, %edi;\
  138. mov s3(%r11,%rdi,4),%r8d;\
  139. movzx a ## H, %edi;\
  140. ror $16, a ## D;\
  141. xor s1(%r11,%rdi,4),%r9d;\
  142. movzx b ## H, %edi;\
  143. ror $16, b ## D;\
  144. xor (%r11,%rdi,4), %r8d;\
  145. movzx a ## B, %edi;\
  146. xor s2(%r11,%rdi,4),%r9d;\
  147. movzx b ## B, %edi;\
  148. xor s1(%r11,%rdi,4),%r8d;\
  149. movzx a ## H, %edi;\
  150. ror $15, a ## D;\
  151. xor s3(%r11,%rdi,4),%r9d;\
  152. movzx b ## H, %edi;\
  153. xor s2(%r11,%rdi,4),%r8d;\
  154. add %r8d, %r9d;\
  155. add %r9d, %r8d;\
  156. add k+round(%r11), %r9d;\
  157. xor %r9d, c ## D;\
  158. add k+4+round(%r11),%r8d;\
  159. xor %r8d, d ## D;\
  160. rol $15, d ## D;
  161. /*
  162. * a input register containing a
  163. * b input register containing b
  164. * c input register containing c (already rol $1)
  165. * d input register containing d
  166. * operations on a and b are interleaved to increase performance
  167. * during the round a and b are prepared for the output whitening
  168. */
  169. #define decrypt_last_round(a,b,c,d,round)\
  170. movzx a ## B, %edi;\
  171. mov (%r11,%rdi,4), %r9d;\
  172. movzx b ## B, %edi;\
  173. mov s3(%r11,%rdi,4),%r8d;\
  174. movzx b ## H, %edi;\
  175. ror $16, b ## D;\
  176. xor (%r11,%rdi,4), %r8d;\
  177. movzx a ## H, %edi;\
  178. mov b ## D, %r10d;\
  179. shl $32, %r10;\
  180. xor a, %r10;\
  181. ror $16, a ## D;\
  182. xor s1(%r11,%rdi,4),%r9d;\
  183. movzx b ## B, %edi;\
  184. xor s1(%r11,%rdi,4),%r8d;\
  185. movzx a ## B, %edi;\
  186. xor s2(%r11,%rdi,4),%r9d;\
  187. movzx b ## H, %edi;\
  188. xor s2(%r11,%rdi,4),%r8d;\
  189. movzx a ## H, %edi;\
  190. xor s3(%r11,%rdi,4),%r9d;\
  191. add %r8d, %r9d;\
  192. add %r9d, %r8d;\
  193. add k+round(%r11), %r9d;\
  194. xor %r9d, c ## D;\
  195. add k+4+round(%r11),%r8d;\
  196. xor %r8d, d ## D;\
  197. ror $1, d ## D;
  198. .align 8
  199. .global twofish_enc_blk
  200. .global twofish_dec_blk
  201. twofish_enc_blk:
  202. pushq R1
  203. /* %rdi contains the crypto tfm address */
  204. /* %rsi contains the output address */
  205. /* %rdx contains the input address */
  206. add $crypto_tfm_ctx_offset, %rdi /* set ctx address */
  207. /* ctx address is moved to free one non-rex register
  208. as target for the 8bit high operations */
  209. mov %rdi, %r11
  210. movq (R3), R1
  211. movq 8(R3), R3
  212. input_whitening(R1,%r11,a_offset)
  213. input_whitening(R3,%r11,c_offset)
  214. mov R1D, R0D
  215. rol $16, R0D
  216. shr $32, R1
  217. mov R3D, R2D
  218. shr $32, R3
  219. rol $1, R3D
  220. encrypt_round(R0,R1,R2,R3,0);
  221. encrypt_round(R2,R3,R0,R1,8);
  222. encrypt_round(R0,R1,R2,R3,2*8);
  223. encrypt_round(R2,R3,R0,R1,3*8);
  224. encrypt_round(R0,R1,R2,R3,4*8);
  225. encrypt_round(R2,R3,R0,R1,5*8);
  226. encrypt_round(R0,R1,R2,R3,6*8);
  227. encrypt_round(R2,R3,R0,R1,7*8);
  228. encrypt_round(R0,R1,R2,R3,8*8);
  229. encrypt_round(R2,R3,R0,R1,9*8);
  230. encrypt_round(R0,R1,R2,R3,10*8);
  231. encrypt_round(R2,R3,R0,R1,11*8);
  232. encrypt_round(R0,R1,R2,R3,12*8);
  233. encrypt_round(R2,R3,R0,R1,13*8);
  234. encrypt_round(R0,R1,R2,R3,14*8);
  235. encrypt_last_round(R2,R3,R0,R1,15*8);
  236. output_whitening(%r10,%r11,a_offset)
  237. movq %r10, (%rsi)
  238. shl $32, R1
  239. xor R0, R1
  240. output_whitening(R1,%r11,c_offset)
  241. movq R1, 8(%rsi)
  242. popq R1
  243. movq $1,%rax
  244. ret
  245. twofish_dec_blk:
  246. pushq R1
  247. /* %rdi contains the crypto tfm address */
  248. /* %rsi contains the output address */
  249. /* %rdx contains the input address */
  250. add $crypto_tfm_ctx_offset, %rdi /* set ctx address */
  251. /* ctx address is moved to free one non-rex register
  252. as target for the 8bit high operations */
  253. mov %rdi, %r11
  254. movq (R3), R1
  255. movq 8(R3), R3
  256. output_whitening(R1,%r11,a_offset)
  257. output_whitening(R3,%r11,c_offset)
  258. mov R1D, R0D
  259. shr $32, R1
  260. rol $16, R1D
  261. mov R3D, R2D
  262. shr $32, R3
  263. rol $1, R2D
  264. decrypt_round(R0,R1,R2,R3,15*8);
  265. decrypt_round(R2,R3,R0,R1,14*8);
  266. decrypt_round(R0,R1,R2,R3,13*8);
  267. decrypt_round(R2,R3,R0,R1,12*8);
  268. decrypt_round(R0,R1,R2,R3,11*8);
  269. decrypt_round(R2,R3,R0,R1,10*8);
  270. decrypt_round(R0,R1,R2,R3,9*8);
  271. decrypt_round(R2,R3,R0,R1,8*8);
  272. decrypt_round(R0,R1,R2,R3,7*8);
  273. decrypt_round(R2,R3,R0,R1,6*8);
  274. decrypt_round(R0,R1,R2,R3,5*8);
  275. decrypt_round(R2,R3,R0,R1,4*8);
  276. decrypt_round(R0,R1,R2,R3,3*8);
  277. decrypt_round(R2,R3,R0,R1,2*8);
  278. decrypt_round(R0,R1,R2,R3,1*8);
  279. decrypt_last_round(R2,R3,R0,R1,0);
  280. input_whitening(%r10,%r11,a_offset)
  281. movq %r10, (%rsi)
  282. shl $32, R1
  283. xor R0, R1
  284. input_whitening(R1,%r11,c_offset)
  285. movq R1, 8(%rsi)
  286. popq R1
  287. movq $1,%rax
  288. ret