/*
 * arch/score/include/asm/checksum.h
 * Source: https://gitlab.com/pine64-android/linux-3.10
 * (S+core architecture checksum helpers, Linux 3.10 tree)
 */
#ifndef _ASM_SCORE_CHECKSUM_H
#define _ASM_SCORE_CHECKSUM_H

#include <linux/in6.h>
#include <asm/uaccess.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
unsigned int csum_partial(const void *buff, int len, __wsum sum);

/*
 * Like csum_partial(), but copies from user space while summing:
 * records errors it finds in *csum_err, but continues and zeros
 * the rest of the destination buffer.
 */
unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len,
					 unsigned int sum, int *csum_err);

/*
 * Copy a kernel-space buffer while checksumming it (no error-report
 * parameter, unlike the _from_user variant above).
 */
unsigned int csum_partial_copy(const char *src, char *dst,
			       int len, unsigned int sum);

/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
  30. static inline
  31. __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
  32. __wsum sum, int *err_ptr)
  33. {
  34. sum = csum_partial(src, len, sum);
  35. if (copy_to_user(dst, src, len)) {
  36. *err_ptr = -EFAULT;
  37. return (__force __wsum) -1; /* invalid checksum */
  38. }
  39. return sum;
  40. }
  41. #define csum_partial_copy_nocheck csum_partial_copy
/*
 * Fold a partial checksum without adding pseudo headers:
 * collapse the 32-bit one's-complement partial sum down to 16 bits
 * and return its complement.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	/* the while loop is unnecessary really, it's always enough with two
	   iterations */
	__asm__ __volatile__(
		".set volatile\n\t"
		".set\tr1\n\t"
		/* r1 = sum << 16; add the high half-word into the low one */
		"slli\tr1,%0, 16\n\t"
		"add\t%0,%0, r1\n\t"
		"cmp.c\tr1, %0\n\t"
		"srli\t%0, %0, 16\n\t"
		/* if the add carried, re-add 1 (end-around carry) */
		"bleu\t1f\n\t"
		"addi\t%0, 0x1\n\t"
		/* one's-complement the result: xor with 0xffff */
		"1:ldi\tr30, 0xffff\n\t"
		"xor\t%0, %0, r30\n\t"
		/* mask down to the low 16 bits */
		"slli\t%0, %0, 16\n\t"
		"srli\t%0, %0, 16\n\t"
		".set\tnor1\n\t"
		".set optimize\n\t"
		: "=r" (sum)
		: "0" (sum));
	return sum;
}
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 *
 * By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
 * Arnt Gulbrandsen.
 *
 * iph points at the IP header; ihl is its length in 32-bit words.
 * The first four words are summed unrolled, the remaining ihl-4
 * words by the loop at the bottom.  Each add is followed by a
 * cmp.c/bleu/addi triple that re-adds the carry (end-around carry
 * for one's-complement sums).
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum;
	unsigned long dummy;

	/*
	 * Operands: %0 = sum, %1 = running pointer (starts at iph),
	 * %2 = ihl on entry, reused as the loop-end address,
	 * %3 = scratch word.
	 */
	__asm__ __volatile__(
		".set volatile\n\t"
		".set\tnor1\n\t"
		"lw\t%0, [%1]\n\t"		/* sum = word 0 */
		/* turn ihl into the loop-end address: presumably
		 * %2 = (ihl - 4) * 4 + iph — TODO confirm subri
		 * operand order against the S+core ISA manual */
		"subri\t%2, %2, 4\n\t"
		"slli\t%2, %2, 2\n\t"
		"lw\t%3, [%1, 4]\n\t"		/* word 1 */
		"add\t%2, %2, %1\n\t"
		"add\t%0, %0, %3\n\t"
		"cmp.c\t%3, %0\n\t"
		"lw\t%3, [%1, 8]\n\t"		/* word 2 */
		"bleu\t1f\n\t"
		"addi\t%0, 0x1\n\t"		/* carry from word 1 */
		"1:\n\t"
		"add\t%0, %0, %3\n\t"
		"cmp.c\t%3, %0\n\t"
		"lw\t%3, [%1, 12]\n\t"		/* word 3 */
		"bleu\t1f\n\t"
		"addi\t%0, 0x1\n\t"		/* carry from word 2 */
		"1:add\t%0, %0, %3\n\t"
		"cmp.c\t%3, %0\n\t"
		"bleu\t1f\n\t"
		"addi\t%0, 0x1\n"		/* carry from word 3 */
		/* loop: words 4 .. ihl-1 */
		"1:\tlw\t%3, [%1, 16]\n\t"
		"addi\t%1, 4\n\t"
		"add\t%0, %0, %3\n\t"
		"cmp.c\t%3, %0\n\t"
		"bleu\t2f\n\t"
		"addi\t%0, 0x1\n"
		"2:cmp.c\t%2, %1\n\t"
		"bne\t1b\n\t"
		".set\tr1\n\t"
		".set optimize\n\t"
		: "=&r" (sum), "=&r" (iph), "=&r" (ihl), "=&r" (dummy)
		: "1" (iph), "2" (ihl));
	return csum_fold(sum);
}
/*
 * Accumulate the TCP/UDP pseudo header (source address, destination
 * address, length, protocol) into 'sum' without folding to 16 bits.
 */
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
		   unsigned short proto, __wsum sum)
{
	/*
	 * Length/protocol word of the pseudo header.
	 * NOTE(review): len goes through ntohs() and proto is
	 * multiplied by 256 (a byte swap for proto < 256); presumably
	 * this builds the word in network byte order — confirm what
	 * byte order callers pass 'len' in.
	 */
	unsigned long tmp = (ntohs(len) << 16) + proto * 256;

	/*
	 * %0 starts as daddr and accumulates saddr (%2), tmp (%3) and
	 * the incoming sum (%4); each add re-adds its carry via the
	 * cmp.c/bleu/addi triple (one's-complement arithmetic).
	 */
	__asm__ __volatile__(
		".set volatile\n\t"
		"add\t%0, %0, %2\n\t"
		"cmp.c\t%2, %0\n\t"
		"bleu\t1f\n\t"
		"addi\t%0, 0x1\n\t"
		"1:\n\t"
		"add\t%0, %0, %3\n\t"
		"cmp.c\t%3, %0\n\t"
		"bleu\t1f\n\t"
		"addi\t%0, 0x1\n\t"
		"1:\n\t"
		"add\t%0, %0, %4\n\t"
		"cmp.c\t%4, %0\n\t"
		"bleu\t1f\n\t"
		"addi\t%0, 0x1\n\t"
		"1:\n\t"
		".set optimize\n\t"
		: "=r" (sum)
		: "0" (daddr), "r"(saddr),
		  "r" (tmp),
		  "r" (sum));
	return sum;
}
  145. /*
  146. * computes the checksum of the TCP/UDP pseudo-header
  147. * returns a 16-bit checksum, already complemented
  148. */
  149. static inline __sum16
  150. csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
  151. unsigned short proto, __wsum sum)
  152. {
  153. return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
  154. }
  155. /*
  156. * this routine is used for miscellaneous IP-like checksums, mainly
  157. * in icmp.c
  158. */
  159. static inline unsigned short ip_compute_csum(const void *buff, int len)
  160. {
  161. return csum_fold(csum_partial(buff, len, 0));
  162. }
  163. #define _HAVE_ARCH_IPV6_CSUM
/*
 * IPv6 pseudo-header checksum: sums both 128-bit addresses, the
 * length and the protocol into 'sum' with end-around carries, then
 * folds and complements the result via csum_fold().
 *
 * Operands: %0 starts as htonl(len) and accumulates; %1 (tied to
 * htonl(proto)) is reused as a scratch register for the address
 * words once its initial value has been added in; %2 = saddr,
 * %3 = daddr, %5/%6 are the proto and sum inputs.
 *
 * NOTE(review): the "1:" label on the "lw %1, [%2, 4]" line looks
 * stray — the preceding "bleu 1f" resolves to the earlier "1:add"
 * line, and this extra definition is never branched to.  Harmless
 * as written, but verify against the original arch source.
 */
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
				      const struct in6_addr *daddr,
				      __u32 len, unsigned short proto,
				      __wsum sum)
{
	__asm__ __volatile__(
		".set\tvolatile\t\t\t# csum_ipv6_magic\n\t"
		"add\t%0, %0, %5\t\t\t# proto (long in network byte order)\n\t"
		"cmp.c\t%5, %0\n\t"
		"bleu 1f\n\t"
		"addi\t%0, 0x1\n\t"
		"1:add\t%0, %0, %6\t\t\t# csum\n\t"
		"cmp.c\t%6, %0\n\t"
		"lw\t%1, [%2, 0]\t\t\t# four words source address\n\t"
		"bleu 1f\n\t"
		"addi\t%0, 0x1\n\t"
		"1:add\t%0, %0, %1\n\t"
		"cmp.c\t%1, %0\n\t"
		"1:lw\t%1, [%2, 4]\n\t"
		"bleu 1f\n\t"
		"addi\t%0, 0x1\n\t"
		"1:add\t%0, %0, %1\n\t"
		"cmp.c\t%1, %0\n\t"
		"lw\t%1, [%2,8]\n\t"
		"bleu 1f\n\t"
		"addi\t%0, 0x1\n\t"
		"1:add\t%0, %0, %1\n\t"
		"cmp.c\t%1, %0\n\t"
		"lw\t%1, [%2, 12]\n\t"
		"bleu 1f\n\t"
		"addi\t%0, 0x1\n\t"
		"1:add\t%0, %0,%1\n\t"
		"cmp.c\t%1, %0\n\t"
		/* four words destination address */
		"lw\t%1, [%3, 0]\n\t"
		"bleu 1f\n\t"
		"addi\t%0, 0x1\n\t"
		"1:add\t%0, %0, %1\n\t"
		"cmp.c\t%1, %0\n\t"
		"lw\t%1, [%3, 4]\n\t"
		"bleu 1f\n\t"
		"addi\t%0, 0x1\n\t"
		"1:add\t%0, %0, %1\n\t"
		"cmp.c\t%1, %0\n\t"
		"lw\t%1, [%3, 8]\n\t"
		"bleu 1f\n\t"
		"addi\t%0, 0x1\n\t"
		"1:add\t%0, %0, %1\n\t"
		"cmp.c\t%1, %0\n\t"
		"lw\t%1, [%3, 12]\n\t"
		"bleu 1f\n\t"
		"addi\t%0, 0x1\n\t"
		"1:add\t%0, %0, %1\n\t"
		"cmp.c\t%1, %0\n\t"
		"bleu 1f\n\t"
		"addi\t%0, 0x1\n\t"
		"1:\n\t"
		".set\toptimize"
		: "=r" (sum), "=r" (proto)
		: "r" (saddr), "r" (daddr),
		  "0" (htonl(len)), "1" (htonl(proto)), "r" (sum));
	return csum_fold(sum);
}
  226. #endif /* _ASM_SCORE_CHECKSUM_H */