
/siphash.c

https://github.com/abdollar/ruby
#include <string.h>
#include <stdio.h>
#include <stdlib.h>   /* malloc()/free() used by the streaming interface below */
#include <inttypes.h> /* PRIx32/PRIx64 used by int_sip_dump() */
#include "siphash.h"
#ifndef SIP_HASH_STREAMING
#define SIP_HASH_STREAMING 1
#endif

#if defined(__MINGW32__)
#include <sys/param.h>
/* MinGW only defines LITTLE_ENDIAN and BIG_ENDIAN macros */
#define __LITTLE_ENDIAN LITTLE_ENDIAN
#define __BIG_ENDIAN BIG_ENDIAN
#elif defined(_WIN32)
#define BYTE_ORDER __LITTLE_ENDIAN
#elif !defined(BYTE_ORDER)
#include <endian.h>
#endif

#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN __LITTLE_ENDIAN
#endif
#ifndef BIG_ENDIAN
#define BIG_ENDIAN __BIG_ENDIAN
#endif

#if BYTE_ORDER == LITTLE_ENDIAN
#define lo u32[0]
#define hi u32[1]
#elif BYTE_ORDER == BIG_ENDIAN
#define hi u32[0]
#define lo u32[1]
#else
#error "Only strictly little or big endian supported"
#endif

#ifndef UNALIGNED_WORD_ACCESS
# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
     defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || \
     defined(__powerpc64__) || defined(__aarch64__) || \
     defined(__mc68020__)
#   define UNALIGNED_WORD_ACCESS 1
# endif
#endif
#ifndef UNALIGNED_WORD_ACCESS
# define UNALIGNED_WORD_ACCESS 0
#endif
#define U8TO32_LE(p)                                                \
    (((uint32_t)((p)[0])       ) | ((uint32_t)((p)[1]) <<  8) |     \
     ((uint32_t)((p)[2]) << 16) | ((uint32_t)((p)[3]) << 24))

#define U32TO8_LE(p, v)             \
do {                                \
    (p)[0] = (uint8_t)((v)      );  \
    (p)[1] = (uint8_t)((v) >>  8);  \
    (p)[2] = (uint8_t)((v) >> 16);  \
    (p)[3] = (uint8_t)((v) >> 24);  \
} while (0)
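
/*
 * Worked example (editor's note, not in the original file): for the byte
 * sequence p = {0x01, 0x02, 0x03, 0x04}, U8TO32_LE(p) assembles
 * 0x01 | 0x02<<8 | 0x03<<16 | 0x04<<24 == 0x04030201, i.e. a little-endian
 * 32-bit load that works regardless of host byte order or alignment.
 * U32TO8_LE is the matching little-endian store.
 */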
#ifdef HAVE_UINT64_T
#define U8TO64_LE(p)                                                \
    ((uint64_t)U8TO32_LE(p) | ((uint64_t)U8TO32_LE((p) + 4)) << 32)

#define U64TO8_LE(p, v)                         \
do {                                            \
    U32TO8_LE((p),     (uint32_t)((v)      ));  \
    U32TO8_LE((p) + 4, (uint32_t)((v) >> 32));  \
} while (0)

#define ROTL64(v, s) \
    (((v) << (s)) | ((v) >> (64 - (s))))

#define ROTL64_TO(v, s) ((v) = ROTL64((v), (s)))

#define ADD64_TO(v, s) ((v) += (s))
#define XOR64_TO(v, s) ((v) ^= (s))
#define XOR64_INT(v, x) ((v) ^= (x))
#else
#define U8TO64_LE(p) u8to64_le(p)
static inline uint64_t
u8to64_le(const uint8_t *p)
{
    uint64_t ret;
    ret.lo = U8TO32_LE(p);
    ret.hi = U8TO32_LE(p + 4);
    return ret;
}

#define U64TO8_LE(p, v) u64to8_le(p, v)
static inline void
u64to8_le(uint8_t *p, uint64_t v)
{
    U32TO8_LE(p,     v.lo);
    U32TO8_LE(p + 4, v.hi);
}

#define ROTL64_TO(v, s) ((s) > 32 ? rotl64_swap(rotl64_to(&(v), (s) - 32)) : \
                         (s) == 32 ? rotl64_swap(&(v)) : rotl64_to(&(v), (s)))
static inline uint64_t *
rotl64_to(uint64_t *v, unsigned int s)
{
    uint32_t uhi = (v->hi << s) | (v->lo >> (32 - s));
    uint32_t ulo = (v->lo << s) | (v->hi >> (32 - s));
    v->hi = uhi;
    v->lo = ulo;
    return v;
}

static inline uint64_t *
rotl64_swap(uint64_t *v)
{
    uint32_t t = v->lo;
    v->lo = v->hi;
    v->hi = t;
    return v;
}
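
/*
 * Editor's note (not in the original file): in this fallback branch,
 * "uint64_t" is the two-word struct declared in siphash.h, with lo/hi
 * mapped onto u32[0]/u32[1] by the endian macros above. ROTL64_TO splits
 * a 64-bit rotate into 32-bit operations: rotating by exactly 32 just
 * swaps the halves, and rotating by s > 32 equals rotating by s - 32 and
 * then swapping; rotl64_to itself is only valid for 0 < s < 32, which is
 * why the other two cases are dispatched separately.
 */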
#define ADD64_TO(v, s) add64_to(&(v), (s))
static inline uint64_t *
add64_to(uint64_t *v, const uint64_t s)
{
    v->lo += s.lo;
    v->hi += s.hi;
    if (v->lo < s.lo) v->hi++; /* propagate the carry out of the low word */
    return v;
}

#define XOR64_TO(v, s) xor64_to(&(v), (s))
static inline uint64_t *
xor64_to(uint64_t *v, const uint64_t s)
{
    v->lo ^= s.lo;
    v->hi ^= s.hi;
    return v;
}

#define XOR64_INT(v, x) ((v).lo ^= (x))
#endif
static const union {
    char bin[32];
    uint64_t u64[4];
} sip_init_state_bin = {"uespemos""modnarod""arenegyl""setybdet"};
#define sip_init_state sip_init_state_bin.u64
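
/*
 * Editor's note (not in the original file): read byte-reversed in 8-byte
 * groups, the initializer spells "somepseudorandomlygeneratedbytes" -- the
 * initialization constants from the SipHash specification, laid out so
 * that a little-endian load of each 64-bit lane yields the spec values.
 */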
#if SIP_HASH_STREAMING
struct sip_interface_st {
    void (*init)(sip_state *s, const uint8_t *key);
    void (*update)(sip_state *s, const uint8_t *data, size_t len);
    void (*final)(sip_state *s, uint64_t *digest);
};

static void int_sip_init(sip_state *state, const uint8_t *key);
static void int_sip_update(sip_state *state, const uint8_t *data, size_t len);
static void int_sip_final(sip_state *state, uint64_t *digest);

static const sip_interface sip_methods = {
    int_sip_init,
    int_sip_update,
    int_sip_final
};
#endif /* SIP_HASH_STREAMING */
#define SIP_COMPRESS(v0, v1, v2, v3)    \
do {                                    \
    ADD64_TO((v0), (v1));               \
    ADD64_TO((v2), (v3));               \
    ROTL64_TO((v1), 13);                \
    ROTL64_TO((v3), 16);                \
    XOR64_TO((v1), (v0));               \
    XOR64_TO((v3), (v2));               \
    ROTL64_TO((v0), 32);                \
    ADD64_TO((v2), (v1));               \
    ADD64_TO((v0), (v3));               \
    ROTL64_TO((v1), 17);                \
    ROTL64_TO((v3), 21);                \
    XOR64_TO((v1), (v2));               \
    XOR64_TO((v3), (v0));               \
    ROTL64_TO((v2), 32);                \
} while (0)
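
/*
 * Editor's note (not in the original file): SIP_COMPRESS is one SipRound
 * from the SipHash paper (Aumasson & Bernstein), an ARX (add-rotate-xor)
 * permutation of the four 64-bit state words; the rotation constants
 * 13, 16, 32, 17, 21, 32 match the reference definition. c such rounds
 * are run per message block and d rounds during finalization.
 */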
#if SIP_HASH_STREAMING
static void
int_sip_dump(sip_state *state)
{
    int v;

    for (v = 0; v < 4; v++) {
#ifdef HAVE_UINT64_T
        printf("v%d: %" PRIx64 "\n", v, state->v[v]);
#else
        printf("v%d: %" PRIx32 "%.8" PRIx32 "\n", v, state->v[v].hi, state->v[v].lo);
#endif
    }
}

static void
int_sip_init(sip_state *state, const uint8_t key[16])
{
    uint64_t k0, k1;

    k0 = U8TO64_LE(key);
    k1 = U8TO64_LE(key + sizeof(uint64_t));

    state->v[0] = k0; XOR64_TO(state->v[0], sip_init_state[0]);
    state->v[1] = k1; XOR64_TO(state->v[1], sip_init_state[1]);
    state->v[2] = k0; XOR64_TO(state->v[2], sip_init_state[2]);
    state->v[3] = k1; XOR64_TO(state->v[3], sip_init_state[3]);
}

static inline void
int_sip_round(sip_state *state, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        SIP_COMPRESS(state->v[0], state->v[1], state->v[2], state->v[3]);
    }
}

static inline void
int_sip_update_block(sip_state *state, uint64_t m)
{
    XOR64_TO(state->v[3], m);
    int_sip_round(state, state->c);
    XOR64_TO(state->v[0], m);
}
static inline void
int_sip_pre_update(sip_state *state, const uint8_t **pdata, size_t *plen)
{
    size_t to_read;
    uint64_t m;

    if (!state->buflen) return;

    to_read = sizeof(uint64_t) - state->buflen;
    if (*plen < to_read) {
        /* editor's fix: not enough input to complete the buffered block,
           so keep accumulating instead of reading past the end of *pdata */
        memcpy(state->buf + state->buflen, *pdata, *plen);
        state->buflen += (uint8_t)*plen;
        *pdata += *plen;
        *plen = 0;
        return;
    }
    memcpy(state->buf + state->buflen, *pdata, to_read);
    m = U8TO64_LE(state->buf);
    int_sip_update_block(state, m);
    *pdata += to_read;
    *plen -= to_read;
    state->buflen = 0;
}

static inline void
int_sip_post_update(sip_state *state, const uint8_t *data, size_t len)
{
    uint8_t r = len % sizeof(uint64_t);
    if (r) {
        memcpy(state->buf, data + len - r, r);
        state->buflen = r;
    }
}

static void
int_sip_update(sip_state *state, const uint8_t *data, size_t len)
{
    const uint64_t *end;
    const uint64_t *data64;

    state->msglen_byte = state->msglen_byte + (len % 256);

    int_sip_pre_update(state, &data, &len);

    /* editor's fix: take the word pointer only after int_sip_pre_update has
       consumed any bytes needed to complete a previously buffered block;
       the original took it beforehand and would hash the wrong bytes when
       the buffer was non-empty */
    data64 = (const uint64_t *)data;
    end = data64 + (len / sizeof(uint64_t));

#if BYTE_ORDER == LITTLE_ENDIAN
    while (data64 != end) {
        int_sip_update_block(state, *data64++);
    }
#elif BYTE_ORDER == BIG_ENDIAN
    {
        uint64_t m;
        const uint8_t *data8 = data;
        for (; data8 != (const uint8_t *)end; data8 += sizeof(uint64_t)) {
            m = U8TO64_LE(data8);
            int_sip_update_block(state, m);
        }
    }
#endif
    int_sip_post_update(state, data, len);
}
static inline void
int_sip_pad_final_block(sip_state *state)
{
    int i;
    /* pad with 0's and finalize with msg_len mod 256 */
    for (i = state->buflen; i < (int)sizeof(uint64_t); i++) {
        state->buf[i] = 0x00;
    }
    state->buf[sizeof(uint64_t) - 1] = state->msglen_byte;
}

static void
int_sip_final(sip_state *state, uint64_t *digest)
{
    uint64_t m;

    int_sip_pad_final_block(state);
    m = U8TO64_LE(state->buf);
    int_sip_update_block(state, m);
    XOR64_INT(state->v[2], 0xff);
    int_sip_round(state, state->d);
    *digest = state->v[0];
    XOR64_TO(*digest, state->v[1]);
    XOR64_TO(*digest, state->v[2]);
    XOR64_TO(*digest, state->v[3]);
}
sip_hash *
sip_hash_new(const uint8_t key[16], int c, int d)
{
    sip_hash *h = NULL;

    if (!(h = (sip_hash *)malloc(sizeof(sip_hash)))) return NULL;
    return sip_hash_init(h, key, c, d);
}

sip_hash *
sip_hash_init(sip_hash *h, const uint8_t key[16], int c, int d)
{
    h->state->c = c;
    h->state->d = d;
    h->state->buflen = 0;
    h->state->msglen_byte = 0;
    h->methods = &sip_methods;
    h->methods->init(h->state, key);
    return h;
}

int
sip_hash_update(sip_hash *h, const uint8_t *msg, size_t len)
{
    h->methods->update(h->state, msg, len);
    return 1;
}

int
sip_hash_final(sip_hash *h, uint8_t **digest, size_t *len)
{
    uint64_t digest64;
    uint8_t *ret;

    h->methods->final(h->state, &digest64);
    if (!(ret = (uint8_t *)malloc(sizeof(uint64_t)))) return 0;
    U64TO8_LE(ret, digest64);
    *len = sizeof(uint64_t);
    *digest = ret;
    return 1;
}

int
sip_hash_final_integer(sip_hash *h, uint64_t *digest)
{
    h->methods->final(h->state, digest);
    return 1;
}

int
sip_hash_digest(sip_hash *h, const uint8_t *data, size_t data_len, uint8_t **digest, size_t *digest_len)
{
    if (!sip_hash_update(h, data, data_len)) return 0;
    return sip_hash_final(h, digest, digest_len);
}

int
sip_hash_digest_integer(sip_hash *h, const uint8_t *data, size_t data_len, uint64_t *digest)
{
    if (!sip_hash_update(h, data, data_len)) return 0;
    return sip_hash_final_integer(h, digest);
}

void
sip_hash_free(sip_hash *h)
{
    free(h);
}

void
sip_hash_dump(sip_hash *h)
{
    int_sip_dump(h->state);
}
#endif /* SIP_HASH_STREAMING */
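
/*
 * Editor's sketch (not in the original file): minimal use of the streaming
 * interface above, assuming only the declarations from siphash.h and a
 * native 64-bit uint64_t. Kept under "#if 0" so it does not affect the
 * build.
 */
#if 0
static uint64_t
example_streaming_siphash24(void)
{
    static const uint8_t key[16] = {
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
    };
    uint64_t digest;
    sip_hash *h = sip_hash_new(key, 2, 4); /* SipHash-2-4: c = 2, d = 4 */

    if (!h) return 0;
    /* the message may be fed in arbitrary chunks */
    sip_hash_update(h, (const uint8_t *)"hello ", 6);
    sip_hash_update(h, (const uint8_t *)"world", 5);
    sip_hash_final_integer(h, &digest);
    sip_hash_free(h);
    return digest;
}
#endif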
#define SIP_ROUND(m, v0, v1, v2, v3)    \
do {                                    \
    XOR64_TO((v3), (m));                \
    SIP_COMPRESS(v0, v1, v2, v3);       \
    XOR64_TO((v0), (m));                \
} while (0)
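
/*
 * Editor's note (not in the original file): SIP_ROUND absorbs one 64-bit
 * message word m exactly as the SipHash specification prescribes --
 * v3 ^= m before the compression round and v0 ^= m after it.
 */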
uint64_t
sip_hash13(const uint8_t key[16], const uint8_t *data, size_t len)
{
    uint64_t k0, k1;
    uint64_t v0, v1, v2, v3;
    uint64_t m, last;
    const uint8_t *end = data + len - (len % sizeof(uint64_t));

    k0 = U8TO64_LE(key);
    k1 = U8TO64_LE(key + sizeof(uint64_t));
    v0 = k0; XOR64_TO(v0, sip_init_state[0]);
    v1 = k1; XOR64_TO(v1, sip_init_state[1]);
    v2 = k0; XOR64_TO(v2, sip_init_state[2]);
    v3 = k1; XOR64_TO(v3, sip_init_state[3]);

#if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
    {
        const uint64_t *data64 = (const uint64_t *)data;
        while (data64 != (const uint64_t *)end) {
            m = *data64++;
            SIP_ROUND(m, v0, v1, v2, v3);
        }
    }
#else
    for (; data != end; data += sizeof(uint64_t)) {
        m = U8TO64_LE(data);
        SIP_ROUND(m, v0, v1, v2, v3);
    }
#endif

#ifdef HAVE_UINT64_T
    last = (uint64_t)len << 56;
#define OR_BYTE(n) (last |= ((uint64_t)end[n]) << ((n) * 8))
#else
    last.hi = len << 24;
    last.lo = 0;
/* the (n) >= 4 ternaries keep the constant shift amounts in range even in
   the branch that is not taken */
#define OR_BYTE(n) do { \
        if (n >= 4) \
            last.hi |= ((uint32_t)end[n]) << ((n) >= 4 ? (n) * 8 - 32 : 0); \
        else \
            last.lo |= ((uint32_t)end[n]) << ((n) >= 4 ? 0 : (n) * 8); \
    } while (0)
#endif

    switch (len % sizeof(uint64_t)) {
      case 7:
        OR_BYTE(6);
        /* fall through */
      case 6:
        OR_BYTE(5);
        /* fall through */
      case 5:
        OR_BYTE(4);
        /* fall through */
      case 4:
#if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
#ifdef HAVE_UINT64_T
        last |= (uint64_t)((const uint32_t *)end)[0];
#else
        last.lo |= ((const uint32_t *)end)[0];
#endif
        break;
#else
        OR_BYTE(3);
        /* fall through */
#endif
      case 3:
        OR_BYTE(2);
        /* fall through */
      case 2:
        OR_BYTE(1);
        /* fall through */
      case 1:
        OR_BYTE(0);
        break;
      case 0:
        break;
    }

    SIP_ROUND(last, v0, v1, v2, v3);
    XOR64_INT(v2, 0xff);
    SIP_COMPRESS(v0, v1, v2, v3);
    SIP_COMPRESS(v0, v1, v2, v3);
    SIP_COMPRESS(v0, v1, v2, v3);
    XOR64_TO(v0, v1);
    XOR64_TO(v0, v2);
    XOR64_TO(v0, v3);
    return v0;
}
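
/*
 * Editor's sketch (not in the original file): one-shot use of sip_hash13,
 * which hard-codes one compression round per block and three finalization
 * rounds (hence "1-3"). Assumes a native 64-bit uint64_t; kept under
 * "#if 0" so it does not affect the build.
 */
#if 0
static uint64_t
example_sip_hash13(void)
{
    static const uint8_t key[16] = { 0 }; /* all-zero key, for illustration only */
    const char *msg = "hash me";

    return sip_hash13(key, (const uint8_t *)msg, strlen(msg));
}
#endif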