/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c

/*
 * Host AP crypt: host-based TKIP encryption implementation for Host AP driver
 *
 * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation. See README and COPYING for
 * more details.
 */

//#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <asm/string.h>

#include "ieee80211.h"

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/crc32.h>

MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Host AP crypt: TKIP");
MODULE_LICENSE("GPL");

struct ieee80211_tkip_data {
#define TKIP_KEY_LEN 32
        u8 key[TKIP_KEY_LEN];
        int key_set;

        u32 tx_iv32;
        u16 tx_iv16;
        u16 tx_ttak[5];
        int tx_phase1_done;

        u32 rx_iv32;
        u16 rx_iv16;
        u16 rx_ttak[5];
        int rx_phase1_done;
        u32 rx_iv32_new;
        u16 rx_iv16_new;

        u32 dot11RSNAStatsTKIPReplays;
        u32 dot11RSNAStatsTKIPICVErrors;
        u32 dot11RSNAStatsTKIPLocalMICFailures;

        int key_idx;

        struct crypto_blkcipher *rx_tfm_arc4;
        struct crypto_hash *rx_tfm_michael;
        struct crypto_blkcipher *tx_tfm_arc4;
        struct crypto_hash *tx_tfm_michael;

        /* scratch buffers for virt_to_page() (crypto API) */
        u8 rx_hdr[16], tx_hdr[16];
};

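/* Allocate per-key state and the four crypto API transforms (ECB ARC4 and
 * Michael MIC, one pair each for TX and RX). Anything allocated so far is
 * released again on any failure. */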
static void *ieee80211_tkip_init(int key_idx)
{
        struct ieee80211_tkip_data *priv;

        priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
        if (priv == NULL)
                goto fail;
        priv->key_idx = key_idx;

        priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_arc4)) {
                printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
                       "crypto API arc4\n");
                priv->tx_tfm_arc4 = NULL;
                goto fail;
        }

        priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_michael)) {
                printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
                       "crypto API michael_mic\n");
                priv->tx_tfm_michael = NULL;
                goto fail;
        }

        priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_arc4)) {
                printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
                       "crypto API arc4\n");
                priv->rx_tfm_arc4 = NULL;
                goto fail;
        }

        priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_michael)) {
                printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
                       "crypto API michael_mic\n");
                priv->rx_tfm_michael = NULL;
                goto fail;
        }

        return priv;

fail:
        if (priv) {
                if (priv->tx_tfm_michael)
                        crypto_free_hash(priv->tx_tfm_michael);
                if (priv->tx_tfm_arc4)
                        crypto_free_blkcipher(priv->tx_tfm_arc4);
                if (priv->rx_tfm_michael)
                        crypto_free_hash(priv->rx_tfm_michael);
                if (priv->rx_tfm_arc4)
                        crypto_free_blkcipher(priv->rx_tfm_arc4);
                kfree(priv);
        }

        return NULL;
}

static void ieee80211_tkip_deinit(void *priv)
{
        struct ieee80211_tkip_data *_priv = priv;

        if (_priv) {
                if (_priv->tx_tfm_michael)
                        crypto_free_hash(_priv->tx_tfm_michael);
                if (_priv->tx_tfm_arc4)
                        crypto_free_blkcipher(_priv->tx_tfm_arc4);
                if (_priv->rx_tfm_michael)
                        crypto_free_hash(_priv->rx_tfm_michael);
                if (_priv->rx_tfm_arc4)
                        crypto_free_blkcipher(_priv->rx_tfm_arc4);
        }
        kfree(priv);
}

static inline u16 RotR1(u16 val)
{
        return (val >> 1) | (val << 15);
}

static inline u8 Lo8(u16 val)
{
        return val & 0xff;
}

static inline u8 Hi8(u16 val)
{
        return val >> 8;
}

static inline u16 Lo16(u32 val)
{
        return val & 0xffff;
}

static inline u16 Hi16(u32 val)
{
        return val >> 16;
}

static inline u16 Mk16(u8 hi, u8 lo)
{
        return lo | (((u16) hi) << 8);
}

static inline u16 Mk16_le(u16 *v)
{
        return le16_to_cpu(*v);
}

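/* TKIP S-box from the 802.11i key-mixing specification; _S_() below looks up
 * both bytes of a 16-bit value and combines the results with a byte swap. */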
static const u16 Sbox[256] =
{
        0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
        0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
        0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
        0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
        0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
        0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
        0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
        0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
        0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
        0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
        0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
        0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
        0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
        0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
        0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
        0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
        0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
        0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
        0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
        0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
        0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
        0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
        0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
        0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
        0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
        0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
        0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
        0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
        0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
        0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
        0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
        0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
};

static inline u16 _S_(u16 v)
{
        u16 t = Sbox[Hi8(v)];
        return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
}

#define PHASE1_LOOP_COUNT 8

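/* TKIP phase 1 key mixing: combine the upper 32 bits of the TSC (IV32), the
 * transmitter address (TA) and the temporal key (TK) into the 80-bit TTAK.
 * The result depends only on IV32, so callers cache it and reuse it until
 * IV32 changes. */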
static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32)
{
        int i, j;

        /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */
        TTAK[0] = Lo16(IV32);
        TTAK[1] = Hi16(IV32);
        TTAK[2] = Mk16(TA[1], TA[0]);
        TTAK[3] = Mk16(TA[3], TA[2]);
        TTAK[4] = Mk16(TA[5], TA[4]);

        for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
                j = 2 * (i & 1);
                TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j]));
                TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j]));
                TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j]));
                TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j]));
                TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i;
        }
}

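/* TKIP phase 2 key mixing: combine the TTAK, the temporal key and the lower
 * 16 bits of the TSC (IV16) into the 128-bit per-packet RC4 key (WEPSeed).
 * WEPSeed[0..2] doubles as the WEP IV that is transmitted in the clear. */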
static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
                               u16 IV16)
{
        /* Make temporary area overlap WEP seed so that the final copy can be
         * avoided on little endian hosts. */
        u16 *PPK = (u16 *) &WEPSeed[4];

        /* Step 1 - make copy of TTAK and bring in TSC */
        PPK[0] = TTAK[0];
        PPK[1] = TTAK[1];
        PPK[2] = TTAK[2];
        PPK[3] = TTAK[3];
        PPK[4] = TTAK[4];
        PPK[5] = TTAK[4] + IV16;

        /* Step 2 - 96-bit bijective mixing using S-box */
        PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) &TK[0]));
        PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) &TK[2]));
        PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) &TK[4]));
        PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) &TK[6]));
        PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) &TK[8]));
        PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) &TK[10]));

        PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) &TK[12]));
        PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) &TK[14]));
        PPK[2] += RotR1(PPK[1]);
        PPK[3] += RotR1(PPK[2]);
        PPK[4] += RotR1(PPK[3]);
        PPK[5] += RotR1(PPK[4]);

        /* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value
         * WEPSeed[0..2] is transmitted as WEP IV */
        WEPSeed[0] = Hi8(IV16);
        WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
        WEPSeed[2] = Lo8(IV16);
        WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) &TK[0])) >> 1);

#ifdef __BIG_ENDIAN
        {
                int i;
                for (i = 0; i < 6; i++)
                        PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8);
        }
#endif
}

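/* MPDU encryption: derive the per-packet RC4 key (phase 1 and phase 2 of the
 * key mixing), prepend the 8-byte IV/ExtIV header, append the 4-byte ICV
 * (CRC-32 of the plaintext) and RC4-encrypt data + ICV in place. When the
 * hardware handles the cipher (bHwSec), only the IV/ExtIV header is built
 * here and the payload is left untouched. */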
static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
        struct ieee80211_tkip_data *tkey = priv;
        int len;
        u8 *pos;
        struct ieee80211_hdr_4addr *hdr;
        cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
        struct blkcipher_desc desc = { .tfm = tkey->tx_tfm_arc4 };
        int ret = 0;
        u8 rc4key[16], *icv;
        u32 crc;
        struct scatterlist sg;

        if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
            skb->len < hdr_len)
                return -1;

        hdr = (struct ieee80211_hdr_4addr *) skb->data;

        if (!tcb_desc->bHwSec) {
                if (!tkey->tx_phase1_done) {
                        tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2,
                                           tkey->tx_iv32);
                        tkey->tx_phase1_done = 1;
                }
                tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
        } else
                tkey->tx_phase1_done = 1;

        len = skb->len - hdr_len;
        pos = skb_push(skb, 8);
        memmove(pos, pos + 8, hdr_len);
        pos += hdr_len;

        if (tcb_desc->bHwSec) {
                *pos++ = Hi8(tkey->tx_iv16);
                *pos++ = (Hi8(tkey->tx_iv16) | 0x20) & 0x7F;
                *pos++ = Lo8(tkey->tx_iv16);
        } else {
                *pos++ = rc4key[0];
                *pos++ = rc4key[1];
                *pos++ = rc4key[2];
        }

        *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */;
        *pos++ = tkey->tx_iv32 & 0xff;
        *pos++ = (tkey->tx_iv32 >> 8) & 0xff;
        *pos++ = (tkey->tx_iv32 >> 16) & 0xff;
        *pos++ = (tkey->tx_iv32 >> 24) & 0xff;

        if (!tcb_desc->bHwSec) {
                icv = skb_put(skb, 4);
                crc = ~crc32_le(~0, pos, len);
                icv[0] = crc;
                icv[1] = crc >> 8;
                icv[2] = crc >> 16;
                icv[3] = crc >> 24;
                crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
                sg_init_one(&sg, pos, len + 4);
                ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
        }

        tkey->tx_iv16++;
        if (tkey->tx_iv16 == 0) {
                tkey->tx_phase1_done = 0;
                tkey->tx_iv32++;
        }

        if (!tcb_desc->bHwSec)
                return ret;
        else
                return 0;
}

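/* MPDU decryption: validate the ExtIV flag and key index, enforce TSC replay
 * protection, derive the per-packet RC4 key, decrypt in place and verify the
 * ICV, then strip the 8-byte IV and 4-byte ICV. The RX TSC is only staged in
 * rx_iv32_new/rx_iv16_new here and committed after Michael MIC verification. */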
static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
        struct ieee80211_tkip_data *tkey = priv;
        u8 keyidx, *pos;
        u32 iv32;
        u16 iv16;
        struct ieee80211_hdr_4addr *hdr;
        cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
        struct blkcipher_desc desc = { .tfm = tkey->rx_tfm_arc4 };
        u8 rc4key[16];
        u8 icv[4];
        u32 crc;
        struct scatterlist sg;
        int plen;

        if (skb->len < hdr_len + 8 + 4)
                return -1;

        hdr = (struct ieee80211_hdr_4addr *) skb->data;
        pos = skb->data + hdr_len;
        keyidx = pos[3];
        if (!(keyidx & (1 << 5))) {
                if (net_ratelimit()) {
                        printk(KERN_DEBUG "TKIP: received packet without ExtIV"
                               " flag from %pM\n", hdr->addr2);
                }
                return -2;
        }
        keyidx >>= 6;
        if (tkey->key_idx != keyidx) {
                printk(KERN_DEBUG "TKIP: RX tkey->key_idx=%d frame "
                       "keyidx=%d priv=%p\n", tkey->key_idx, keyidx, priv);
                return -6;
        }
        if (!tkey->key_set) {
                if (net_ratelimit()) {
                        printk(KERN_DEBUG "TKIP: received packet from %pM"
                               " with keyid=%d that does not have a configured"
                               " key\n", hdr->addr2, keyidx);
                }
                return -3;
        }
        iv16 = (pos[0] << 8) | pos[2];
        iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
        pos += 8;

        if (!tcb_desc->bHwSec) {
                if (iv32 < tkey->rx_iv32 ||
                    (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
                        if (net_ratelimit()) {
                                printk(KERN_DEBUG "TKIP: replay detected: STA=%pM"
                                       " previous TSC %08x%04x received TSC "
                                       "%08x%04x\n", hdr->addr2,
                                       tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
                        }
                        tkey->dot11RSNAStatsTKIPReplays++;
                        return -4;
                }

                if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) {
                        tkip_mixing_phase1(tkey->rx_ttak, tkey->key, hdr->addr2, iv32);
                        tkey->rx_phase1_done = 1;
                }
                tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16);

                plen = skb->len - hdr_len - 12;

                crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
                sg_init_one(&sg, pos, plen + 4);
                if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
                        if (net_ratelimit()) {
                                printk(KERN_DEBUG ": TKIP: failed to decrypt "
                                       "received packet from %pM\n",
                                       hdr->addr2);
                        }
                        return -7;
                }

                crc = ~crc32_le(~0, pos, plen);
                icv[0] = crc;
                icv[1] = crc >> 8;
                icv[2] = crc >> 16;
                icv[3] = crc >> 24;

                if (memcmp(icv, pos + plen, 4) != 0) {
                        if (iv32 != tkey->rx_iv32) {
                                /* Previously cached Phase1 result was already
                                 * lost, so it needs to be recalculated for the
                                 * next packet. */
                                tkey->rx_phase1_done = 0;
                        }
                        if (net_ratelimit()) {
                                printk(KERN_DEBUG "TKIP: ICV error detected: STA="
                                       "%pM\n", hdr->addr2);
                        }
                        tkey->dot11RSNAStatsTKIPICVErrors++;
                        return -5;
                }
        }

        /* Update real counters only after Michael MIC verification has
         * completed */
        tkey->rx_iv32_new = iv32;
        tkey->rx_iv16_new = iv16;

        /* Remove IV and ICV */
        memmove(skb->data + 8, skb->data, hdr_len);
        skb_pull(skb, 8);
        skb_trim(skb, skb->len - 4);

        return keyidx;
}

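/* Compute the 8-byte Michael MIC over a 16-byte pseudo-header (DA, SA,
 * priority, reserved) followed by the MSDU payload, using the crypto API
 * "michael_mic" hash with the given 8-byte MIC key. */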
static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
                       u8 *data, size_t data_len, u8 *mic)
{
        struct hash_desc desc;
        struct scatterlist sg[2];

        if (tfm_michael == NULL) {
                printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
                return -1;
        }

        sg_init_table(sg, 2);
        sg_set_buf(&sg[0], hdr, 16);
        sg_set_buf(&sg[1], data, data_len);

        if (crypto_hash_setkey(tfm_michael, key, 8))
                return -1;

        desc.tfm = tfm_michael;
        desc.flags = 0;
        return crypto_hash_digest(&desc, sg, data_len + 16, mic);
}

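/* Build the 16-byte Michael MIC pseudo-header: DA and SA are selected from
 * addr1..addr4 according to the ToDS/FromDS bits of the frame control field,
 * followed by the priority byte and three reserved zero bytes. */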
static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
{
        struct ieee80211_hdr_4addr *hdr11;

        hdr11 = (struct ieee80211_hdr_4addr *) skb->data;
        switch (le16_to_cpu(hdr11->frame_ctl) &
                (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
        case IEEE80211_FCTL_TODS:
                memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
                memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
                break;
        case IEEE80211_FCTL_FROMDS:
                memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
                memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */
                break;
        case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
                memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
                memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */
                break;
        case 0:
                memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
                memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
                break;
        }

        hdr[12] = 0; /* priority */
        hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
}

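/* MSDU post-processing on TX: append the 8-byte Michael MIC, computed with
 * the TX MIC key (key[16..23]) over the MIC pseudo-header and the payload.
 * For QoS frames the priority field of the pseudo-header is taken from the
 * QoS control field. */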
static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
{
        struct ieee80211_tkip_data *tkey = priv;
        u8 *pos;
        struct ieee80211_hdr_4addr *hdr;

        hdr = (struct ieee80211_hdr_4addr *) skb->data;

        if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
                printk(KERN_DEBUG "Invalid packet for Michael MIC add "
                       "(tailroom=%d hdr_len=%d skb->len=%d)\n",
                       skb_tailroom(skb), hdr_len, skb->len);
                return -1;
        }

        michael_mic_hdr(skb, tkey->tx_hdr);

        // { david, 2006.9.1
        // fix the wpa process with wmm enabled.
        if (IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
                tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
        }
        // }
        pos = skb_put(skb, 8);

        if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
                        skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
                return -1;

        return 0;
}

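/* Report a Michael MIC failure to user space as an IWEVMICHAELMICFAILURE
 * wireless event (e.g. so a supplicant can trigger TKIP countermeasures). */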
static void ieee80211_michael_mic_failure(struct net_device *dev,
                                          struct ieee80211_hdr_4addr *hdr,
                                          int keyidx)
{
        union iwreq_data wrqu;
        struct iw_michaelmicfailure ev;

        /* TODO: needed parameters: count, keyid, key type, TSC */
        memset(&ev, 0, sizeof(ev));
        ev.flags = keyidx & IW_MICFAILURE_KEY_ID;
        if (hdr->addr1[0] & 0x01)
                ev.flags |= IW_MICFAILURE_GROUP;
        else
                ev.flags |= IW_MICFAILURE_PAIRWISE;
        ev.src_addr.sa_family = ARPHRD_ETHER;
        memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN);
        memset(&wrqu, 0, sizeof(wrqu));
        wrqu.data.length = sizeof(ev);
        wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev);
}

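/* MSDU post-processing on RX: recompute the Michael MIC with the RX MIC key
 * (key[24..31]) and compare it against the 8 bytes at the end of the frame.
 * On mismatch a MIC failure event is reported; on success the RX TSC staged
 * by the MPDU decrypt step is committed and the MIC is stripped. */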
static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
                                        int hdr_len, void *priv)
{
        struct ieee80211_tkip_data *tkey = priv;
        u8 mic[8];
        struct ieee80211_hdr_4addr *hdr;

        hdr = (struct ieee80211_hdr_4addr *) skb->data;

        if (!tkey->key_set)
                return -1;

        michael_mic_hdr(skb, tkey->rx_hdr);
        // { david, 2006.9.1
        // fix the wpa process with wmm enabled.
        if (IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) {
                tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07;
        }
        // }

        if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
                        skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
                return -1;

        if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
                struct ieee80211_hdr_4addr *hdr;
                hdr = (struct ieee80211_hdr_4addr *) skb->data;

                printk(KERN_DEBUG "%s: Michael MIC verification failed for "
                       "MSDU from %pM keyidx=%d\n",
                       skb->dev ? skb->dev->name : "N/A", hdr->addr2,
                       keyidx);
                if (skb->dev)
                        ieee80211_michael_mic_failure(skb->dev, hdr, keyidx);
                tkey->dot11RSNAStatsTKIPLocalMICFailures++;
                return -1;
        }

        /* Update TSC counters for RX now that the packet verification has
         * completed. */
        tkey->rx_iv32 = tkey->rx_iv32_new;
        tkey->rx_iv16 = tkey->rx_iv16_new;

        skb_trim(skb, skb->len - 8);

        return 0;
}

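/* Install or clear the 32-byte TKIP key: 16-byte temporal key followed by the
 * 8-byte TX and 8-byte RX Michael MIC keys. The crypto transforms are
 * preserved across the memset of the per-key state, the TX TSC is reset to 1
 * and, if provided, the RX TSC is primed from the supplied sequence number. */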
static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
{
        struct ieee80211_tkip_data *tkey = priv;
        int keyidx;
        struct crypto_hash *tfm = tkey->tx_tfm_michael;
        struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
        struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
        struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;

        keyidx = tkey->key_idx;
        memset(tkey, 0, sizeof(*tkey));
        tkey->key_idx = keyidx;
        tkey->tx_tfm_michael = tfm;
        tkey->tx_tfm_arc4 = tfm2;
        tkey->rx_tfm_michael = tfm3;
        tkey->rx_tfm_arc4 = tfm4;

        if (len == TKIP_KEY_LEN) {
                memcpy(tkey->key, key, TKIP_KEY_LEN);
                tkey->key_set = 1;
                tkey->tx_iv16 = 1; /* TSC is initialized to 1 */
                if (seq) {
                        tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) |
                                        (seq[3] << 8) | seq[2];
                        tkey->rx_iv16 = (seq[1] << 8) | seq[0];
                }
        } else if (len == 0)
                tkey->key_set = 0;
        else
                return -1;

        return 0;
}

static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
{
        struct ieee80211_tkip_data *tkey = priv;

        if (len < TKIP_KEY_LEN)
                return -1;

        if (!tkey->key_set)
                return 0;
        memcpy(key, tkey->key, TKIP_KEY_LEN);

        if (seq) {
                /* Return the sequence number of the last transmitted frame.
                 * tx_iv32/tx_iv16 hold the TSC of the next frame to be sent,
                 * so step the local copies back by one before reporting. */
                u16 iv16 = tkey->tx_iv16;
                u32 iv32 = tkey->tx_iv32;
                if (iv16 == 0)
                        iv32--;
                iv16--;
                seq[0] = iv16;
                seq[1] = iv16 >> 8;
                seq[2] = iv32;
                seq[3] = iv32 >> 8;
                seq[4] = iv32 >> 16;
                seq[5] = iv32 >> 24;
        }

        return TKIP_KEY_LEN;
}

static char *ieee80211_tkip_print_stats(char *p, void *priv)
{
        struct ieee80211_tkip_data *tkip = priv;

        p += sprintf(p, "key[%d] alg=TKIP key_set=%d "
                     "tx_pn=%02x%02x%02x%02x%02x%02x "
                     "rx_pn=%02x%02x%02x%02x%02x%02x "
                     "replays=%d icv_errors=%d local_mic_failures=%d\n",
                     tkip->key_idx, tkip->key_set,
                     (tkip->tx_iv32 >> 24) & 0xff,
                     (tkip->tx_iv32 >> 16) & 0xff,
                     (tkip->tx_iv32 >> 8) & 0xff,
                     tkip->tx_iv32 & 0xff,
                     (tkip->tx_iv16 >> 8) & 0xff,
                     tkip->tx_iv16 & 0xff,
                     (tkip->rx_iv32 >> 24) & 0xff,
                     (tkip->rx_iv32 >> 16) & 0xff,
                     (tkip->rx_iv32 >> 8) & 0xff,
                     tkip->rx_iv32 & 0xff,
                     (tkip->rx_iv16 >> 8) & 0xff,
                     tkip->rx_iv16 & 0xff,
                     tkip->dot11RSNAStatsTKIPReplays,
                     tkip->dot11RSNAStatsTKIPICVErrors,
                     tkip->dot11RSNAStatsTKIPLocalMICFailures);
        return p;
}

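/* TKIP crypto ops registered with the ieee80211 layer. extra_prefix_len and
 * extra_postfix_len tell callers how much headroom (IV + ExtIV) and tailroom
 * (Michael MIC + ICV) to reserve per frame. */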
static struct ieee80211_crypto_ops ieee80211_crypt_tkip = {
        .name = "TKIP",
        .init = ieee80211_tkip_init,
        .deinit = ieee80211_tkip_deinit,
        .encrypt_mpdu = ieee80211_tkip_encrypt,
        .decrypt_mpdu = ieee80211_tkip_decrypt,
        .encrypt_msdu = ieee80211_michael_mic_add,
        .decrypt_msdu = ieee80211_michael_mic_verify,
        .set_key = ieee80211_tkip_set_key,
        .get_key = ieee80211_tkip_get_key,
        .print_stats = ieee80211_tkip_print_stats,
        .extra_prefix_len = 4 + 4, /* IV + ExtIV */
        .extra_postfix_len = 8 + 4, /* MIC + ICV */
        .owner = THIS_MODULE,
};

int __init ieee80211_crypto_tkip_init(void)
{
        return ieee80211_register_crypto_ops(&ieee80211_crypt_tkip);
}

void __exit ieee80211_crypto_tkip_exit(void)
{
        ieee80211_unregister_crypto_ops(&ieee80211_crypt_tkip);
}

void ieee80211_tkip_null(void)
{
//      printk("============>%s()\n", __FUNCTION__);
        return;
}