/kern_oII/drivers/net/cnic_defs.h


/* cnic_defs.h: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 */
#ifndef CNIC_DEFS_H
#define CNIC_DEFS_H

/* KWQ (kernel work queue) request op codes */
#define L2_KWQE_OPCODE_VALUE_FLUSH (4)

#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50)
#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51)
#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52)
#define L4_KWQE_OPCODE_VALUE_RESET (53)
#define L4_KWQE_OPCODE_VALUE_CLOSE (54)
#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60)
#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61)

#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1)
#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9)
#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14)

#define L5CM_RAMROD_CMD_ID_BASE (0x80)
#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3)
#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12)
#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13)
#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)

/* KCQ (kernel completion queue) response op codes */
#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55)
#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56)
#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57)
#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58)
#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61)

#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1)
#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9)
#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14)

/* KCQ (kernel completion queue) completion status */
#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)

#define L4_LAYER_CODE (4)
#define L2_LAYER_CODE (2)
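
/*
 * Illustrative sketch, not part of the upstream header: every KWQE
 * request below packs its 'flags' byte the same way, as
 * <reserved:4><layer_code:3><linked_with_next:1>, and the KCQE
 * completions keep the layer code in the same bits 4-6.  A
 * hypothetical helper that builds such a byte from that convention:
 */
static inline u8 cnic_example_hdr_flags(u8 layer_code, int linked)
{
	u8 flags = 0;

	flags |= (layer_code & 0x7) << 4;	/* the *_LAYER_CODE field */
	if (linked)
		flags |= (u8)(0x1 << 7);	/* *_LINKED_WITH_NEXT bit */
	return flags;
}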
/*
 * L4 KCQ CQE
 */
struct l4_kcq {
	u32 cid;
	u32 pg_cid;
	u32 conn_id;
	u32 pg_host_opaque;
#if defined(__BIG_ENDIAN)
	u16 status;
	u16 reserved1;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved1;
	u16 status;
#endif
	u32 reserved2[2];
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KCQ_RESERVED3 (0x7<<0)
#define L4_KCQ_RESERVED3_SHIFT 0
#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
#define L4_KCQ_LAYER_CODE (0x7<<4)
#define L4_KCQ_LAYER_CODE_SHIFT 4
#define L4_KCQ_RESERVED4 (0x1<<7)
#define L4_KCQ_RESERVED4_SHIFT 7
	u8 op_code;
	u16 qe_self_seq;
#elif defined(__LITTLE_ENDIAN)
	u16 qe_self_seq;
	u8 op_code;
	u8 flags;
#define L4_KCQ_RESERVED3 (0x7<<0)
#define L4_KCQ_RESERVED3_SHIFT 0
#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
#define L4_KCQ_LAYER_CODE (0x7<<4)
#define L4_KCQ_LAYER_CODE_SHIFT 4
#define L4_KCQ_RESERVED4 (0x1<<7)
#define L4_KCQ_RESERVED4_SHIFT 7
#endif
};
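
/*
 * Illustrative sketch, not part of the upstream header: decoding a
 * received L4 KCQE.  The helper name is hypothetical; the masks and
 * status codes are the ones defined above.
 */
static inline int cnic_example_kcq_succeeded(const struct l4_kcq *kcqe)
{
	u8 layer = (kcqe->flags & L4_KCQ_LAYER_CODE) >>
		   L4_KCQ_LAYER_CODE_SHIFT;

	if (layer != L4_LAYER_CODE)
		return 0;	/* not an L4 completion */
	return kcqe->status == L4_KCQE_COMPLETION_STATUS_SUCCESS;
}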
/*
 * L4 KCQ CQE PG upload
 */
struct l4_kcq_upload_pg {
	u32 pg_cid;
#if defined(__BIG_ENDIAN)
	u16 pg_status;
	u16 pg_ipid_count;
#elif defined(__LITTLE_ENDIAN)
	u16 pg_ipid_count;
	u16 pg_status;
#endif
	u32 reserved1[5];
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
	u8 op_code;
	u16 qe_self_seq;
#elif defined(__LITTLE_ENDIAN)
	u16 qe_self_seq;
	u8 op_code;
	u8 flags;
#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
#endif
};
/*
 * Gracefully close the connection request
 */
struct l4_kwq_close_req {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
	u8 op_code;
	u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved0;
	u8 op_code;
	u8 flags;
#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
#endif
	u32 cid;
	u32 reserved2[6];
};
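
/*
 * Illustrative sketch, not part of the upstream header: filling in a
 * graceful-close KWQE.  The helper name is hypothetical; the caller is
 * assumed to pass a zeroed request so the reserved fields stay 0.  The
 * abortive l4_kwq_reset_req further below is filled the same way, with
 * L4_KWQE_OPCODE_VALUE_RESET as the op code.
 */
static inline void cnic_example_init_close_req(struct l4_kwq_close_req *req,
					       u32 cid)
{
	req->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	req->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	req->cid = cid;
}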
/*
 * The first request to be passed in order to establish connection in option2
 */
struct l4_kwq_connect_req1 {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
	u8 op_code;
	u8 reserved0;
	u8 conn_flags;
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
#elif defined(__LITTLE_ENDIAN)
	u8 conn_flags;
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
	u8 reserved0;
	u8 op_code;
	u8 flags;
#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
#endif
	u32 cid;
	u32 pg_cid;
	u32 src_ip;
	u32 dst_ip;
#if defined(__BIG_ENDIAN)
	u16 dst_port;
	u16 src_port;
#elif defined(__LITTLE_ENDIAN)
	u16 src_port;
	u16 dst_port;
#endif
#if defined(__BIG_ENDIAN)
	u8 rsrv1[3];
	u8 tcp_flags;
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
#elif defined(__LITTLE_ENDIAN)
	u8 tcp_flags;
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
	u8 rsrv1[3];
#endif
	u32 rsrv2;
};
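
/*
 * Illustrative sketch, not part of the upstream header: filling in the
 * first connect KWQE for an IPv4 connection.  The helper name is
 * hypothetical; the caller is assumed to pass a zeroed request and to
 * supply addresses and ports in the byte order the firmware expects.
 */
static inline void
cnic_example_init_connect1(struct l4_kwq_connect_req1 *req, u32 cid,
			   u32 pg_cid, u32 src_ip, u16 src_port,
			   u32 dst_ip, u16 dst_port)
{
	req->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	req->flags = L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT;
	req->cid = cid;
	req->pg_cid = pg_cid;
	req->src_ip = src_ip;
	req->src_port = src_port;
	req->dst_ip = dst_ip;
	req->dst_port = dst_port;
	/* IPv4, so L4_KWQ_CONNECT_REQ1_IP_V6 stays clear in conn_flags. */
	req->tcp_flags = L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
}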
/*
 * The second (optional) request to be passed in order to establish
 * connection in option2 - for IPv6 only
 */
struct l4_kwq_connect_req2 {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
	u8 op_code;
	u8 reserved0;
	u8 rsrv;
#elif defined(__LITTLE_ENDIAN)
	u8 rsrv;
	u8 reserved0;
	u8 op_code;
	u8 flags;
#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
#endif
	u32 reserved2;
	u32 src_ip_v6_2;
	u32 src_ip_v6_3;
	u32 src_ip_v6_4;
	u32 dst_ip_v6_2;
	u32 dst_ip_v6_3;
	u32 dst_ip_v6_4;
};
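
/*
 * Illustrative sketch, not part of the upstream header: judging by the
 * field names, the first 32-bit word of each IPv6 address travels in
 * req1's src_ip/dst_ip and the remaining three words go in this second
 * KWQE (an assumption, not confirmed by this file).  The helper name
 * is hypothetical; the caller is assumed to pass a zeroed request.
 */
static inline void
cnic_example_init_connect2(struct l4_kwq_connect_req2 *req,
			   const u32 src[4], const u32 dst[4])
{
	req->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
	req->flags = L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
	req->src_ip_v6_2 = src[1];
	req->src_ip_v6_3 = src[2];
	req->src_ip_v6_4 = src[3];
	req->dst_ip_v6_2 = dst[1];
	req->dst_ip_v6_3 = dst[2];
	req->dst_ip_v6_4 = dst[3];
}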
/*
 * The third (and last) request to be passed in order to establish
 * connection in option2
 */
struct l4_kwq_connect_req3 {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
	u8 op_code;
	u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved0;
	u8 op_code;
	u8 flags;
#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
#endif
	u32 ka_timeout;
	u32 ka_interval;
#if defined(__BIG_ENDIAN)
	u8 snd_seq_scale;
	u8 ttl;
	u8 tos;
	u8 ka_max_probe_count;
#elif defined(__LITTLE_ENDIAN)
	u8 ka_max_probe_count;
	u8 tos;
	u8 ttl;
	u8 snd_seq_scale;
#endif
#if defined(__BIG_ENDIAN)
	u16 pmtu;
	u16 mss;
#elif defined(__LITTLE_ENDIAN)
	u16 mss;
	u16 pmtu;
#endif
	u32 rcv_buf;
	u32 snd_buf;
	u32 seed;
};
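
/*
 * Illustrative sketch, not part of the upstream header: the third
 * connect KWQE carries the TCP parameters for the offloaded
 * connection.  The helper name and the keep-alive numbers are
 * hypothetical placeholders; the caller is assumed to pass a zeroed
 * request.
 */
static inline void
cnic_example_init_connect3(struct l4_kwq_connect_req3 *req, u16 mss,
			   u16 pmtu, u32 rcv_buf, u32 snd_buf)
{
	req->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	req->flags = L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	req->ka_timeout = 10000;	/* placeholder keep-alive timeout */
	req->ka_interval = 300000;	/* placeholder probe interval */
	req->ka_max_probe_count = 3;
	req->ttl = 64;
	req->mss = mss;
	req->pmtu = pmtu;
	req->rcv_buf = rcv_buf;
	req->snd_buf = snd_buf;
}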
/*
 * a KWQE request to offload a PG connection
 */
struct l4_kwq_offload_pg {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
	u8 op_code;
	u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved0;
	u8 op_code;
	u8 flags;
#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
#endif
#if defined(__BIG_ENDIAN)
	u8 l2hdr_nbytes;
	u8 pg_flags;
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
	u8 da0;
	u8 da1;
#elif defined(__LITTLE_ENDIAN)
	u8 da1;
	u8 da0;
	u8 pg_flags;
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
	u8 l2hdr_nbytes;
#endif
#if defined(__BIG_ENDIAN)
	u8 da2;
	u8 da3;
	u8 da4;
	u8 da5;
#elif defined(__LITTLE_ENDIAN)
	u8 da5;
	u8 da4;
	u8 da3;
	u8 da2;
#endif
#if defined(__BIG_ENDIAN)
	u8 sa0;
	u8 sa1;
	u8 sa2;
	u8 sa3;
#elif defined(__LITTLE_ENDIAN)
	u8 sa3;
	u8 sa2;
	u8 sa1;
	u8 sa0;
#endif
#if defined(__BIG_ENDIAN)
	u8 sa4;
	u8 sa5;
	u16 etype;
#elif defined(__LITTLE_ENDIAN)
	u16 etype;
	u8 sa5;
	u8 sa4;
#endif
#if defined(__BIG_ENDIAN)
	u16 vlan_tag;
	u16 ipid_start;
#elif defined(__LITTLE_ENDIAN)
	u16 ipid_start;
	u16 vlan_tag;
#endif
#if defined(__BIG_ENDIAN)
	u16 ipid_count;
	u16 reserved3;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved3;
	u16 ipid_count;
#endif
	u32 host_opaque;
};
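
/*
 * Illustrative sketch, not part of the upstream header: offloading a
 * PG connection hands the firmware the L2 header it should use, one
 * MAC byte per field.  The helper name is hypothetical; the caller is
 * assumed to pass a zeroed request.
 */
static inline void
cnic_example_init_offload_pg(struct l4_kwq_offload_pg *req,
			     const u8 da[6], const u8 sa[6], u16 etype)
{
	req->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	req->flags = L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	req->l2hdr_nbytes = 14;		/* untagged Ethernet header */
	req->da0 = da[0]; req->da1 = da[1]; req->da2 = da[2];
	req->da3 = da[3]; req->da4 = da[4]; req->da5 = da[5];
	req->sa0 = sa[0]; req->sa1 = sa[1]; req->sa2 = sa[2];
	req->sa3 = sa[3]; req->sa4 = sa[4]; req->sa5 = sa[5];
	req->etype = etype;
}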
/*
 * Abortively close the connection request
 */
struct l4_kwq_reset_req {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
	u8 op_code;
	u16 reserved0;
#elif defined(__LITTLE_ENDIAN)
	u16 reserved0;
	u8 op_code;
	u8 flags;
#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
#endif
	u32 cid;
	u32 reserved2[6];
};
/*
 * a KWQE request to update a PG connection
 */
struct l4_kwq_update_pg {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
	u8 opcode;
	u16 oper16;
#elif defined(__LITTLE_ENDIAN)
	u16 oper16;
	u8 opcode;
	u8 flags;
#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
#endif
	u32 pg_cid;
	u32 pg_host_opaque;
#if defined(__BIG_ENDIAN)
	u8 pg_valids;
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
	u8 pg_unused_a;
	u16 pg_ipid_count;
#elif defined(__LITTLE_ENDIAN)
	u16 pg_ipid_count;
	u8 pg_unused_a;
	u8 pg_valids;
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
#endif
#if defined(__BIG_ENDIAN)
	u16 reserverd3;
	u8 da0;
	u8 da1;
#elif defined(__LITTLE_ENDIAN)
	u8 da1;
	u8 da0;
	u16 reserverd3;
#endif
#if defined(__BIG_ENDIAN)
	u8 da2;
	u8 da3;
	u8 da4;
	u8 da5;
#elif defined(__LITTLE_ENDIAN)
	u8 da5;
	u8 da4;
	u8 da3;
	u8 da2;
#endif
	u32 reserved4;
	u32 reserved5;
};
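
/*
 * Illustrative sketch, not part of the upstream header: updating the
 * destination MAC of an existing PG connection.  The pg_valids bits
 * presumably tell the firmware which fields to apply.  The helper
 * name is hypothetical; the caller is assumed to pass a zeroed
 * request.
 */
static inline void
cnic_example_update_pg_da(struct l4_kwq_update_pg *req, u32 pg_cid,
			  const u8 da[6])
{
	req->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	req->flags = L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	req->pg_cid = pg_cid;
	req->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
	req->da0 = da[0]; req->da1 = da[1]; req->da2 = da[2];
	req->da3 = da[3]; req->da4 = da[4]; req->da5 = da[5];
}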
/*
 * a KWQE request to upload a PG or L4 context
 */
struct l4_kwq_upload {
#if defined(__BIG_ENDIAN)
	u8 flags;
#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
	u8 opcode;
	u16 oper16;
#elif defined(__LITTLE_ENDIAN)
	u16 oper16;
	u8 opcode;
	u8 flags;
#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
#endif
	u32 cid;
	u32 reserved2[6];
};
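
/*
 * Illustrative sketch, not part of the upstream header: the upload
 * KWQE that returns a PG or L4 context to the host only needs the
 * context id.  The helper name is hypothetical; the caller is assumed
 * to pass a zeroed request.
 */
static inline void cnic_example_init_upload(struct l4_kwq_upload *req,
					    u32 cid)
{
	req->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	req->flags = L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	req->cid = cid;
}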
#endif /* CNIC_DEFS_H */