
/drivers/net/vxge/vxge-traffic.c

https://bitbucket.org/ndreys/linux-sunxi
  1. /******************************************************************************
  2. * This software may be used and distributed according to the terms of
  3. * the GNU General Public License (GPL), incorporated herein by reference.
  4. * Drivers based on or derived from this code fall under the GPL and must
  5. * retain the authorship, copyright and license notice. This file is not
  6. * a complete program and may only be used when the entire operating
  7. * system is licensed under the GPL.
  8. * See the file COPYING in this distribution for more information.
  9. *
  10. * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
  11. * Virtualized Server Adapter.
  12. * Copyright(c) 2002-2010 Exar Corp.
  13. ******************************************************************************/
  14. #include <linux/etherdevice.h>
  15. #include <linux/prefetch.h>
  16. #include "vxge-traffic.h"
  17. #include "vxge-config.h"
  18. #include "vxge-main.h"
  19. /*
  20. * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
  21. * @vp: Virtual Path handle.
  22. *
  23. * Enable vpath interrupts. This function is to be executed last in the
  24. * vpath initialization sequence.
  25. *
  26. * See also: vxge_hw_vpath_intr_disable()
  27. */
  28. enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
  29. {
  30. u64 val64;
  31. struct __vxge_hw_virtualpath *vpath;
  32. struct vxge_hw_vpath_reg __iomem *vp_reg;
  33. enum vxge_hw_status status = VXGE_HW_OK;
  34. if (vp == NULL) {
  35. status = VXGE_HW_ERR_INVALID_HANDLE;
  36. goto exit;
  37. }
  38. vpath = vp->vpath;
  39. if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
  40. status = VXGE_HW_ERR_VPATH_NOT_OPEN;
  41. goto exit;
  42. }
  43. vp_reg = vpath->vp_reg;
  44. writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
  45. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  46. &vp_reg->general_errors_reg);
  47. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  48. &vp_reg->pci_config_errors_reg);
  49. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  50. &vp_reg->mrpcim_to_vpath_alarm_reg);
  51. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  52. &vp_reg->srpcim_to_vpath_alarm_reg);
  53. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  54. &vp_reg->vpath_ppif_int_status);
  55. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  56. &vp_reg->srpcim_msg_to_vpath_reg);
  57. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  58. &vp_reg->vpath_pcipif_int_status);
  59. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  60. &vp_reg->prc_alarm_reg);
  61. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  62. &vp_reg->wrdma_alarm_status);
  63. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  64. &vp_reg->asic_ntwk_vp_err_reg);
  65. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  66. &vp_reg->xgmac_vp_int_status);
  67. val64 = readq(&vp_reg->vpath_general_int_status);
  68. /* Mask unwanted interrupts */
  69. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  70. &vp_reg->vpath_pcipif_int_mask);
  71. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  72. &vp_reg->srpcim_msg_to_vpath_mask);
  73. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  74. &vp_reg->srpcim_to_vpath_alarm_mask);
  75. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  76. &vp_reg->mrpcim_to_vpath_alarm_mask);
  77. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  78. &vp_reg->pci_config_errors_mask);
  79. /* Unmask the individual interrupts */
  80. writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
  81. VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
  82. VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
  83. VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
  84. &vp_reg->general_errors_mask);
  85. __vxge_hw_pio_mem_write32_upper(
  86. (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
  87. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
  88. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
  89. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
  90. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
  91. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
  92. &vp_reg->kdfcctl_errors_mask);
  93. __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
  94. __vxge_hw_pio_mem_write32_upper(
  95. (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
  96. &vp_reg->prc_alarm_mask);
  97. __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
  98. __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
  99. if (vpath->hldev->first_vp_id != vpath->vp_id)
  100. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  101. &vp_reg->asic_ntwk_vp_err_mask);
  102. else
  103. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
  104. VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
  105. VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
  106. &vp_reg->asic_ntwk_vp_err_mask);
  107. __vxge_hw_pio_mem_write32_upper(0,
  108. &vp_reg->vpath_general_int_mask);
  109. exit:
  110. return status;
  111. }
  112. /*
  113. * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
  114. * @vp: Virtual Path handle.
  115. *
  116. * Disable vpath interrupts. This function is to be executed before the
  117. * vpath is closed.
  118. *
  119. * See also: vxge_hw_vpath_intr_enable()
  120. */
  121. enum vxge_hw_status vxge_hw_vpath_intr_disable(
  122. struct __vxge_hw_vpath_handle *vp)
  123. {
  124. u64 val64;
  125. struct __vxge_hw_virtualpath *vpath;
  126. enum vxge_hw_status status = VXGE_HW_OK;
  127. struct vxge_hw_vpath_reg __iomem *vp_reg;
  128. if (vp == NULL) {
  129. status = VXGE_HW_ERR_INVALID_HANDLE;
  130. goto exit;
  131. }
  132. vpath = vp->vpath;
  133. if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
  134. status = VXGE_HW_ERR_VPATH_NOT_OPEN;
  135. goto exit;
  136. }
  137. vp_reg = vpath->vp_reg;
  138. __vxge_hw_pio_mem_write32_upper(
  139. (u32)VXGE_HW_INTR_MASK_ALL,
  140. &vp_reg->vpath_general_int_mask);
  141. val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
  142. writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
  143. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  144. &vp_reg->general_errors_mask);
  145. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  146. &vp_reg->pci_config_errors_mask);
  147. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  148. &vp_reg->mrpcim_to_vpath_alarm_mask);
  149. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  150. &vp_reg->srpcim_to_vpath_alarm_mask);
  151. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  152. &vp_reg->vpath_ppif_int_mask);
  153. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  154. &vp_reg->srpcim_msg_to_vpath_mask);
  155. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  156. &vp_reg->vpath_pcipif_int_mask);
  157. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  158. &vp_reg->wrdma_alarm_mask);
  159. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  160. &vp_reg->prc_alarm_mask);
  161. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  162. &vp_reg->xgmac_vp_int_mask);
  163. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  164. &vp_reg->asic_ntwk_vp_err_mask);
  165. exit:
  166. return status;
  167. }
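/*
 * Editor's note: a minimal usage sketch of the two calls above (illustrative
 * only, not part of the driver). @vp is assumed to be a vpath handle obtained
 * when the vpath was opened. Enable is the last step of vpath bring-up,
 * disable is done before the vpath is closed:
 *
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_intr_enable(vp);
 *	if (status != VXGE_HW_OK)
 *		goto intr_enable_failed;
 *	...
 *	vxge_hw_vpath_intr_disable(vp);
 */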
  168. void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
  169. {
  170. struct vxge_hw_vpath_reg __iomem *vp_reg;
  171. struct vxge_hw_vp_config *config;
  172. u64 val64;
  173. if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
  174. return;
  175. vp_reg = fifo->vp_reg;
  176. config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
  177. if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
  178. config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
  179. val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
  180. val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
  181. fifo->tim_tti_cfg1_saved = val64;
  182. writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
  183. }
  184. }
  185. void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
  186. {
  187. u64 val64 = ring->tim_rti_cfg1_saved;
  188. val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
  189. ring->tim_rti_cfg1_saved = val64;
  190. writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
  191. }
  192. void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
  193. {
  194. u64 val64 = fifo->tim_tti_cfg3_saved;
  195. u64 timer = (fifo->rtimer * 1000) / 272;
  196. val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
  197. if (timer)
  198. val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
  199. VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
  200. writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
  201. /* tti_cfg3_saved is not updated again because it is
  202. * initialized at one place only - init time.
  203. */
  204. }
  205. void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
  206. {
  207. u64 val64 = ring->tim_rti_cfg3_saved;
  208. u64 timer = (ring->rtimer * 1000) / 272;
  209. val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
  210. if (timer)
  211. val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
  212. VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
  213. writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
  214. /* rti_cfg3_saved is not updated again because it is
  215. * initialized at one place only - init time.
  216. */
  217. }
  218. /**
  219. * vxge_hw_channel_msix_mask - Mask MSIX Vector.
  220. * @channel: Channel (Rx or Tx) handle
  221. * @msix_id: MSIX ID
  222. *
  223. * The function masks the msix interrupt for the given msix_id
  224. *
  225. * Returns: none
  226. */
  227. void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
  228. {
  229. __vxge_hw_pio_mem_write32_upper(
  230. (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
  231. &channel->common_reg->set_msix_mask_vect[msix_id%4]);
  232. }
  233. /**
  234. * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
  235. * @channel: Channel (Rx or Tx) handle
  236. * @msix_id: MSIX ID
  237. *
  238. * The function unmasks the msix interrupt for the given msix_id
  239. *
  240. * Returns: none
  241. */
  242. void
  243. vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
  244. {
  245. __vxge_hw_pio_mem_write32_upper(
  246. (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
  247. &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
  248. }
  249. /**
  250. * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
  251. * @channel: Channel (Rx or Tx) handle
  252. * @msix_id: MSIX ID
  253. *
  254. * The function clears the msix interrupt for the given msix_id
  255. * when the device is configured in MSIX one-shot mode
  256. *
  257. * Returns: none
  258. */
  259. void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
  260. {
  261. __vxge_hw_pio_mem_write32_upper(
  262. (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
  263. &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
  264. }
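/*
 * Editor's note: an illustrative sketch (not part of the driver) of how the
 * three MSI-X helpers above are typically paired inside a per-vector
 * interrupt handler; @channel, @msix_id and @one_shot are assumed to be
 * provided by the caller:
 *
 *	vxge_hw_channel_msix_mask(channel, msix_id);
 *	... process Rx/Tx completions for this vector ...
 *	if (one_shot)
 *		vxge_hw_channel_msix_clear(channel, msix_id);
 *	else
 *		vxge_hw_channel_msix_unmask(channel, msix_id);
 */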
  265. /**
  266. * vxge_hw_device_set_intr_type - Updates the configuration
  267. * with new interrupt type.
  268. * @hldev: HW device handle.
  269. * @intr_mode: New interrupt type
  270. */
  271. u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
  272. {
  273. if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
  274. (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
  275. (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
  276. (intr_mode != VXGE_HW_INTR_MODE_DEF))
  277. intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
  278. hldev->config.intr_mode = intr_mode;
  279. return intr_mode;
  280. }
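/*
 * Editor's note: a short illustrative call (not part of the driver). The
 * returned value is the mode actually recorded in hldev->config.intr_mode;
 * anything the function does not recognize is coerced to
 * VXGE_HW_INTR_MODE_IRQLINE. @hldev and @requested_mode are assumed to come
 * from the caller:
 *
 *	u32 mode = vxge_hw_device_set_intr_type(hldev, requested_mode);
 */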
  281. /**
  282. * vxge_hw_device_intr_enable - Enable interrupts.
  283. * @hldev: HW device handle.
  286. *
  287. * Enable Titan interrupts. The function is to be executed the last in
  288. * Titan initialization sequence.
  289. *
  290. * See also: vxge_hw_device_intr_disable()
  291. */
  292. void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
  293. {
  294. u32 i;
  295. u64 val64;
  296. u32 val32;
  297. vxge_hw_device_mask_all(hldev);
  298. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  299. if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
  300. continue;
  301. vxge_hw_vpath_intr_enable(
  302. VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
  303. }
  304. if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
  305. val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
  306. hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
  307. if (val64 != 0) {
  308. writeq(val64, &hldev->common_reg->tim_int_status0);
  309. writeq(~val64, &hldev->common_reg->tim_int_mask0);
  310. }
  311. val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
  312. hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
  313. if (val32 != 0) {
  314. __vxge_hw_pio_mem_write32_upper(val32,
  315. &hldev->common_reg->tim_int_status1);
  316. __vxge_hw_pio_mem_write32_upper(~val32,
  317. &hldev->common_reg->tim_int_mask1);
  318. }
  319. }
  320. val64 = readq(&hldev->common_reg->titan_general_int_status);
  321. vxge_hw_device_unmask_all(hldev);
  322. }
  323. /**
  324. * vxge_hw_device_intr_disable - Disable Titan interrupts.
  325. * @hldev: HW device handle.
  328. *
  329. * Disable Titan interrupts.
  330. *
  331. * See also: vxge_hw_device_intr_enable()
  332. */
  333. void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
  334. {
  335. u32 i;
  336. vxge_hw_device_mask_all(hldev);
  337. /* mask all the tim interrupts */
  338. writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
  339. __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
  340. &hldev->common_reg->tim_int_mask1);
  341. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  342. if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
  343. continue;
  344. vxge_hw_vpath_intr_disable(
  345. VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
  346. }
  347. }
  348. /**
  349. * vxge_hw_device_mask_all - Mask all device interrupts.
  350. * @hldev: HW device handle.
  351. *
  352. * Mask all device interrupts.
  353. *
  354. * See also: vxge_hw_device_unmask_all()
  355. */
  356. void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
  357. {
  358. u64 val64;
  359. val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
  360. VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
  361. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
  362. &hldev->common_reg->titan_mask_all_int);
  363. }
  364. /**
  365. * vxge_hw_device_unmask_all - Unmask all device interrupts.
  366. * @hldev: HW device handle.
  367. *
  368. * Unmask all device interrupts.
  369. *
  370. * See also: vxge_hw_device_mask_all()
  371. */
  372. void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
  373. {
  374. u64 val64 = 0;
  375. if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
  376. val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
  377. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
  378. &hldev->common_reg->titan_mask_all_int);
  379. }
  380. /**
  381. * vxge_hw_device_flush_io - Flush io writes.
  382. * @hldev: HW device handle.
  383. *
  384. * The function performs a read operation to flush io writes.
  385. *
  386. * Returns: void
  387. */
  388. void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
  389. {
  390. u32 val32;
  391. val32 = readl(&hldev->common_reg->titan_general_int_status);
  392. }
  393. /**
  394. * __vxge_hw_device_handle_error - Handle error
  395. * @hldev: HW device
  396. * @vp_id: Vpath Id
  397. * @type: Error type. Please see enum vxge_hw_event{}
  398. *
  399. * Handle error.
  400. */
  401. static enum vxge_hw_status
  402. __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
  403. enum vxge_hw_event type)
  404. {
  405. switch (type) {
  406. case VXGE_HW_EVENT_UNKNOWN:
  407. break;
  408. case VXGE_HW_EVENT_RESET_START:
  409. case VXGE_HW_EVENT_RESET_COMPLETE:
  410. case VXGE_HW_EVENT_LINK_DOWN:
  411. case VXGE_HW_EVENT_LINK_UP:
  412. goto out;
  413. case VXGE_HW_EVENT_ALARM_CLEARED:
  414. goto out;
  415. case VXGE_HW_EVENT_ECCERR:
  416. case VXGE_HW_EVENT_MRPCIM_ECCERR:
  417. goto out;
  418. case VXGE_HW_EVENT_FIFO_ERR:
  419. case VXGE_HW_EVENT_VPATH_ERR:
  420. case VXGE_HW_EVENT_CRITICAL_ERR:
  421. case VXGE_HW_EVENT_SERR:
  422. break;
  423. case VXGE_HW_EVENT_SRPCIM_SERR:
  424. case VXGE_HW_EVENT_MRPCIM_SERR:
  425. goto out;
  426. case VXGE_HW_EVENT_SLOT_FREEZE:
  427. break;
  428. default:
  429. vxge_assert(0);
  430. goto out;
  431. }
  432. /* notify driver */
  433. if (hldev->uld_callbacks.crit_err)
  434. hldev->uld_callbacks.crit_err(
  435. (struct __vxge_hw_device *)hldev,
  436. type, vp_id);
  437. out:
  438. return VXGE_HW_OK;
  439. }
  440. /*
  441. * __vxge_hw_device_handle_link_down_ind
  442. * @hldev: HW device handle.
  443. *
  444. * Link down indication handler. The function is invoked by HW when
  445. * Titan indicates that the link is down.
  446. */
  447. static enum vxge_hw_status
  448. __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
  449. {
  450. /*
  451. * If the previous link state is not down, return.
  452. */
  453. if (hldev->link_state == VXGE_HW_LINK_DOWN)
  454. goto exit;
  455. hldev->link_state = VXGE_HW_LINK_DOWN;
  456. /* notify driver */
  457. if (hldev->uld_callbacks.link_down)
  458. hldev->uld_callbacks.link_down(hldev);
  459. exit:
  460. return VXGE_HW_OK;
  461. }
  462. /*
  463. * __vxge_hw_device_handle_link_up_ind
  464. * @hldev: HW device handle.
  465. *
  466. * Link up indication handler. The function is invoked by HW when
  467. * Titan indicates that the link is up for a programmable amount of time.
  468. */
  469. static enum vxge_hw_status
  470. __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
  471. {
  472. /*
  473. * If the previous link state is not down, return.
  474. */
  475. if (hldev->link_state == VXGE_HW_LINK_UP)
  476. goto exit;
  477. hldev->link_state = VXGE_HW_LINK_UP;
  478. /* notify driver */
  479. if (hldev->uld_callbacks.link_up)
  480. hldev->uld_callbacks.link_up(hldev);
  481. exit:
  482. return VXGE_HW_OK;
  483. }
  484. /*
  485. * __vxge_hw_vpath_alarm_process - Process Alarms.
  486. * @vpath: Virtual Path.
  487. * @skip_alarms: Do not clear the alarms
  488. *
  489. * Process vpath alarms.
  490. *
  491. */
  492. static enum vxge_hw_status
  493. __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
  494. u32 skip_alarms)
  495. {
  496. u64 val64;
  497. u64 alarm_status;
  498. u64 pic_status;
  499. struct __vxge_hw_device *hldev = NULL;
  500. enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
  501. u64 mask64;
  502. struct vxge_hw_vpath_stats_sw_info *sw_stats;
  503. struct vxge_hw_vpath_reg __iomem *vp_reg;
  504. if (vpath == NULL) {
  505. alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
  506. alarm_event);
  507. goto out2;
  508. }
  509. hldev = vpath->hldev;
  510. vp_reg = vpath->vp_reg;
  511. alarm_status = readq(&vp_reg->vpath_general_int_status);
  512. if (alarm_status == VXGE_HW_ALL_FOXES) {
  513. alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
  514. alarm_event);
  515. goto out;
  516. }
  517. sw_stats = vpath->sw_stats;
  518. if (alarm_status & ~(
  519. VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
  520. VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
  521. VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
  522. VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
  523. sw_stats->error_stats.unknown_alarms++;
  524. alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
  525. alarm_event);
  526. goto out;
  527. }
  528. if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
  529. val64 = readq(&vp_reg->xgmac_vp_int_status);
  530. if (val64 &
  531. VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
  532. val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
  533. if (((val64 &
  534. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
  535. (!(val64 &
  536. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
  537. ((val64 &
  538. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
  539. (!(val64 &
  540. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
  541. ))) {
  542. sw_stats->error_stats.network_sustained_fault++;
  543. writeq(
  544. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
  545. &vp_reg->asic_ntwk_vp_err_mask);
  546. __vxge_hw_device_handle_link_down_ind(hldev);
  547. alarm_event = VXGE_HW_SET_LEVEL(
  548. VXGE_HW_EVENT_LINK_DOWN, alarm_event);
  549. }
  550. if (((val64 &
  551. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
  552. (!(val64 &
  553. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
  554. ((val64 &
  555. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
  556. (!(val64 &
  557. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
  558. ))) {
  559. sw_stats->error_stats.network_sustained_ok++;
  560. writeq(
  561. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
  562. &vp_reg->asic_ntwk_vp_err_mask);
  563. __vxge_hw_device_handle_link_up_ind(hldev);
  564. alarm_event = VXGE_HW_SET_LEVEL(
  565. VXGE_HW_EVENT_LINK_UP, alarm_event);
  566. }
  567. writeq(VXGE_HW_INTR_MASK_ALL,
  568. &vp_reg->asic_ntwk_vp_err_reg);
  569. alarm_event = VXGE_HW_SET_LEVEL(
  570. VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
  571. if (skip_alarms)
  572. return VXGE_HW_OK;
  573. }
  574. }
  575. if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
  576. pic_status = readq(&vp_reg->vpath_ppif_int_status);
  577. if (pic_status &
  578. VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
  579. val64 = readq(&vp_reg->general_errors_reg);
  580. mask64 = readq(&vp_reg->general_errors_mask);
  581. if ((val64 &
  582. VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
  583. ~mask64) {
  584. sw_stats->error_stats.ini_serr_det++;
  585. alarm_event = VXGE_HW_SET_LEVEL(
  586. VXGE_HW_EVENT_SERR, alarm_event);
  587. }
  588. if ((val64 &
  589. VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
  590. ~mask64) {
  591. sw_stats->error_stats.dblgen_fifo0_overflow++;
  592. alarm_event = VXGE_HW_SET_LEVEL(
  593. VXGE_HW_EVENT_FIFO_ERR, alarm_event);
  594. }
  595. if ((val64 &
  596. VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
  597. ~mask64)
  598. sw_stats->error_stats.statsb_pif_chain_error++;
  599. if ((val64 &
  600. VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
  601. ~mask64)
  602. sw_stats->error_stats.statsb_drop_timeout++;
  603. if ((val64 &
  604. VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
  605. ~mask64)
  606. sw_stats->error_stats.target_illegal_access++;
  607. if (!skip_alarms) {
  608. writeq(VXGE_HW_INTR_MASK_ALL,
  609. &vp_reg->general_errors_reg);
  610. alarm_event = VXGE_HW_SET_LEVEL(
  611. VXGE_HW_EVENT_ALARM_CLEARED,
  612. alarm_event);
  613. }
  614. }
  615. if (pic_status &
  616. VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
  617. val64 = readq(&vp_reg->kdfcctl_errors_reg);
  618. mask64 = readq(&vp_reg->kdfcctl_errors_mask);
  619. if ((val64 &
  620. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
  621. ~mask64) {
  622. sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
  623. alarm_event = VXGE_HW_SET_LEVEL(
  624. VXGE_HW_EVENT_FIFO_ERR,
  625. alarm_event);
  626. }
  627. if ((val64 &
  628. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
  629. ~mask64) {
  630. sw_stats->error_stats.kdfcctl_fifo0_poison++;
  631. alarm_event = VXGE_HW_SET_LEVEL(
  632. VXGE_HW_EVENT_FIFO_ERR,
  633. alarm_event);
  634. }
  635. if ((val64 &
  636. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
  637. ~mask64) {
  638. sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
  639. alarm_event = VXGE_HW_SET_LEVEL(
  640. VXGE_HW_EVENT_FIFO_ERR,
  641. alarm_event);
  642. }
  643. if (!skip_alarms) {
  644. writeq(VXGE_HW_INTR_MASK_ALL,
  645. &vp_reg->kdfcctl_errors_reg);
  646. alarm_event = VXGE_HW_SET_LEVEL(
  647. VXGE_HW_EVENT_ALARM_CLEARED,
  648. alarm_event);
  649. }
  650. }
  651. }
  652. if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
  653. val64 = readq(&vp_reg->wrdma_alarm_status);
  654. if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
  655. val64 = readq(&vp_reg->prc_alarm_reg);
  656. mask64 = readq(&vp_reg->prc_alarm_mask);
  657. if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
  658. ~mask64)
  659. sw_stats->error_stats.prc_ring_bumps++;
  660. if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
  661. ~mask64) {
  662. sw_stats->error_stats.prc_rxdcm_sc_err++;
  663. alarm_event = VXGE_HW_SET_LEVEL(
  664. VXGE_HW_EVENT_VPATH_ERR,
  665. alarm_event);
  666. }
  667. if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
  668. & ~mask64) {
  669. sw_stats->error_stats.prc_rxdcm_sc_abort++;
  670. alarm_event = VXGE_HW_SET_LEVEL(
  671. VXGE_HW_EVENT_VPATH_ERR,
  672. alarm_event);
  673. }
  674. if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
  675. & ~mask64) {
  676. sw_stats->error_stats.prc_quanta_size_err++;
  677. alarm_event = VXGE_HW_SET_LEVEL(
  678. VXGE_HW_EVENT_VPATH_ERR,
  679. alarm_event);
  680. }
  681. if (!skip_alarms) {
  682. writeq(VXGE_HW_INTR_MASK_ALL,
  683. &vp_reg->prc_alarm_reg);
  684. alarm_event = VXGE_HW_SET_LEVEL(
  685. VXGE_HW_EVENT_ALARM_CLEARED,
  686. alarm_event);
  687. }
  688. }
  689. }
  690. out:
  691. hldev->stats.sw_dev_err_stats.vpath_alarms++;
  692. out2:
  693. if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
  694. (alarm_event == VXGE_HW_EVENT_UNKNOWN))
  695. return VXGE_HW_OK;
  696. __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
  697. if (alarm_event == VXGE_HW_EVENT_SERR)
  698. return VXGE_HW_ERR_CRITICAL;
  699. return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
  700. VXGE_HW_ERR_SLOT_FREEZE :
  701. (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
  702. VXGE_HW_ERR_VPATH;
  703. }
  704. /**
  705. * vxge_hw_device_begin_irq - Begin IRQ processing.
  706. * @hldev: HW device handle.
  707. * @skip_alarms: Do not clear the alarms
  708. * @reason: "Reason" for the interrupt, the value of Titan's
  709. * general_int_status register.
  710. *
  711. * The function performs two actions: it first checks whether the interrupt
  712. * was raised by the device (shared IRQ case), then it masks the device interrupts.
  713. *
  714. * Note:
  715. * vxge_hw_device_begin_irq() does not flush MMIO writes through the
  716. * bridge. Therefore, two back-to-back interrupts are potentially possible.
  717. *
  718. * Returns: 0, if the interrupt is not "ours" (note that in this case the
  719. * device remains enabled).
  720. * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
  721. * status through the @reason out-parameter.
  722. */
  723. enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
  724. u32 skip_alarms, u64 *reason)
  725. {
  726. u32 i;
  727. u64 val64;
  728. u64 adapter_status;
  729. u64 vpath_mask;
  730. enum vxge_hw_status ret = VXGE_HW_OK;
  731. val64 = readq(&hldev->common_reg->titan_general_int_status);
  732. if (unlikely(!val64)) {
  733. /* not Titan interrupt */
  734. *reason = 0;
  735. ret = VXGE_HW_ERR_WRONG_IRQ;
  736. goto exit;
  737. }
  738. if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
  739. adapter_status = readq(&hldev->common_reg->adapter_status);
  740. if (adapter_status == VXGE_HW_ALL_FOXES) {
  741. __vxge_hw_device_handle_error(hldev,
  742. NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
  743. *reason = 0;
  744. ret = VXGE_HW_ERR_SLOT_FREEZE;
  745. goto exit;
  746. }
  747. }
  748. hldev->stats.sw_dev_info_stats.total_intr_cnt++;
  749. *reason = val64;
  750. vpath_mask = hldev->vpaths_deployed >>
  751. (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
  752. if (val64 &
  753. VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
  754. hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
  755. return VXGE_HW_OK;
  756. }
  757. hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
  758. if (unlikely(val64 &
  759. VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
  760. enum vxge_hw_status error_level = VXGE_HW_OK;
  761. hldev->stats.sw_dev_err_stats.vpath_alarms++;
  762. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  763. if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
  764. continue;
  765. ret = __vxge_hw_vpath_alarm_process(
  766. &hldev->virtual_paths[i], skip_alarms);
  767. error_level = VXGE_HW_SET_LEVEL(ret, error_level);
  768. if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
  769. (ret == VXGE_HW_ERR_SLOT_FREEZE)))
  770. break;
  771. }
  772. ret = error_level;
  773. }
  774. exit:
  775. return ret;
  776. }
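/*
 * Editor's note: a condensed interrupt-handler sketch (illustrative only,
 * not the driver's real ISR in vxge-main.c) showing the intended use of
 * vxge_hw_device_begin_irq() together with vxge_hw_device_clear_tx_rx()
 * below; @hldev and @vpath_mask are assumed to come from the caller:
 *
 *	u64 reason;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_device_begin_irq(hldev, 0, &reason);
 *	if (status == VXGE_HW_ERR_WRONG_IRQ)
 *		return IRQ_NONE;
 *	if (reason &
 *	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
 *		vxge_hw_device_clear_tx_rx(hldev);
 *		... schedule NAPI / process the rings ...
 *	}
 *	return IRQ_HANDLED;
 */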
  777. /**
  778. * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
  779. * condition that has caused the Tx and RX interrupt.
  780. * @hldev: HW device.
  781. *
  782. * Acknowledge (that is, clear) the condition that has caused
  783. * the Tx and Rx interrupt.
  784. * See also: vxge_hw_device_begin_irq(),
  785. * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
  786. */
  787. void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
  788. {
  789. if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
  790. (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
  791. writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
  792. hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
  793. &hldev->common_reg->tim_int_status0);
  794. }
  795. if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
  796. (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
  797. __vxge_hw_pio_mem_write32_upper(
  798. (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
  799. hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
  800. &hldev->common_reg->tim_int_status1);
  801. }
  802. }
  803. /*
  804. * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
  805. * @channel: Channel
  806. * @dtrh: Buffer to return the DTR pointer
  807. *
  808. * Allocates a dtr from the reserve array. If the reserve array is empty,
  809. * it swaps the reserve and free arrays.
  810. *
  811. */
  812. static enum vxge_hw_status
  813. vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
  814. {
  815. void **tmp_arr;
  816. if (channel->reserve_ptr - channel->reserve_top > 0) {
  817. _alloc_after_swap:
  818. *dtrh = channel->reserve_arr[--channel->reserve_ptr];
  819. return VXGE_HW_OK;
  820. }
  821. /* switch between empty and full arrays */
  822. /* the idea behind such a design is that by having free and reserved
  823. * arrays separated we basically separated irq and non-irq parts.
  824. * i.e. no additional lock need to be done when we free a resource */
  825. if (channel->length - channel->free_ptr > 0) {
  826. tmp_arr = channel->reserve_arr;
  827. channel->reserve_arr = channel->free_arr;
  828. channel->free_arr = tmp_arr;
  829. channel->reserve_ptr = channel->length;
  830. channel->reserve_top = channel->free_ptr;
  831. channel->free_ptr = channel->length;
  832. channel->stats->reserve_free_swaps_cnt++;
  833. goto _alloc_after_swap;
  834. }
  835. channel->stats->full_cnt++;
  836. *dtrh = NULL;
  837. return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
  838. }
  839. /*
  840. * vxge_hw_channel_dtr_post - Post a dtr to the channel
  841. * @channelh: Channel
  842. * @dtrh: DTR pointer
  843. *
  844. * Posts a dtr to work array.
  845. *
  846. */
  847. static void
  848. vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
  849. {
  850. vxge_assert(channel->work_arr[channel->post_index] == NULL);
  851. channel->work_arr[channel->post_index++] = dtrh;
  852. /* wrap-around */
  853. if (channel->post_index == channel->length)
  854. channel->post_index = 0;
  855. }
  856. /*
  857. * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
  858. * @channel: Channel
  859. * @dtr: Buffer to return the next completed DTR pointer
  860. *
  861. * Returns the next completed dtr without removing it from the work array
  862. *
  863. */
  864. void
  865. vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
  866. {
  867. vxge_assert(channel->compl_index < channel->length);
  868. *dtrh = channel->work_arr[channel->compl_index];
  869. prefetch(*dtrh);
  870. }
  871. /*
  872. * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
  873. * @channel: Channel handle
  874. *
  875. * Removes the next completed dtr from work array
  876. *
  877. */
  878. void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
  879. {
  880. channel->work_arr[channel->compl_index] = NULL;
  881. /* wrap-around */
  882. if (++channel->compl_index == channel->length)
  883. channel->compl_index = 0;
  884. channel->stats->total_compl_cnt++;
  885. }
  886. /*
  887. * vxge_hw_channel_dtr_free - Frees a dtr
  888. * @channel: Channel handle
  889. * @dtr: DTR pointer
  890. *
  891. * Returns the dtr to free array
  892. *
  893. */
  894. void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
  895. {
  896. channel->free_arr[--channel->free_ptr] = dtrh;
  897. }
  898. /*
  899. * vxge_hw_channel_dtr_count
  900. * @channel: Channel handle. Obtained via vxge_hw_channel_open().
  901. *
  902. * Retrieve the number of DTRs available. This function cannot be called
  903. * from the data path. ring_initial_replenishi() is the only user.
  904. */
  905. int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
  906. {
  907. return (channel->reserve_ptr - channel->reserve_top) +
  908. (channel->length - channel->free_ptr);
  909. }
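/*
 * Editor's note (illustration, not driver code): the channel keeps two
 * arrays - DTRs are handed out from reserve_arr (reserve_ptr counting down
 * towards reserve_top) and returned to free_arr (free_ptr counting down from
 * length); when reserve_arr runs dry the two arrays are swapped. The number
 * of DTRs still available is therefore:
 *
 *	avail = (reserve_ptr - reserve_top) + (length - free_ptr);
 *
 * which is exactly what vxge_hw_channel_dtr_count() above returns.
 */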
  910. /**
  911. * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
  912. * @ring: Handle to the ring object used for receive
  913. * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
  914. * with a valid handle.
  915. *
  916. * Reserve Rx descriptor for the subsequent filling-in driver
  917. * and posting on the corresponding channel (@channelh)
  918. * via vxge_hw_ring_rxd_post().
  919. *
  920. * Returns: VXGE_HW_OK - success.
  921. * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
  922. *
  923. */
  924. enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
  925. void **rxdh)
  926. {
  927. enum vxge_hw_status status;
  928. struct __vxge_hw_channel *channel;
  929. channel = &ring->channel;
  930. status = vxge_hw_channel_dtr_alloc(channel, rxdh);
  931. if (status == VXGE_HW_OK) {
  932. struct vxge_hw_ring_rxd_1 *rxdp =
  933. (struct vxge_hw_ring_rxd_1 *)*rxdh;
  934. rxdp->control_0 = rxdp->control_1 = 0;
  935. }
  936. return status;
  937. }
  938. /**
  939. * vxge_hw_ring_rxd_free - Free descriptor.
  940. * @ring: Handle to the ring object used for receive
  941. * @rxdh: Descriptor handle.
  942. *
  943. * Free the reserved descriptor. This operation is "symmetrical" to
  944. * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
  945. * lifecycle.
  946. *
  947. * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
  948. * be:
  949. *
  950. * - reserved (vxge_hw_ring_rxd_reserve);
  951. *
  952. * - posted (vxge_hw_ring_rxd_post);
  953. *
  954. * - completed (vxge_hw_ring_rxd_next_completed);
  955. *
  956. * - and recycled again (vxge_hw_ring_rxd_free).
  957. *
  958. * For alternative state transitions and more details please refer to
  959. * the design doc.
  960. *
  961. */
  962. void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
  963. {
  964. struct __vxge_hw_channel *channel;
  965. channel = &ring->channel;
  966. vxge_hw_channel_dtr_free(channel, rxdh);
  967. }
  968. /**
  969. * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
  970. * @ring: Handle to the ring object used for receive
  971. * @rxdh: Descriptor handle.
  972. *
  973. * This routine prepares a rxd and posts
  974. */
  975. void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
  976. {
  977. struct __vxge_hw_channel *channel;
  978. channel = &ring->channel;
  979. vxge_hw_channel_dtr_post(channel, rxdh);
  980. }
  981. /**
  982. * vxge_hw_ring_rxd_post_post - Process rxd after post.
  983. * @ring: Handle to the ring object used for receive
  984. * @rxdh: Descriptor handle.
  985. *
  986. * Processes rxd after post
  987. */
  988. void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
  989. {
  990. struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
  991. struct __vxge_hw_channel *channel;
  992. channel = &ring->channel;
  993. rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
  994. if (ring->stats->common_stats.usage_cnt > 0)
  995. ring->stats->common_stats.usage_cnt--;
  996. }
  997. /**
  998. * vxge_hw_ring_rxd_post - Post descriptor on the ring.
  999. * @ring: Handle to the ring object used for receive
  1000. * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
  1001. *
  1002. * Post descriptor on the ring.
  1003. * Prior to posting the descriptor should be filled in accordance with
  1004. * Host/Titan interface specification for a given service (LL, etc.).
  1005. *
  1006. */
  1007. void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
  1008. {
  1009. struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
  1010. struct __vxge_hw_channel *channel;
  1011. channel = &ring->channel;
  1012. wmb();
  1013. rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
  1014. vxge_hw_channel_dtr_post(channel, rxdh);
  1015. if (ring->stats->common_stats.usage_cnt > 0)
  1016. ring->stats->common_stats.usage_cnt--;
  1017. }
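/*
 * Editor's note: an illustrative Rx-replenish loop (not part of the driver);
 * buffer allocation, DMA mapping and the RxD buffer/size field setup are
 * assumed to be done by the caller per the Titan descriptor format:
 *
 *	void *rxdh;
 *
 *	while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		... fill in the RxD buffer pointer and size fields ...
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */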
  1018. /**
  1019. * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
  1020. * @ring: Handle to the ring object used for receive
  1021. * @rxdh: Descriptor handle.
  1022. *
  1023. * Processes rxd after post with memory barrier.
  1024. */
  1025. void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
  1026. {
  1027. wmb();
  1028. vxge_hw_ring_rxd_post_post(ring, rxdh);
  1029. }
  1030. /**
  1031. * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
  1032. * @ring: Handle to the ring object used for receive
  1033. * @rxdh: Descriptor handle. Returned by HW.
  1034. * @t_code: Transfer code, as per Titan User Guide,
  1035. * Receive Descriptor Format. Returned by HW.
  1036. *
  1037. * Retrieve the _next_ completed descriptor.
  1038. * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
  1039. * driver of new completed descriptors. After that
  1040. * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
  1041. * completions (the very first completion is passed by HW via
  1042. * vxge_hw_ring_callback_f).
  1043. *
  1044. * Implementation-wise, the driver is free to call
  1045. * vxge_hw_ring_rxd_next_completed either immediately from inside the
  1046. * ring callback, or in a deferred fashion and separate (from HW)
  1047. * context.
  1048. *
  1049. * Non-zero @t_code means failure to fill-in receive buffer(s)
  1050. * of the descriptor.
  1051. * For instance, parity error detected during the data transfer.
  1052. * In this case Titan will complete the descriptor and indicate
  1053. * for the host that the received data is not to be used.
  1054. * For details please refer to Titan User Guide.
  1055. *
  1056. * Returns: VXGE_HW_OK - success.
  1057. * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
  1058. * are currently available for processing.
  1059. *
  1060. * See also: vxge_hw_ring_callback_f{},
  1061. * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
  1062. */
  1063. enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
  1064. struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
  1065. {
  1066. struct __vxge_hw_channel *channel;
  1067. struct vxge_hw_ring_rxd_1 *rxdp;
  1068. enum vxge_hw_status status = VXGE_HW_OK;
  1069. u64 control_0, own;
  1070. channel = &ring->channel;
  1071. vxge_hw_channel_dtr_try_complete(channel, rxdh);
  1072. rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
  1073. if (rxdp == NULL) {
  1074. status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
  1075. goto exit;
  1076. }
  1077. control_0 = rxdp->control_0;
  1078. own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
  1079. *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
  1080. /* check whether it is not the end */
  1081. if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
  1082. vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
  1083. 0);
  1084. ++ring->cmpl_cnt;
  1085. vxge_hw_channel_dtr_complete(channel);
  1086. vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
  1087. ring->stats->common_stats.usage_cnt++;
  1088. if (ring->stats->common_stats.usage_max <
  1089. ring->stats->common_stats.usage_cnt)
  1090. ring->stats->common_stats.usage_max =
  1091. ring->stats->common_stats.usage_cnt;
  1092. status = VXGE_HW_OK;
  1093. goto exit;
  1094. }
  1095. /* reset it. since we don't want to return
  1096. * garbage to the driver */
  1097. *rxdh = NULL;
  1098. status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
  1099. exit:
  1100. return status;
  1101. }
  1102. /**
  1103. * vxge_hw_ring_handle_tcode - Handle transfer code.
  1104. * @ring: Handle to the ring object used for receive
  1105. * @rxdh: Descriptor handle.
  1106. * @t_code: One of the enumerated (and documented in the Titan user guide)
  1107. * "transfer codes".
  1108. *
  1109. * Handle descriptor's transfer code. The latter comes with each completed
  1110. * descriptor.
  1111. *
  1112. * Returns: one of the enum vxge_hw_status{} enumerated types.
  1113. * VXGE_HW_OK - for success.
  1114. * VXGE_HW_ERR_CRITICAL - when encounters critical error.
  1115. */
  1116. enum vxge_hw_status vxge_hw_ring_handle_tcode(
  1117. struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
  1118. {
  1119. struct __vxge_hw_channel *channel;
  1120. enum vxge_hw_status status = VXGE_HW_OK;
  1121. channel = &ring->channel;
  1122. /* If the t_code is not supported, drop the packet, unless the
  1123. * t_code is 0x5 (an unparseable packet, e.g. one with an unknown
  1124. * IPv6 header), which is still passed up.
  1125. */
  1126. if (t_code == VXGE_HW_RING_T_CODE_OK ||
  1127. t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
  1128. status = VXGE_HW_OK;
  1129. goto exit;
  1130. }
  1131. if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
  1132. status = VXGE_HW_ERR_INVALID_TCODE;
  1133. goto exit;
  1134. }
  1135. ring->stats->rxd_t_code_err_cnt[t_code]++;
  1136. exit:
  1137. return status;
  1138. }
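/*
 * Editor's note: an illustrative Rx completion loop (not part of the
 * driver), combining vxge_hw_ring_rxd_next_completed() and
 * vxge_hw_ring_handle_tcode(); delivering the data and re-posting the RxD
 * are left to the caller:
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *							VXGE_HW_OK) {
 *		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) !=
 *							VXGE_HW_OK) {
 *			... recycle the descriptor, e.g. re-post it ...
 *			continue;
 *		}
 *		... hand the buffer to the stack, then refill and re-post ...
 *	}
 */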
  1139. /**
  1140. * __vxge_hw_non_offload_db_post - Post non offload doorbell
  1141. *
  1142. * @fifo: fifohandle
  1143. * @txdl_ptr: The starting location of the TxDL in host memory
  1144. * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
  1145. * @no_snoop: No snoop flags
  1146. *
  1147. * This function posts a non-offload doorbell to doorbell FIFO
  1148. *
  1149. */
  1150. static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
  1151. u64 txdl_ptr, u32 num_txds, u32 no_snoop)
  1152. {
  1153. struct __vxge_hw_channel *channel;
  1154. channel = &fifo->channel;
  1155. writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
  1156. VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
  1157. VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
  1158. &fifo->nofl_db->control_0);
  1159. mmiowb();
  1160. writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
  1161. mmiowb();
  1162. }
  1163. /**
  1164. * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
  1165. * the fifo
  1166. * @fifoh: Handle to the fifo object used for non offload send
  1167. */
  1168. u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
  1169. {
  1170. return vxge_hw_channel_dtr_count(&fifoh->channel);
  1171. }
  1172. /**
  1173. * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
  1174. * @fifoh: Handle to the fifo object used for non offload send
  1175. * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
  1176. * with a valid handle.
  1177. * @txdl_priv: Buffer to return the pointer to per txdl space
  1178. *
  1179. * Reserve a single TxDL (that is, fifo descriptor)
  1180. * for the subsequent filling-in by the driver
  1181. * and posting on the corresponding channel (@channelh)
  1182. * via vxge_hw_fifo_txdl_post().
  1183. *
  1184. * Note: it is the responsibility of driver to reserve multiple descriptors
  1185. * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
  1186. * carries up to configured number (fifo.max_frags) of contiguous buffers.
  1187. *
  1188. * Returns: VXGE_HW_OK - success;
  1189. * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
  1190. *
  1191. */
  1192. enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
  1193. struct __vxge_hw_fifo *fifo,
  1194. void **txdlh, void **txdl_priv)
  1195. {
  1196. struct __vxge_hw_channel *channel;
  1197. enum vxge_hw_status status;
  1198. int i;
  1199. channel = &fifo->channel;
  1200. status = vxge_hw_channel_dtr_alloc(channel, txdlh);
  1201. if (status == VXGE_HW_OK) {
  1202. struct vxge_hw_fifo_txd *txdp =
  1203. (struct vxge_hw_fifo_txd *)*txdlh;
  1204. struct __vxge_hw_fifo_txdl_priv *priv;
  1205. priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
  1206. /* reset the TxDL's private */
  1207. priv->align_dma_offset = 0;
  1208. priv->align_vaddr_start = priv->align_vaddr;
  1209. priv->align_used_frags = 0;
  1210. priv->frags = 0;
  1211. priv->alloc_frags = fifo->config->max_frags;
  1212. priv->next_txdl_priv = NULL;
  1213. *txdl_priv = (void *)(size_t)txdp->host_control;
  1214. for (i = 0; i < fifo->config->max_frags; i++) {
  1215. txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
  1216. txdp->control_0 = txdp->control_1 = 0;
  1217. }
  1218. }
  1219. return status;
  1220. }
  1221. /**
  1222. * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
  1223. * descriptor.
  1224. * @fifo: Handle to the fifo object used for non offload send
  1225. * @txdlh: Descriptor handle.
  1226. * @frag_idx: Index of the data buffer in the caller's scatter-gather list
  1227. * (of buffers).
  1228. * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
  1229. * @size: Size of the data buffer (in bytes).
  1230. *
  1231. * This API is part of the preparation of the transmit descriptor for posting
  1232. * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
  1233. * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
  1234. * All three APIs fill in the fields of the fifo descriptor,
  1235. * in accordance with the Titan specification.
  1236. *
  1237. */
  1238. void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
  1239. void *txdlh, u32 frag_idx,
  1240. dma_addr_t dma_pointer, u32 size)
  1241. {
  1242. struct __vxge_hw_fifo_txdl_priv *txdl_priv;
  1243. struct vxge_hw_fifo_txd *txdp, *txdp_last;
  1244. struct __vxge_hw_channel *channel;
  1245. channel = &fifo->channel;
  1246. txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
  1247. txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
  1248. if (frag_idx != 0)
  1249. txdp->control_0 = txdp->control_1 = 0;
  1250. else {
  1251. txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
  1252. VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
  1253. txdp->control_1 |= fifo->interrupt_type;
  1254. txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
  1255. fifo->tx_intr_num);
  1256. if (txdl_priv->frags) {
  1257. txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
  1258. (txdl_priv->frags - 1);
  1259. txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
  1260. VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
  1261. }
  1262. }
  1263. vxge_assert(frag_idx < txdl_priv->alloc_frags);
  1264. txdp->buffer_pointer = (u64)dma_pointer;
  1265. txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
  1266. fifo->stats->total_buffers++;
  1267. txdl_priv->frags++;
  1268. }
  1269. /**
  1270. * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
  1271. * @fifo: Handle to the fifo object used for non offload send
  1272. * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
  1273. * @frags: Number of contiguous buffers that are part of a single
  1274. * transmit operation.
  1275. *
  1276. * Post descriptor on the 'fifo' type channel for transmission.
  1277. * Prior to posting the descriptor should be filled in accordance with
  1278. * Host/Titan interface specification for a given service (LL, etc.).
  1279. *
  1280. */
  1281. void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
  1282. {
  1283. struct __vxge_hw_fifo_txdl_priv *txdl_priv;
  1284. struct vxge_hw_fifo_txd *txdp_last;
  1285. struct vxge_hw_fifo_txd *txdp_first;
  1286. struct __vxge_hw_channel *channel;
  1287. channel = &fifo->channel;
  1288. txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
  1289. txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
  1290. txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
  1291. txdp_last->control_0 |=
  1292. VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
  1293. txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
  1294. vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
  1295. __vxge_hw_non_offload_db_post(fifo,
  1296. (u64)txdl_priv->dma_addr,
  1297. txdl_priv->frags - 1,
  1298. fifo->no_snoop_bits);
  1299. fifo->stats->total_posts++;
  1300. fifo->stats->common_stats.usage_cnt++;
  1301. if (fifo->stats->common_stats.usage_max <
  1302. fifo->stats->common_stats.usage_cnt)
  1303. fifo->stats->common_stats.usage_max =
  1304. fifo->stats->common_stats.usage_cnt;
  1305. }
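/*
 * Editor's note: an illustrative transmit-post sequence (not part of the
 * driver); @nfrags, @dma_addr[] and @len[] stand in for the caller's already
 * DMA-mapped scatter-gather list:
 *
 *	void *txdlh, *txdl_priv;
 *	u32 i;
 *
 *	if (vxge_hw_fifo_free_txdl_count_get(fifo) == 0)
 *		... fifo full: stop the queue and retry later ...
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) != VXGE_HW_OK)
 *		... out of descriptors ...
 *	for (i = 0; i < nfrags; i++)
 *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i,
 *					     dma_addr[i], len[i]);
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);
 */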
  1306. /**
  1307. * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
  1308. * @fifo: Handle to the fifo object used for non offload send
  1309. * @txdlh: Descriptor handle. Returned by HW.
  1310. * @t_code: Transfer code, as per Titan User Guide,
  1311. * Transmit Descriptor Format.
  1312. * Returned by HW.
  1313. *
  1314. * Retrieve the _next_ completed descriptor.
  1315. * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
  1316. * driver of new completed descriptors. After that
  1317. * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
  1318. * completions (the very first completion is passed by HW via
  1319. * vxge_hw_channel_callback_f).
  1320. *
  1321. * Implementation-wise, the driver is free to call
  1322. * vxge_hw_fifo_txdl_next_completed either immediately from inside the
  1323. * channel callback, or in a deferred fashion and separate (from HW)
  1324. * context.
  1325. *
  1326. * Non-zero @t_code means failure to process the descriptor.
  1327. * The failure could happen, for instance, when the link is
  1328. * down, in which case Titan completes the descriptor because it
  1329. * is not able to send the data out.
  1330. *
  1331. * For details please refer to Titan User Guide.
  1332. *
  1333. * Returns: VXGE_HW_OK - success.
  1334. * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
  1335. * are currently available for processing.
  1336. *
  1337. */
  1338. enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
  1339. struct __vxge_hw_fifo *fifo, void **txdlh,
  1340. enum vxge_hw_fifo_tcode *t_code)
  1341. {
  1342. struct __vxge_hw_channel *channel;
  1343. struct vxge_hw_fifo_txd *txdp;
  1344. enum vxge_hw_status status = VXGE_HW_OK;
  1345. channel = &fifo->channel;
  1346. vxge_hw_channel_dtr_try_complete(channel, txdlh);
  1347. txdp = (struct vxge_hw_fifo_txd *)*txdlh;
  1348. if (txdp == NULL) {
  1349. status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
  1350. goto exit;
  1351. }
  1352. /* check whether host owns it */
  1353. if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
  1354. vxge_assert(txdp->host_control != 0);
  1355. vxge_hw_channel_dtr_complete(channel);
  1356. *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
  1357. if (fifo->stats->common_stats.usage_cnt > 0)
  1358. fifo->stats->common_stats.usage_cnt--;
  1359. status = VXGE_HW_OK;
  1360. goto exit;
  1361. }
  1362. /* no more completions */
  1363. *txdlh = NULL;
  1364. status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
  1365. exit:
  1366. return status;
  1367. }
  1368. /**
  1369. * vxge_hw_fifo_handle_tcode - Handle transfer code.
  1370. * @fifo: Handle to the fifo object used for non offload send
  1371. * @txdlh: Descriptor handle.
  1372. * @t_code: One of the enumerated (and documented in the Titan user guide)
  1373. * "transfer codes".
  1374. *
  1375. * Handle descriptor's transfer code. The latter comes with each completed
  1376. * descriptor.
  1377. *
  1378. * Returns: one of the enum vxge_hw_status{} enumerated types.
  1379. * VXGE_HW_OK - for success.
  1380. * VXGE_HW_ERR_CRITICAL - when encounters critical error.
  1381. */
  1382. enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
  1383. void *txdlh,
  1384. enum vxge_hw_fifo_tcode t_code)
  1385. {
  1386. struct __vxge_hw_channel *channel;
  1387. enum vxge_hw_status status = VXGE_HW_OK;
  1388. channel = &fifo->channel;
  1389. if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
  1390. status = VXGE_HW_ERR_INVALID_TCODE;
  1391. goto exit;
  1392. }
  1393. fifo->stats->txd_t_code_err_cnt[t_code]++;
  1394. exit:
  1395. return status;
  1396. }
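/*
 * Editor's note: an illustrative Tx completion loop (not part of the
 * driver); unmapping the buffers and freeing the skb are assumed to be done
 * by the caller before the TxDL is returned with vxge_hw_fifo_txdl_free():
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *							VXGE_HW_OK) {
 *		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
 *			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		... unmap the buffers and free the skb ...
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);
 *	}
 */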
  1397. /**
  1398. * vxge_hw_fifo_txdl_free - Free descriptor.
  1399. * @fifo: Handle to the fifo object used for non offload send
  1400. * @txdlh: Descriptor handle.
  1401. *
  1402. * Free the reserved descriptor. This operation is "symmetrical" to
  1403. * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
  1404. * lifecycle.
  1405. *
  1406. * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
  1407. * be:
  1408. *
  1409. * - reserved (vxge_hw_fifo_txdl_reserve);
  1410. *
  1411. * - posted (vxge_hw_fifo_txdl_post);
  1412. *
  1413. * - completed (vxge_hw_fifo_txdl_next_completed);
  1414. *
  1415. * - and recycled again (vxge_hw_fifo_txdl_free).
  1416. *
  1417. * For alternative state transitions and more details please refer to
  1418. * the design doc.
  1419. *
  1420. */
  1421. void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
  1422. {
  1423. struct __vxge_hw_fifo_txdl_priv *txdl_priv;
  1424. u32 max_frags;
  1425. struct __vxge_hw_channel *channel;
  1426. channel = &fifo->channel;
  1427. txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
  1428. (struct vxge_hw_fifo_txd *)txdlh);
  1429. max_frags = fifo->config->max_frags;
  1430. vxge_hw_channel_dtr_free(channel, txdlh);
  1431. }
  1432. /**
  1433. * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
  1434. * to MAC address table.
  1435. * @vp: Vpath handle.
  1436. * @macaddr: MAC address to be added for this vpath into the list
  1437. * @macaddr_mask: MAC address mask for macaddr
  1438. * @duplicate_mode: Duplicate MAC address add mode. Please see
  1439. * enum vxge_hw_vpath_mac_addr_add_mode{}
  1440. *
  1441. * Adds the given mac address and mac address mask into the list for this
  1442. * vpath.
  1443. * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
  1444. * vxge_hw_vpath_mac_addr_get_next
  1445. *
  1446. */
  1447. enum vxge_hw_status
  1448. vxge_hw_vpath_mac_addr_add(
  1449. struct __vxge_hw_vpath_handle *vp,
  1450. u8 (macaddr)[ETH_ALEN],
  1451. u8 (macaddr_mask)[ETH_ALEN],
  1452. enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
  1453. {
  1454. u32 i;
  1455. u64 data1 = 0ULL;
  1456. u64 data2 = 0ULL;
  1457. enum vxge_hw_status status = VXGE_HW_OK;
  1458. if (vp == NULL) {
  1459. status = VXGE_HW_ERR_INVALID_HANDLE;
  1460. goto exit;
  1461. }
  1462. for (i = 0; i < ETH_ALEN; i++) {
  1463. data1 <<= 8;
  1464. data1 |= (u8)macaddr[i];
  1465. data2 <<= 8;
  1466. data2 |= (u8)macaddr_mask[i];
  1467. }
  1468. switch (duplicate_mode) {
  1469. case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
  1470. i = 0;
  1471. break;
  1472. case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
  1473. i = 1;
  1474. break;
  1475. case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
  1476. i = 2;
  1477. break;
  1478. default:
  1479. i = 0;
  1480. break;
  1481. }
  1482. status = __vxge_hw_vpath_rts_table_set(vp,
  1483. VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
  1484. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
  1485. 0,
  1486. VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
  1487. VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
  1488. VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
  1489. exit:
  1490. return status;
  1491. }
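/*
 * Editor's sketch (excluded from the build): programming a unicast address
 * into the vpath DA table with an all-ones mask so that only an exact match
 * is steered to this vpath. The address and helper name are example values,
 * not driver defaults.
 */
#if 0
static enum vxge_hw_status example_add_station_addr(
	struct __vxge_hw_vpath_handle *vp)
{
	u8 macaddr[ETH_ALEN]      = {0x00, 0x0c, 0x29, 0x12, 0x34, 0x56};
	u8 macaddr_mask[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

	/* ADD_DUPLICATE keeps duplicate entries instead of discarding them */
	return vxge_hw_vpath_mac_addr_add(vp, macaddr, macaddr_mask,
					  VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
}
#endif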
  1492. /**
  1493. * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
  1494. * from MAC address table.
  1495. * @vp: Vpath handle.
  1496. * @macaddr: First MAC address entry for this vpath in the list
  1497. * @macaddr_mask: MAC address mask for macaddr
  1498. *
  1499. * Returns the first mac address and mac address mask in the list for this
  1500. * vpath.
  1501. * see also: vxge_hw_vpath_mac_addr_get_next
  1502. *
  1503. */
  1504. enum vxge_hw_status
  1505. vxge_hw_vpath_mac_addr_get(
  1506. struct __vxge_hw_vpath_handle *vp,
  1507. u8 (macaddr)[ETH_ALEN],
  1508. u8 (macaddr_mask)[ETH_ALEN])
  1509. {
  1510. u32 i;
  1511. u64 data1 = 0ULL;
  1512. u64 data2 = 0ULL;
  1513. enum vxge_hw_status status = VXGE_HW_OK;
  1514. if (vp == NULL) {
  1515. status = VXGE_HW_ERR_INVALID_HANDLE;
  1516. goto exit;
  1517. }
  1518. status = __vxge_hw_vpath_rts_table_get(vp,
  1519. VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
  1520. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
  1521. 0, &data1, &data2);
  1522. if (status != VXGE_HW_OK)
  1523. goto exit;
  1524. data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
  1525. data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
  1526. for (i = ETH_ALEN; i > 0; i--) {
  1527. macaddr[i-1] = (u8)(data1 & 0xFF);
  1528. data1 >>= 8;
  1529. macaddr_mask[i-1] = (u8)(data2 & 0xFF);
  1530. data2 >>= 8;
  1531. }
  1532. exit:
  1533. return status;
  1534. }
  1535. /**
  1536. * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
  1537. * vpath from the MAC address table.
  1539. * @vp: Vpath handle.
  1540. * @macaddr: Next MAC address entry for this vpath in the list
  1541. * @macaddr_mask: MAC address mask for macaddr
  1542. *
  1543. * Returns the next mac address and mac address mask in the list for this
  1544. * vpath.
  1545. * see also: vxge_hw_vpath_mac_addr_get
  1546. *
  1547. */
  1548. enum vxge_hw_status
  1549. vxge_hw_vpath_mac_addr_get_next(
  1550. struct __vxge_hw_vpath_handle *vp,
  1551. u8 (macaddr)[ETH_ALEN],
  1552. u8 (macaddr_mask)[ETH_ALEN])
  1553. {
  1554. u32 i;
  1555. u64 data1 = 0ULL;
  1556. u64 data2 = 0ULL;
  1557. enum vxge_hw_status status = VXGE_HW_OK;
  1558. if (vp == NULL) {
  1559. status = VXGE_HW_ERR_INVALID_HANDLE;
  1560. goto exit;
  1561. }
  1562. status = __vxge_hw_vpath_rts_table_get(vp,
  1563. VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
  1564. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
  1565. 0, &data1, &data2);
  1566. if (status != VXGE_HW_OK)
  1567. goto exit;
  1568. data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
  1569. data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
  1570. for (i = ETH_ALEN; i > 0; i--) {
  1571. macaddr[i-1] = (u8)(data1 & 0xFF);
  1572. data1 >>= 8;
  1573. macaddr_mask[i-1] = (u8)(data2 & 0xFF);
  1574. data2 >>= 8;
  1575. }
  1576. exit:
  1577. return status;
  1578. }
  1579. /**
  1580. * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
  1581. * from the MAC address table.
  1582. * @vp: Vpath handle.
  1583. * @macaddr: MAC address to be deleted from the list for this vpath
  1584. * @macaddr_mask: MAC address mask for macaddr
  1585. *
  1586. * Deletes the given mac address and mac address mask from the list for this
  1587. * vpath.
  1588. * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
  1589. * vxge_hw_vpath_mac_addr_get_next
  1590. *
  1591. */
  1592. enum vxge_hw_status
  1593. vxge_hw_vpath_mac_addr_delete(
  1594. struct __vxge_hw_vpath_handle *vp,
  1595. u8 (macaddr)[ETH_ALEN],
  1596. u8 (macaddr_mask)[ETH_ALEN])
  1597. {
  1598. u32 i;
  1599. u64 data1 = 0ULL;
  1600. u64 data2 = 0ULL;
  1601. enum vxge_hw_status status = VXGE_HW_OK;
  1602. if (vp == NULL) {
  1603. status = VXGE_HW_ERR_INVALID_HANDLE;
  1604. goto exit;
  1605. }
  1606. for (i = 0; i < ETH_ALEN; i++) {
  1607. data1 <<= 8;
  1608. data1 |= (u8)macaddr[i];
  1609. data2 <<= 8;
  1610. data2 |= (u8)macaddr_mask[i];
  1611. }
  1612. status = __vxge_hw_vpath_rts_table_set(vp,
  1613. VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
  1614. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
  1615. 0,
  1616. VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
  1617. VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
  1618. exit:
  1619. return status;
  1620. }
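/*
 * Editor's sketch (excluded from the build): walking the DA table with
 * vxge_hw_vpath_mac_addr_get()/_get_next() and removing every multicast
 * entry via vxge_hw_vpath_mac_addr_delete(). The helper name is
 * hypothetical, and a production driver may need to restart the walk after
 * each delete rather than continuing in place.
 */
#if 0
static void example_flush_mcast_entries(struct __vxge_hw_vpath_handle *vp)
{
	u8 macaddr[ETH_ALEN], macaddr_mask[ETH_ALEN];
	enum vxge_hw_status status;

	status = vxge_hw_vpath_mac_addr_get(vp, macaddr, macaddr_mask);
	while (status == VXGE_HW_OK) {
		if (is_multicast_ether_addr(macaddr))
			vxge_hw_vpath_mac_addr_delete(vp, macaddr,
						      macaddr_mask);
		status = vxge_hw_vpath_mac_addr_get_next(vp, macaddr,
							 macaddr_mask);
	}
}
#endif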
  1621. /**
  1622. * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
  1623. * to vlan id table.
  1624. * @vp: Vpath handle.
  1625. * @vid: vlan id to be added for this vpath into the list
  1626. *
  1627. * Adds the given vlan id into the list for this vpath.
  1628. * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
  1629. * vxge_hw_vpath_vid_get_next
  1630. *
  1631. */
  1632. enum vxge_hw_status
  1633. vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
  1634. {
  1635. enum vxge_hw_status status = VXGE_HW_OK;
  1636. if (vp == NULL) {
  1637. status = VXGE_HW_ERR_INVALID_HANDLE;
  1638. goto exit;
  1639. }
  1640. status = __vxge_hw_vpath_rts_table_set(vp,
  1641. VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
  1642. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
  1643. 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
  1644. exit:
  1645. return status;
  1646. }
  1647. /**
  1648. * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
  1649. * from vlan id table.
  1650. * @vp: Vpath handle.
  1651. * @vid: Buffer to return vlan id
  1652. *
  1653. * Returns the first vlan id in the list for this vpath.
  1654. * see also: vxge_hw_vpath_vid_get_next
  1655. *
  1656. */
  1657. enum vxge_hw_status
  1658. vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
  1659. {
  1660. u64 data;
  1661. enum vxge_hw_status status = VXGE_HW_OK;
  1662. if (vp == NULL) {
  1663. status = VXGE_HW_ERR_INVALID_HANDLE;
  1664. goto exit;
  1665. }
  1666. status = __vxge_hw_vpath_rts_table_get(vp,
  1667. VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
  1668. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
  1669. 0, vid, &data);
  1670. *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
  1671. exit:
  1672. return status;
  1673. }
  1674. /**
  1675. * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
  1676. * from the vlan id table.
  1677. * @vp: Vpath handle.
  1678. * @vid: vlan id to be deleted from the list for this vpath
  1679. *
  1680. * Deletes the given vlan id from the list for this vpath.
  1681. * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
  1682. * vxge_hw_vpath_vid_get_next
  1683. *
  1684. */
  1685. enum vxge_hw_status
  1686. vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
  1687. {
  1688. enum vxge_hw_status status = VXGE_HW_OK;
  1689. if (vp == NULL) {
  1690. status = VXGE_HW_ERR_INVALID_HANDLE;
  1691. goto exit;
  1692. }
  1693. status = __vxge_hw_vpath_rts_table_set(vp,
  1694. VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
  1695. VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
  1696. 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
  1697. exit:
  1698. return status;
  1699. }
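/*
 * Editor's sketch (excluded from the build): mirroring a software VLAN
 * filter into the vpath VID table. 'vlan_bitmap' and the helper name are
 * hypothetical; only the vid add/delete calls come from the API above.
 */
#if 0
static void example_sync_vlans(struct __vxge_hw_vpath_handle *vp,
			       const unsigned long *vlan_bitmap)
{
	u64 vid;

	for (vid = 0; vid < 4096; vid++) {	/* 802.1Q VID space */
		if (test_bit(vid, vlan_bitmap))
			vxge_hw_vpath_vid_add(vp, vid);
		else
			vxge_hw_vpath_vid_delete(vp, vid);
	}
}
#endif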
  1700. /**
  1701. * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
  1702. * @vp: Vpath handle.
  1703. *
  1704. * Enable promiscuous mode of Titan-e operation.
  1705. *
  1706. * See also: vxge_hw_vpath_promisc_disable().
  1707. */
  1708. enum vxge_hw_status vxge_hw_vpath_promisc_enable(
  1709. struct __vxge_hw_vpath_handle *vp)
  1710. {
  1711. u64 val64;
  1712. struct __vxge_hw_virtualpath *vpath;
  1713. enum vxge_hw_status status = VXGE_HW_OK;
  1714. if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
  1715. status = VXGE_HW_ERR_INVALID_HANDLE;
  1716. goto exit;
  1717. }
  1718. vpath = vp->vpath;
  1719. /* Enable promiscuous mode for function 0 only */
  1720. if (!(vpath->hldev->access_rights &
  1721. VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
  1722. return VXGE_HW_OK;
  1723. val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
  1724. if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
  1725. val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
  1726. VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
  1727. VXGE_HW_RXMAC_VCFG0_BCAST_EN |
  1728. VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
  1729. writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
  1730. }
  1731. exit:
  1732. return status;
  1733. }
  1734. /**
  1735. * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
  1736. * @vp: Vpath handle.
  1737. *
  1738. * Disable promiscuous mode of Titan-e operation.
  1739. *
  1740. * See also: vxge_hw_vpath_promisc_enable().
  1741. */
  1742. enum vxge_hw_status vxge_hw_vpath_promisc_disable(
  1743. struct __vxge_hw_vpath_handle *vp)
  1744. {
  1745. u64 val64;
  1746. struct __vxge_hw_virtualpath *vpath;
  1747. enum vxge_hw_status status = VXGE_HW_OK;
  1748. if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
  1749. status = VXGE_HW_ERR_INVALID_HANDLE;
  1750. goto exit;
  1751. }
  1752. vpath = vp->vpath;
  1753. val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
  1754. if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
  1755. val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
  1756. VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
  1757. VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
  1758. writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
  1759. }
  1760. exit:
  1761. return status;
  1762. }
  1763. /*
  1764. * vxge_hw_vpath_bcast_enable - Enable broadcast
  1765. * @vp: Vpath handle.
  1766. *
  1767. * Enable receiving broadcasts.
  1768. */
  1769. enum vxge_hw_status vxge_hw_vpath_bcast_enable(
  1770. struct __vxge_hw_vpath_handle *vp)
  1771. {
  1772. u64 val64;
  1773. struct __vxge_hw_virtualpath *vpath;
  1774. enum vxge_hw_status status = VXGE_HW_OK;
  1775. if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
  1776. status = VXGE_HW_ERR_INVALID_HANDLE;
  1777. goto exit;
  1778. }
  1779. vpath = vp->vpath;
  1780. val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
  1781. if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
  1782. val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
  1783. writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
  1784. }
  1785. exit:
  1786. return status;
  1787. }
  1788. /**
  1789. * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
  1790. * @vp: Vpath handle.
  1791. *
  1792. * Enable Titan-e multicast addresses.
  1793. * Returns: VXGE_HW_OK on success.
  1794. *
  1795. */
  1796. enum vxge_hw_status vxge_hw_vpath_mcast_enable(
  1797. struct __vxge_hw_vpath_handle *vp)
  1798. {
  1799. u64 val64;
  1800. struct __vxge_hw_virtualpath *vpath;
  1801. enum vxge_hw_status status = VXGE_HW_OK;
  1802. if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
  1803. status = VXGE_HW_ERR_INVALID_HANDLE;
  1804. goto exit;
  1805. }
  1806. vpath = vp->vpath;
  1807. val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
  1808. if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
  1809. val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
  1810. writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
  1811. }
  1812. exit:
  1813. return status;
  1814. }
  1815. /**
  1816. * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
  1817. * @vp: Vpath handle.
  1818. *
  1819. * Disable Titan-e multicast addresses.
  1820. * Returns: VXGE_HW_OK - success.
  1821. * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
  1822. *
  1823. */
  1824. enum vxge_hw_status
  1825. vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
  1826. {
  1827. u64 val64;
  1828. struct __vxge_hw_virtualpath *vpath;
  1829. enum vxge_hw_status status = VXGE_HW_OK;
  1830. if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
  1831. status = VXGE_HW_ERR_INVALID_HANDLE;
  1832. goto exit;
  1833. }
  1834. vpath = vp->vpath;
  1835. val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
  1836. if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
  1837. val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
  1838. writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
  1839. }
  1840. exit:
  1841. return status;
  1842. }
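/*
 * Editor's sketch (excluded from the build): applying net_device flags to a
 * vpath with the promiscuous/multicast/broadcast helpers above. The helper
 * name is hypothetical and error handling is omitted for brevity.
 */
#if 0
static void example_apply_rx_mode(struct __vxge_hw_vpath_handle *vp,
				  struct net_device *dev)
{
	if (dev->flags & IFF_PROMISC)
		vxge_hw_vpath_promisc_enable(vp);
	else
		vxge_hw_vpath_promisc_disable(vp);

	if (dev->flags & IFF_ALLMULTI)
		vxge_hw_vpath_mcast_enable(vp);
	else
		vxge_hw_vpath_mcast_disable(vp);

	/* Broadcast reception is normally left enabled */
	vxge_hw_vpath_bcast_enable(vp);
}
#endif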
  1843. /*
  1844. * vxge_hw_vpath_alarm_process - Process Alarms.
  1845. * @vp: Virtual Path handle.
  1846. * @skip_alarms: Do not clear the alarms
  1847. *
  1848. * Process vpath alarms.
  1849. *
  1850. */
  1851. enum vxge_hw_status vxge_hw_vpath_alarm_process(
  1852. struct __vxge_hw_vpath_handle *vp,
  1853. u32 skip_alarms)
  1854. {
  1855. enum vxge_hw_status status = VXGE_HW_OK;
  1856. if (vp == NULL) {
  1857. status = VXGE_HW_ERR_INVALID_HANDLE;
  1858. goto exit;
  1859. }
  1860. status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
  1861. exit:
  1862. return status;
  1863. }
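/*
 * Editor's sketch (excluded from the build): a vpath alarm (error) interrupt
 * handler typically just forwards to vxge_hw_vpath_alarm_process(); passing
 * skip_alarms == 0 lets the HW layer clear the alarm registers. Assumes
 * <linux/interrupt.h>; the handler name and dev_id wiring are hypothetical.
 */
#if 0
static irqreturn_t example_vpath_alarm_isr(int irq, void *dev_id)
{
	struct __vxge_hw_vpath_handle *vp = dev_id;

	vxge_hw_vpath_alarm_process(vp, 0);
	return IRQ_HANDLED;
}
#endif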
  1864. /**
  1865. * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
  1866. * alarms
  1867. * @vp: Virtual Path handle.
  1868. * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
  1869. * interrupts (can be repeated). If the fifo or ring is not enabled,
  1870. * the MSIX vector for it should be set to 0.
  1871. * @alarm_msix_id: MSIX vector for alarm.
  1872. *
  1873. * This API associates the given MSIX vector numbers with the four TIM
  1874. * interrupts and the alarm interrupt.
  1875. */
  1876. void
  1877. vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
  1878. int alarm_msix_id)
  1879. {
  1880. u64 val64;
  1881. struct __vxge_hw_virtualpath *vpath = vp->vpath;
  1882. struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
  1883. u32 vp_id = vp->vpath->vp_id;
  1884. val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
  1885. (vp_id * 4) + tim_msix_id[0]) |
  1886. VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
  1887. (vp_id * 4) + tim_msix_id[1]);
  1888. writeq(val64, &vp_reg->interrupt_cfg0);
  1889. writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
  1890. (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
  1891. &vp_reg->interrupt_cfg2);
  1892. if (vpath->hldev->config.intr_mode ==
  1893. VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
  1894. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
  1895. VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
  1896. 0, 32), &vp_reg->one_shot_vect0_en);
  1897. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
  1898. VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
  1899. 0, 32), &vp_reg->one_shot_vect1_en);
  1900. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
  1901. VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
  1902. 0, 32), &vp_reg->one_shot_vect2_en);
  1903. }
  1904. }
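/*
 * Editor's sketch (excluded from the build): a typical MSI-X setup assigns
 * one TIM slot to Tx, one to Rx and a separate vector to alarms. The vector
 * numbers and helper name here are illustrative only; the real driver
 * derives them from the vpath index.
 */
#if 0
static void example_setup_vpath_msix(struct __vxge_hw_vpath_handle *vp)
{
	/* One slot per TIM interrupt; slot 0 = Tx, slot 1 = Rx here */
	int tim_msix_id[4] = {0, 1, 0, 0};
	int alarm_msix_id = 2;

	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);

	/*
	 * The vectors still have to be unmasked (see
	 * vxge_hw_vpath_msix_unmask() below) before they can fire.
	 */
}
#endif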
  1905. /**
  1906. * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
  1907. * @vp: Virtual Path handle.
  1908. * @msix_id: MSIX ID
  1909. *
  1910. * The function masks the msix interrupt for the given msix_id
  1911. *
  1912. * Returns: none.
  1913. *
  1914. * See also: vxge_hw_vpath_msix_unmask(),
  1915. * vxge_hw_vpath_msix_clear()
  1916. */
  1917. void
  1918. vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
  1919. {
  1920. struct __vxge_hw_device *hldev = vp->vpath->hldev;
  1921. __vxge_hw_pio_mem_write32_upper(
  1922. (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
  1923. &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
  1924. }
  1925. /**
  1926. * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
  1927. * @vp: Virtual Path handle.
  1928. * @msix_id: MSIX ID
  1929. *
  1930. * The function clears the msix interrupt for the given msix_id
  1931. *
  1932. * Returns: none.
  1933. *
  1934. * See also: vxge_hw_vpath_msix_mask(),
  1935. * vxge_hw_vpath_msix_unmask()
  1936. */
  1937. void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
  1938. {
  1939. struct __vxge_hw_device *hldev = vp->vpath->hldev;
  1940. if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
  1941. __vxge_hw_pio_mem_write32_upper(
  1942. (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
  1943. &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
  1944. else
  1945. __vxge_hw_pio_mem_write32_upper(
  1946. (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
  1947. &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
  1948. }
  1949. /**
  1950. * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
  1951. * @vp: Virtual Path handle.
  1952. * @msix_id: MSIX ID
  1953. *
  1954. * The function unmasks the msix interrupt for the given msix_id
  1955. *
  1956. * Returns: none.
  1957. *
  1958. * See also: vxge_hw_vpath_msix_mask(),
  1959. * vxge_hw_vpath_msix_clear()
  1960. */
  1961. void
  1962. vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
  1963. {
  1964. struct __vxge_hw_device *hldev = vp->vpath->hldev;
  1965. __vxge_hw_pio_mem_write32_upper(
  1966. (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
  1967. &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
  1968. }
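/*
 * Editor's sketch (excluded from the build): how the mask/clear/unmask
 * helpers fit around completion processing for one MSI-X vector. The helper
 * name is hypothetical and the exact re-arm sequence depends on whether
 * one-shot mode (VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) is in use.
 */
#if 0
static void example_handle_vector(struct __vxge_hw_vpath_handle *vp,
				  int msix_id)
{
	vxge_hw_vpath_msix_mask(vp, msix_id);	/* quiesce the vector */

	/* ... process Rx/Tx completions for this vpath here ... */

	vxge_hw_vpath_msix_clear(vp, msix_id);	/* ack/re-arm (one-shot mode) */
	vxge_hw_vpath_msix_unmask(vp, msix_id);	/* allow further interrupts */
}
#endif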
  1969. /**
  1970. * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
  1971. * @vp: Virtual Path handle.
  1972. *
  1973. * Mask Tx and Rx vpath interrupts.
  1974. *
  1975. * See also: vxge_hw_vpath_inta_unmask_tx_rx()
  1976. */
  1977. void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
  1978. {
  1979. u64 tim_int_mask0[4] = {[0 ...3] = 0};
  1980. u32 tim_int_mask1[4] = {[0 ...3] = 0};
  1981. u64 val64;
  1982. struct __vxge_hw_device *hldev = vp->vpath->hldev;
  1983. VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
  1984. tim_int_mask1, vp->vpath->vp_id);
  1985. val64 = readq(&hldev->common_reg->tim_int_mask0);
  1986. if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
  1987. (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
  1988. writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
  1989. tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
  1990. &hldev->common_reg->tim_int_mask0);
  1991. }
  1992. val64 = readl(&hldev->common_reg->tim_int_mask1);
  1993. if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
  1994. (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
  1995. __vxge_hw_pio_mem_write32_upper(
  1996. (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
  1997. tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
  1998. &hldev->common_reg->tim_int_mask1);
  1999. }
  2000. }
  2001. /**
  2002. * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
  2003. * @vp: Virtual Path handle.
  2004. *
  2005. * Unmask Tx and Rx vpath interrupts.
  2006. *
  2007. * See also: vxge_hw_vpath_inta_mask_tx_rx()
  2008. */
  2009. void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
  2010. {
  2011. u64 tim_int_mask0[4] = {[0 ...3] = 0};
  2012. u32 tim_int_mask1[4] = {[0 ...3] = 0};
  2013. u64 val64;
  2014. struct __vxge_hw_device *hldev = vp->vpath->hldev;
  2015. VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
  2016. tim_int_mask1, vp->vpath->vp_id);
  2017. val64 = readq(&hldev->common_reg->tim_int_mask0);
  2018. if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
  2019. (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
  2020. writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
  2021. tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
  2022. &hldev->common_reg->tim_int_mask0);
  2023. }
  2024. if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
  2025. (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
  2026. __vxge_hw_pio_mem_write32_upper(
  2027. (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
  2028. tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
  2029. &hldev->common_reg->tim_int_mask1);
  2030. }
  2031. }
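/*
 * Editor's sketch (excluded from the build): with legacy INTA the driver
 * masks a vpath's Tx/Rx TIM interrupts while a poll is in flight and
 * unmasks them once the poll is done. The helper name is hypothetical;
 * vxge_hw_vpath_poll_rx() is documented below.
 */
#if 0
static void example_inta_poll(struct __vxge_hw_vpath_handle *vp,
			      struct __vxge_hw_ring *ring)
{
	vxge_hw_vpath_inta_mask_tx_rx(vp);
	vxge_hw_vpath_poll_rx(ring);
	vxge_hw_vpath_inta_unmask_tx_rx(vp);
}
#endif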
  2032. /**
  2033. * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
  2034. * descriptors and process the same.
  2035. * @ring: Handle to the ring object used for receive
  2036. *
  2037. * The function polls the Rx for the completed descriptors and calls
  2038. * the driver via supplied completion callback.
  2039. *
  2040. * Returns: VXGE_HW_OK, if the polling completed successfully.
  2041. * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
  2042. * descriptors available which are yet to be processed.
  2043. *
  2044. * See also: vxge_hw_vpath_poll_tx()
  2045. */
  2046. enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
  2047. {
  2048. u8 t_code;
  2049. enum vxge_hw_status status = VXGE_HW_OK;
  2050. void *first_rxdh;
  2051. u64 val64 = 0;
  2052. int new_count = 0;
  2053. ring->cmpl_cnt = 0;
  2054. status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
  2055. if (status == VXGE_HW_OK)
  2056. ring->callback(ring, first_rxdh,
  2057. t_code, ring->channel.userdata);
  2058. if (ring->cmpl_cnt != 0) {
  2059. ring->doorbell_cnt += ring->cmpl_cnt;
  2060. if (ring->doorbell_cnt >= ring->rxds_limit) {
  2061. /*
  2062. * Each RxD is of 4 qwords, update the number of
  2063. * qwords replenished
  2064. */
  2065. new_count = (ring->doorbell_cnt * 4);
  2066. /* For each block add 4 more qwords */
  2067. ring->total_db_cnt += ring->doorbell_cnt;
  2068. if (ring->total_db_cnt >= ring->rxds_per_block) {
  2069. new_count += 4;
  2070. /* Reset total count */
  2071. ring->total_db_cnt %= ring->rxds_per_block;
  2072. }
  2073. writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
  2074. &ring->vp_reg->prc_rxd_doorbell);
  2075. val64 =
  2076. readl(&ring->common_reg->titan_general_int_status);
  2077. ring->doorbell_cnt = 0;
  2078. }
  2079. }
  2080. return status;
  2081. }
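/*
 * Editor's sketch (excluded from the build): a NAPI poll routine built on
 * vxge_hw_vpath_poll_rx(). 'struct example_ring' and its budget/pkts
 * bookkeeping (consumed by the ring's Rx completion callback) are
 * hypothetical; only the poll call itself comes from the API above.
 */
#if 0
static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_ring *r = container_of(napi, struct example_ring, napi);
	int pkts_done;

	r->budget = budget;
	r->pkts_processed = 0;
	vxge_hw_vpath_poll_rx(r->hw_ring);	/* invokes the Rx callback */
	pkts_done = r->pkts_processed;

	if (pkts_done < budget) {
		napi_complete(napi);
		/* re-enable interrupts for this vpath here */
	}
	return pkts_done;
}
#endif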
  2082. /**
  2083. * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
  2084. * the same.
  2085. * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: Array in which the completion callback returns skbs that the
 * caller can now free
 * @nr_skb: Number of entries available in the skb_ptr array
 * @more: Set by the completion callback when more completed descriptors
 * remain to be processed
  2086. *
  2087. * The function polls the Tx for the completed descriptors and calls
  2088. * the driver via supplied completion callback.
  2089. *
  2090. * Returns: VXGE_HW_OK, if the polling completed successfully.
  2091. * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
  2092. * descriptors available which are yet to be processed.
  2093. */
  2094. enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
  2095. struct sk_buff ***skb_ptr, int nr_skb,
  2096. int *more)
  2097. {
  2098. enum vxge_hw_fifo_tcode t_code;
  2099. void *first_txdlh;
  2100. enum vxge_hw_status status = VXGE_HW_OK;
  2101. struct __vxge_hw_channel *channel;
  2102. channel = &fifo->channel;
  2103. status = vxge_hw_fifo_txdl_next_completed(fifo,
  2104. &first_txdlh, &t_code);
  2105. if (status == VXGE_HW_OK)
  2106. if (fifo->callback(fifo, first_txdlh, t_code,
  2107. channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
  2108. status = VXGE_HW_COMPLETIONS_REMAIN;
  2109. return status;
  2110. }
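/*
 * Editor's sketch (excluded from the build): draining Tx completions with
 * vxge_hw_vpath_poll_tx(). The completion callback advances 'skb_ptr' as it
 * stores skbs that are now safe to free. The batch size and helper name are
 * hypothetical, and a real driver holds the Tx queue lock around the poll.
 */
#if 0
#define EXAMPLE_NR_SKB_COMPLETED 128

static void example_drain_tx(struct __vxge_hw_fifo *fifo)
{
	struct sk_buff *completed[EXAMPLE_NR_SKB_COMPLETED];
	struct sk_buff **skb_ptr, **temp;
	int more;

	do {
		more = 0;
		skb_ptr = completed;
		vxge_hw_vpath_poll_tx(fifo, &skb_ptr,
				      EXAMPLE_NR_SKB_COMPLETED, &more);

		/* Free everything the callback handed back */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_kfree_skb(*temp);
	} while (more);
}
#endif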