
/drivers/net/vxge/vxge-traffic.c

https://bitbucket.org/ndreys/linux-sunxi
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0


   1/******************************************************************************
   2 * This software may be used and distributed according to the terms of
   3 * the GNU General Public License (GPL), incorporated herein by reference.
   4 * Drivers based on or derived from this code fall under the GPL and must
   5 * retain the authorship, copyright and license notice.  This file is not
   6 * a complete program and may only be used when the entire operating
   7 * system is licensed under the GPL.
   8 * See the file COPYING in this distribution for more information.
   9 *
  10 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
  11 *                 Virtualized Server Adapter.
  12 * Copyright(c) 2002-2010 Exar Corp.
  13 ******************************************************************************/
  14#include <linux/etherdevice.h>
  15#include <linux/prefetch.h>
  16
  17#include "vxge-traffic.h"
  18#include "vxge-config.h"
  19#include "vxge-main.h"
  20
  21/*
  22 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
  23 * @vp: Virtual Path handle.
  24 *
   25 * Enable vpath interrupts. The function is to be executed last in the
   26 * vpath initialization sequence.
  27 *
  28 * See also: vxge_hw_vpath_intr_disable()
  29 */
  30enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
  31{
  32	u64 val64;
  33
  34	struct __vxge_hw_virtualpath *vpath;
  35	struct vxge_hw_vpath_reg __iomem *vp_reg;
  36	enum vxge_hw_status status = VXGE_HW_OK;
  37	if (vp == NULL) {
  38		status = VXGE_HW_ERR_INVALID_HANDLE;
  39		goto exit;
  40	}
  41
  42	vpath = vp->vpath;
  43
  44	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
  45		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
  46		goto exit;
  47	}
  48
  49	vp_reg = vpath->vp_reg;
  50
  51	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
  52
  53	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  54			&vp_reg->general_errors_reg);
  55
  56	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  57			&vp_reg->pci_config_errors_reg);
  58
  59	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  60			&vp_reg->mrpcim_to_vpath_alarm_reg);
  61
  62	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  63			&vp_reg->srpcim_to_vpath_alarm_reg);
  64
  65	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  66			&vp_reg->vpath_ppif_int_status);
  67
  68	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  69			&vp_reg->srpcim_msg_to_vpath_reg);
  70
  71	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  72			&vp_reg->vpath_pcipif_int_status);
  73
  74	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  75			&vp_reg->prc_alarm_reg);
  76
  77	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  78			&vp_reg->wrdma_alarm_status);
  79
  80	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  81			&vp_reg->asic_ntwk_vp_err_reg);
  82
  83	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  84			&vp_reg->xgmac_vp_int_status);
  85
  86	val64 = readq(&vp_reg->vpath_general_int_status);
  87
  88	/* Mask unwanted interrupts */
  89
  90	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  91			&vp_reg->vpath_pcipif_int_mask);
  92
  93	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  94			&vp_reg->srpcim_msg_to_vpath_mask);
  95
  96	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  97			&vp_reg->srpcim_to_vpath_alarm_mask);
  98
  99	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
 100			&vp_reg->mrpcim_to_vpath_alarm_mask);
 101
 102	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
 103			&vp_reg->pci_config_errors_mask);
 104
 105	/* Unmask the individual interrupts */
 106
 107	writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
 108		VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
 109		VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
 110		VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
 111		&vp_reg->general_errors_mask);
 112
 113	__vxge_hw_pio_mem_write32_upper(
 114		(u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
 115		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
 116		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
 117		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
 118		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
 119		VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
 120		&vp_reg->kdfcctl_errors_mask);
 121
 122	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
 123
 124	__vxge_hw_pio_mem_write32_upper(
 125		(u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
 126		&vp_reg->prc_alarm_mask);
 127
 128	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
 129	__vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
 130
 131	if (vpath->hldev->first_vp_id != vpath->vp_id)
 132		__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
 133			&vp_reg->asic_ntwk_vp_err_mask);
 134	else
 135		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
 136		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
 137		VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
 138		&vp_reg->asic_ntwk_vp_err_mask);
 139
 140	__vxge_hw_pio_mem_write32_upper(0,
 141		&vp_reg->vpath_general_int_mask);
 142exit:
 143	return status;
 144
 145}
 146
 147/*
 148 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 149 * @vp: Virtual Path handle.
 150 *
  151 * Disable vpath interrupts by masking all vpath interrupt sources.
 153 *
 154 * See also: vxge_hw_vpath_intr_enable()
 155 */
 156enum vxge_hw_status vxge_hw_vpath_intr_disable(
 157			struct __vxge_hw_vpath_handle *vp)
 158{
 159	u64 val64;
 160
 161	struct __vxge_hw_virtualpath *vpath;
 162	enum vxge_hw_status status = VXGE_HW_OK;
 163	struct vxge_hw_vpath_reg __iomem *vp_reg;
 164	if (vp == NULL) {
 165		status = VXGE_HW_ERR_INVALID_HANDLE;
 166		goto exit;
 167	}
 168
 169	vpath = vp->vpath;
 170
 171	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
 172		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
 173		goto exit;
 174	}
 175	vp_reg = vpath->vp_reg;
 176
 177	__vxge_hw_pio_mem_write32_upper(
 178		(u32)VXGE_HW_INTR_MASK_ALL,
 179		&vp_reg->vpath_general_int_mask);
 180
 181	val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
 182
 183	writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
 184
 185	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
 186			&vp_reg->general_errors_mask);
 187
 188	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
 189			&vp_reg->pci_config_errors_mask);
 190
 191	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
 192			&vp_reg->mrpcim_to_vpath_alarm_mask);
 193
 194	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
 195			&vp_reg->srpcim_to_vpath_alarm_mask);
 196
 197	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
 198			&vp_reg->vpath_ppif_int_mask);
 199
 200	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
 201			&vp_reg->srpcim_msg_to_vpath_mask);
 202
 203	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
 204			&vp_reg->vpath_pcipif_int_mask);
 205
 206	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
 207			&vp_reg->wrdma_alarm_mask);
 208
 209	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
 210			&vp_reg->prc_alarm_mask);
 211
 212	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
 213			&vp_reg->xgmac_vp_int_mask);
 214
 215	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
 216			&vp_reg->asic_ntwk_vp_err_mask);
 217
 218exit:
 219	return status;
 220}
 221
 222void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
 223{
 224	struct vxge_hw_vpath_reg __iomem *vp_reg;
 225	struct vxge_hw_vp_config *config;
 226	u64 val64;
 227
 228	if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
 229		return;
 230
 231	vp_reg = fifo->vp_reg;
 232	config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
 233
 234	if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
 235		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
 236		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
 237		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
 238		fifo->tim_tti_cfg1_saved = val64;
 239		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
 240	}
 241}
 242
 243void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
 244{
 245	u64 val64 = ring->tim_rti_cfg1_saved;
 246
 247	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
 248	ring->tim_rti_cfg1_saved = val64;
 249	writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
 250}
 251
 252void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
 253{
 254	u64 val64 = fifo->tim_tti_cfg3_saved;
 255	u64 timer = (fifo->rtimer * 1000) / 272;
 256
 257	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
 258	if (timer)
 259		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
 260			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
 261
 262	writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
 263	/* tti_cfg3_saved is not updated again because it is
 264	 * initialized at one place only - init time.
 265	 */
 266}
 267
 268void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
 269{
 270	u64 val64 = ring->tim_rti_cfg3_saved;
 271	u64 timer = (ring->rtimer * 1000) / 272;
 272
 273	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
 274	if (timer)
 275		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
 276			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
 277
 278	writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
 279	/* rti_cfg3_saved is not updated again because it is
 280	 * initialized at one place only - init time.
 281	 */
 282}
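
/*
 * Worked example for the conversion used by the two helpers above
 * (assumption: @rtimer is given in microseconds and one restriction-timer
 * tick is ~272 ns, which is what the "* 1000 / 272" scaling implies):
 *
 *	rtimer = 100 us  =>  timer = (100 * 1000) / 272 = 367 ticks
 *
 * i.e. a 100 us restriction interval programs 367 ticks into
 * TIM_CFG3_INT_NUM_RTIMER_VAL.
 */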
 283
 284/**
 285 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
  286 * @channel: Channel for rx or tx handle
 287 * @msix_id:  MSIX ID
 288 *
 289 * The function masks the msix interrupt for the given msix_id
 292 */
 293void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
 294{
 295
 296	__vxge_hw_pio_mem_write32_upper(
 297		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
 298		&channel->common_reg->set_msix_mask_vect[msix_id%4]);
 299}
 300
 301/**
 302 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
  303 * @channel: Channel for rx or tx handle
  304 * @msix_id:  MSIX ID
 305 *
 306 * The function unmasks the msix interrupt for the given msix_id
 309 */
 310void
 311vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
 312{
 313
 314	__vxge_hw_pio_mem_write32_upper(
 315		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
 316		&channel->common_reg->clear_msix_mask_vect[msix_id%4]);
 317}
 318
 319/**
 320 * vxge_hw_channel_msix_clear - Unmask the MSIX Vector.
 321 * @channel: Channel for rx or tx handle
  322 * @msix_id:  MSIX ID
 323 *
 324 * The function unmasks the msix interrupt for the given msix_id
 325 * if configured in MSIX oneshot mode
 328 */
 329void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
 330{
 331	__vxge_hw_pio_mem_write32_upper(
 332		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
 333		&channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
 334}
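
/*
 * Note on the index arithmetic shared by the three MSIX helpers above: the
 * per-vector mask/unmask/one-shot-clear bits are spread across four 32-bit
 * registers, so vector @msix_id selects the register at index msix_id % 4
 * and, within it, the bit position derived from msix_id >> 2.  Worked
 * example: msix_id = 5 touches register index 1 (5 % 4) with bit index 1
 * (5 >> 2).
 */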
 335
 336/**
 337 * vxge_hw_device_set_intr_type - Updates the configuration
 338 *		with new interrupt type.
 339 * @hldev: HW device handle.
 340 * @intr_mode: New interrupt type
 341 */
 342u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
 343{
 344
 345	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
 346	   (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
 347	   (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
 348	   (intr_mode != VXGE_HW_INTR_MODE_DEF))
 349		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
 350
 351	hldev->config.intr_mode = intr_mode;
 352	return intr_mode;
 353}
 354
 355/**
 356 * vxge_hw_device_intr_enable - Enable interrupts.
 357 * @hldev: HW device handle.
 360 *
  361 * Enable Titan interrupts. The function is to be executed last in the
  362 * Titan initialization sequence.
 363 *
 364 * See also: vxge_hw_device_intr_disable()
 365 */
 366void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
 367{
 368	u32 i;
 369	u64 val64;
 370	u32 val32;
 371
 372	vxge_hw_device_mask_all(hldev);
 373
 374	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
 375
 376		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
 377			continue;
 378
 379		vxge_hw_vpath_intr_enable(
 380			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
 381	}
 382
 383	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
 384		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
 385			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
 386
 387		if (val64 != 0) {
 388			writeq(val64, &hldev->common_reg->tim_int_status0);
 389
 390			writeq(~val64, &hldev->common_reg->tim_int_mask0);
 391		}
 392
 393		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
 394			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
 395
 396		if (val32 != 0) {
 397			__vxge_hw_pio_mem_write32_upper(val32,
 398					&hldev->common_reg->tim_int_status1);
 399
 400			__vxge_hw_pio_mem_write32_upper(~val32,
 401					&hldev->common_reg->tim_int_mask1);
 402		}
 403	}
 404
 405	val64 = readq(&hldev->common_reg->titan_general_int_status);
 406
 407	vxge_hw_device_unmask_all(hldev);
 408}
 409
 410/**
 411 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 412 * @hldev: HW device handle.
 415 *
 416 * Disable Titan interrupts.
 417 *
 418 * See also: vxge_hw_device_intr_enable()
 419 */
 420void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
 421{
 422	u32 i;
 423
 424	vxge_hw_device_mask_all(hldev);
 425
 426	/* mask all the tim interrupts */
 427	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
 428	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
 429		&hldev->common_reg->tim_int_mask1);
 430
 431	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
 432
 433		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
 434			continue;
 435
 436		vxge_hw_vpath_intr_disable(
 437			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
 438	}
 439}
 440
 441/**
 442 * vxge_hw_device_mask_all - Mask all device interrupts.
 443 * @hldev: HW device handle.
 444 *
 445 * Mask	all device interrupts.
 446 *
 447 * See also: vxge_hw_device_unmask_all()
 448 */
 449void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
 450{
 451	u64 val64;
 452
 453	val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
 454		VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
 455
 456	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
 457				&hldev->common_reg->titan_mask_all_int);
 458}
 459
 460/**
 461 * vxge_hw_device_unmask_all - Unmask all device interrupts.
 462 * @hldev: HW device handle.
 463 *
 464 * Unmask all device interrupts.
 465 *
 466 * See also: vxge_hw_device_mask_all()
 467 */
 468void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
 469{
 470	u64 val64 = 0;
 471
 472	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
 473		val64 =  VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
 474
 475	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
 476			&hldev->common_reg->titan_mask_all_int);
 477}
 478
 479/**
 480 * vxge_hw_device_flush_io - Flush io writes.
 481 * @hldev: HW device handle.
 482 *
 483 * The function	performs a read operation to flush io writes.
 484 *
 485 * Returns: void
 486 */
 487void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
 488{
 489	u32 val32;
 490
 491	val32 = readl(&hldev->common_reg->titan_general_int_status);
 492}
 493
 494/**
 495 * __vxge_hw_device_handle_error - Handle error
 496 * @hldev: HW device
 497 * @vp_id: Vpath Id
 498 * @type: Error type. Please see enum vxge_hw_event{}
 499 *
 500 * Handle error.
 501 */
 502static enum vxge_hw_status
 503__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
 504			      enum vxge_hw_event type)
 505{
 506	switch (type) {
 507	case VXGE_HW_EVENT_UNKNOWN:
 508		break;
 509	case VXGE_HW_EVENT_RESET_START:
 510	case VXGE_HW_EVENT_RESET_COMPLETE:
 511	case VXGE_HW_EVENT_LINK_DOWN:
 512	case VXGE_HW_EVENT_LINK_UP:
 513		goto out;
 514	case VXGE_HW_EVENT_ALARM_CLEARED:
 515		goto out;
 516	case VXGE_HW_EVENT_ECCERR:
 517	case VXGE_HW_EVENT_MRPCIM_ECCERR:
 518		goto out;
 519	case VXGE_HW_EVENT_FIFO_ERR:
 520	case VXGE_HW_EVENT_VPATH_ERR:
 521	case VXGE_HW_EVENT_CRITICAL_ERR:
 522	case VXGE_HW_EVENT_SERR:
 523		break;
 524	case VXGE_HW_EVENT_SRPCIM_SERR:
 525	case VXGE_HW_EVENT_MRPCIM_SERR:
 526		goto out;
 527	case VXGE_HW_EVENT_SLOT_FREEZE:
 528		break;
 529	default:
 530		vxge_assert(0);
 531		goto out;
 532	}
 533
 534	/* notify driver */
 535	if (hldev->uld_callbacks.crit_err)
 536		hldev->uld_callbacks.crit_err(
 537			(struct __vxge_hw_device *)hldev,
 538			type, vp_id);
 539out:
 540
 541	return VXGE_HW_OK;
 542}
 543
 544/*
 545 * __vxge_hw_device_handle_link_down_ind
 546 * @hldev: HW device handle.
 547 *
 548 * Link down indication handler. The function is invoked by HW when
 549 * Titan indicates that the link is down.
 550 */
 551static enum vxge_hw_status
 552__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
 553{
 554	/*
  555	 * If the link is already marked down, there is nothing more to do.
 556	 */
 557	if (hldev->link_state == VXGE_HW_LINK_DOWN)
 558		goto exit;
 559
 560	hldev->link_state = VXGE_HW_LINK_DOWN;
 561
 562	/* notify driver */
 563	if (hldev->uld_callbacks.link_down)
 564		hldev->uld_callbacks.link_down(hldev);
 565exit:
 566	return VXGE_HW_OK;
 567}
 568
 569/*
 570 * __vxge_hw_device_handle_link_up_ind
 571 * @hldev: HW device handle.
 572 *
 573 * Link up indication handler. The function is invoked by HW when
  574 * Titan indicates that the link is up for a programmable amount of time.
 575 */
 576static enum vxge_hw_status
 577__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
 578{
 579	/*
  580	 * If the link is already marked up, there is nothing more to do.
 581	 */
 582	if (hldev->link_state == VXGE_HW_LINK_UP)
 583		goto exit;
 584
 585	hldev->link_state = VXGE_HW_LINK_UP;
 586
 587	/* notify driver */
 588	if (hldev->uld_callbacks.link_up)
 589		hldev->uld_callbacks.link_up(hldev);
 590exit:
 591	return VXGE_HW_OK;
 592}
 593
 594/*
 595 * __vxge_hw_vpath_alarm_process - Process Alarms.
 596 * @vpath: Virtual Path.
 597 * @skip_alarms: Do not clear the alarms
 598 *
 599 * Process vpath alarms.
 600 *
 601 */
 602static enum vxge_hw_status
 603__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
 604			      u32 skip_alarms)
 605{
 606	u64 val64;
 607	u64 alarm_status;
 608	u64 pic_status;
 609	struct __vxge_hw_device *hldev = NULL;
 610	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
 611	u64 mask64;
 612	struct vxge_hw_vpath_stats_sw_info *sw_stats;
 613	struct vxge_hw_vpath_reg __iomem *vp_reg;
 614
 615	if (vpath == NULL) {
 616		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
 617			alarm_event);
 618		goto out2;
 619	}
 620
 621	hldev = vpath->hldev;
 622	vp_reg = vpath->vp_reg;
 623	alarm_status = readq(&vp_reg->vpath_general_int_status);
 624
 625	if (alarm_status == VXGE_HW_ALL_FOXES) {
 626		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
 627			alarm_event);
 628		goto out;
 629	}
 630
 631	sw_stats = vpath->sw_stats;
 632
 633	if (alarm_status & ~(
 634		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
 635		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
 636		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
 637		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
 638		sw_stats->error_stats.unknown_alarms++;
 639
 640		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
 641			alarm_event);
 642		goto out;
 643	}
 644
 645	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
 646
 647		val64 = readq(&vp_reg->xgmac_vp_int_status);
 648
 649		if (val64 &
 650		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
 651
 652			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
 653
 654			if (((val64 &
 655			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
 656			     (!(val64 &
 657				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
 658			    ((val64 &
 659			     VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
 660			     (!(val64 &
 661				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
 662				     ))) {
 663				sw_stats->error_stats.network_sustained_fault++;
 664
 665				writeq(
 666				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
 667					&vp_reg->asic_ntwk_vp_err_mask);
 668
 669				__vxge_hw_device_handle_link_down_ind(hldev);
 670				alarm_event = VXGE_HW_SET_LEVEL(
 671					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
 672			}
 673
 674			if (((val64 &
 675			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
 676			     (!(val64 &
 677				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
 678			    ((val64 &
 679			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
 680			     (!(val64 &
 681				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
 682				     ))) {
 683
 684				sw_stats->error_stats.network_sustained_ok++;
 685
 686				writeq(
 687				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
 688					&vp_reg->asic_ntwk_vp_err_mask);
 689
 690				__vxge_hw_device_handle_link_up_ind(hldev);
 691				alarm_event = VXGE_HW_SET_LEVEL(
 692					VXGE_HW_EVENT_LINK_UP, alarm_event);
 693			}
 694
 695			writeq(VXGE_HW_INTR_MASK_ALL,
 696				&vp_reg->asic_ntwk_vp_err_reg);
 697
 698			alarm_event = VXGE_HW_SET_LEVEL(
 699				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
 700
 701			if (skip_alarms)
 702				return VXGE_HW_OK;
 703		}
 704	}
 705
 706	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
 707
 708		pic_status = readq(&vp_reg->vpath_ppif_int_status);
 709
 710		if (pic_status &
 711		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
 712
 713			val64 = readq(&vp_reg->general_errors_reg);
 714			mask64 = readq(&vp_reg->general_errors_mask);
 715
 716			if ((val64 &
 717				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
 718				~mask64) {
 719				sw_stats->error_stats.ini_serr_det++;
 720
 721				alarm_event = VXGE_HW_SET_LEVEL(
 722					VXGE_HW_EVENT_SERR, alarm_event);
 723			}
 724
 725			if ((val64 &
 726			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
 727				~mask64) {
 728				sw_stats->error_stats.dblgen_fifo0_overflow++;
 729
 730				alarm_event = VXGE_HW_SET_LEVEL(
 731					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
 732			}
 733
 734			if ((val64 &
 735			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
 736				~mask64)
 737				sw_stats->error_stats.statsb_pif_chain_error++;
 738
 739			if ((val64 &
 740			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
 741				~mask64)
 742				sw_stats->error_stats.statsb_drop_timeout++;
 743
 744			if ((val64 &
 745				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
 746				~mask64)
 747				sw_stats->error_stats.target_illegal_access++;
 748
 749			if (!skip_alarms) {
 750				writeq(VXGE_HW_INTR_MASK_ALL,
 751					&vp_reg->general_errors_reg);
 752				alarm_event = VXGE_HW_SET_LEVEL(
 753					VXGE_HW_EVENT_ALARM_CLEARED,
 754					alarm_event);
 755			}
 756		}
 757
 758		if (pic_status &
 759		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
 760
 761			val64 = readq(&vp_reg->kdfcctl_errors_reg);
 762			mask64 = readq(&vp_reg->kdfcctl_errors_mask);
 763
 764			if ((val64 &
 765			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
 766				~mask64) {
 767				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
 768
 769				alarm_event = VXGE_HW_SET_LEVEL(
 770					VXGE_HW_EVENT_FIFO_ERR,
 771					alarm_event);
 772			}
 773
 774			if ((val64 &
 775			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
 776				~mask64) {
 777				sw_stats->error_stats.kdfcctl_fifo0_poison++;
 778
 779				alarm_event = VXGE_HW_SET_LEVEL(
 780					VXGE_HW_EVENT_FIFO_ERR,
 781					alarm_event);
 782			}
 783
 784			if ((val64 &
 785			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
 786				~mask64) {
 787				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
 788
 789				alarm_event = VXGE_HW_SET_LEVEL(
 790					VXGE_HW_EVENT_FIFO_ERR,
 791					alarm_event);
 792			}
 793
 794			if (!skip_alarms) {
 795				writeq(VXGE_HW_INTR_MASK_ALL,
 796					&vp_reg->kdfcctl_errors_reg);
 797				alarm_event = VXGE_HW_SET_LEVEL(
 798					VXGE_HW_EVENT_ALARM_CLEARED,
 799					alarm_event);
 800			}
 801		}
 802
 803	}
 804
 805	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
 806
 807		val64 = readq(&vp_reg->wrdma_alarm_status);
 808
 809		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
 810
 811			val64 = readq(&vp_reg->prc_alarm_reg);
 812			mask64 = readq(&vp_reg->prc_alarm_mask);
 813
 814			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
 815				~mask64)
 816				sw_stats->error_stats.prc_ring_bumps++;
 817
 818			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
 819				~mask64) {
 820				sw_stats->error_stats.prc_rxdcm_sc_err++;
 821
 822				alarm_event = VXGE_HW_SET_LEVEL(
 823					VXGE_HW_EVENT_VPATH_ERR,
 824					alarm_event);
 825			}
 826
 827			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
 828				& ~mask64) {
 829				sw_stats->error_stats.prc_rxdcm_sc_abort++;
 830
 831				alarm_event = VXGE_HW_SET_LEVEL(
 832						VXGE_HW_EVENT_VPATH_ERR,
 833						alarm_event);
 834			}
 835
 836			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
 837				 & ~mask64) {
 838				sw_stats->error_stats.prc_quanta_size_err++;
 839
 840				alarm_event = VXGE_HW_SET_LEVEL(
 841					VXGE_HW_EVENT_VPATH_ERR,
 842					alarm_event);
 843			}
 844
 845			if (!skip_alarms) {
 846				writeq(VXGE_HW_INTR_MASK_ALL,
 847					&vp_reg->prc_alarm_reg);
 848				alarm_event = VXGE_HW_SET_LEVEL(
 849						VXGE_HW_EVENT_ALARM_CLEARED,
 850						alarm_event);
 851			}
 852		}
 853	}
 854out:
 855	hldev->stats.sw_dev_err_stats.vpath_alarms++;
 856out2:
 857	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
 858		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
 859		return VXGE_HW_OK;
 860
 861	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
 862
 863	if (alarm_event == VXGE_HW_EVENT_SERR)
 864		return VXGE_HW_ERR_CRITICAL;
 865
 866	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
 867		VXGE_HW_ERR_SLOT_FREEZE :
 868		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
 869		VXGE_HW_ERR_VPATH;
 870}
 871
 872/**
 873 * vxge_hw_device_begin_irq - Begin IRQ processing.
 874 * @hldev: HW device handle.
 875 * @skip_alarms: Do not clear the alarms
 876 * @reason: "Reason" for the interrupt, the value of Titan's
 877 *	general_int_status register.
 878 *
  879 * The function performs two actions. It first checks whether (in the shared
  880 * IRQ case) the interrupt was raised by the device; next, it masks the device interrupts.
 881 *
 882 * Note:
 883 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
 884 * bridge. Therefore, two back-to-back interrupts are potentially possible.
 885 *
  886 * Returns: 0, if the interrupt is not "ours" (note that in this case the
  887 * device remains enabled).
  888 * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
  889 * status.
 890 */
 891enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
 892					     u32 skip_alarms, u64 *reason)
 893{
 894	u32 i;
 895	u64 val64;
 896	u64 adapter_status;
 897	u64 vpath_mask;
 898	enum vxge_hw_status ret = VXGE_HW_OK;
 899
 900	val64 = readq(&hldev->common_reg->titan_general_int_status);
 901
 902	if (unlikely(!val64)) {
 903		/* not Titan interrupt	*/
 904		*reason	= 0;
 905		ret = VXGE_HW_ERR_WRONG_IRQ;
 906		goto exit;
 907	}
 908
 909	if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
 910
 911		adapter_status = readq(&hldev->common_reg->adapter_status);
 912
 913		if (adapter_status == VXGE_HW_ALL_FOXES) {
 914
 915			__vxge_hw_device_handle_error(hldev,
 916				NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
 917			*reason	= 0;
 918			ret = VXGE_HW_ERR_SLOT_FREEZE;
 919			goto exit;
 920		}
 921	}
 922
 923	hldev->stats.sw_dev_info_stats.total_intr_cnt++;
 924
 925	*reason	= val64;
 926
 927	vpath_mask = hldev->vpaths_deployed >>
 928				(64 - VXGE_HW_MAX_VIRTUAL_PATHS);
 929
 930	if (val64 &
 931	    VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
 932		hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
 933
 934		return VXGE_HW_OK;
 935	}
 936
 937	hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
 938
 939	if (unlikely(val64 &
 940			VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
 941
 942		enum vxge_hw_status error_level = VXGE_HW_OK;
 943
 944		hldev->stats.sw_dev_err_stats.vpath_alarms++;
 945
 946		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
 947
 948			if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
 949				continue;
 950
 951			ret = __vxge_hw_vpath_alarm_process(
 952				&hldev->virtual_paths[i], skip_alarms);
 953
 954			error_level = VXGE_HW_SET_LEVEL(ret, error_level);
 955
 956			if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
 957				(ret == VXGE_HW_ERR_SLOT_FREEZE)))
 958				break;
 959		}
 960
 961		ret = error_level;
 962	}
 963exit:
 964	return ret;
 965}
 966
 967/**
 968 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
 969 * condition that has caused the Tx and RX interrupt.
 970 * @hldev: HW device.
 971 *
 972 * Acknowledge (that is, clear) the condition that has caused
 973 * the Tx and Rx interrupt.
 974 * See also: vxge_hw_device_begin_irq(),
 975 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
 976 */
 977void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
 978{
 979
 980	if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
 981	   (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
 982		writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
 983				 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
 984				&hldev->common_reg->tim_int_status0);
 985	}
 986
 987	if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
 988	   (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
 989		__vxge_hw_pio_mem_write32_upper(
 990				(hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
 991				 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
 992				&hldev->common_reg->tim_int_status1);
 993	}
 994}
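
/*
 * How the two entry points above fit together in a legacy (INTA) interrupt
 * handler - a minimal sketch only (the handler name is hypothetical, NAPI
 * scheduling and the per-vpath processing done by the LL driver are elided,
 * and <linux/interrupt.h> is assumed for the irqreturn_t definitions):
 */
#if 0
static irqreturn_t example_vxge_isr(int irq, void *dev_id)
{
	struct __vxge_hw_device *hldev = dev_id;
	enum vxge_hw_status status;
	u64 reason;

	status = vxge_hw_device_begin_irq(hldev, 0, &reason);
	if (status == VXGE_HW_ERR_WRONG_IRQ)
		return IRQ_NONE;	/* shared IRQ, not raised by Titan */

	if (reason) {
		/* ... process Tx/Rx completions for the deployed vpaths ... */
		vxge_hw_device_clear_tx_rx(hldev);
	}

	return IRQ_HANDLED;
}
#endif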
 995
 996/*
 997 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
 998 * @channel: Channel
 999 * @dtrh: Buffer to return the DTR pointer
1000 *
1001 * Allocates a dtr from the reserve array. If the reserve array is empty,
1002 * it swaps the reserve and free arrays.
1003 *
1004 */
1005static enum vxge_hw_status
1006vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
1007{
1008	void **tmp_arr;
1009
1010	if (channel->reserve_ptr - channel->reserve_top > 0) {
1011_alloc_after_swap:
1012		*dtrh =	channel->reserve_arr[--channel->reserve_ptr];
1013
1014		return VXGE_HW_OK;
1015	}
1016
1017	/* switch between empty	and full arrays	*/
1018
1019	/* the idea behind such	a design is that by having free	and reserved
1020	 * arrays separated we basically separated irq and non-irq parts.
1021	 * i.e.	no additional lock need	to be done when	we free	a resource */
1022
1023	if (channel->length - channel->free_ptr > 0) {
1024
1025		tmp_arr	= channel->reserve_arr;
1026		channel->reserve_arr = channel->free_arr;
1027		channel->free_arr = tmp_arr;
1028		channel->reserve_ptr = channel->length;
1029		channel->reserve_top = channel->free_ptr;
1030		channel->free_ptr = channel->length;
1031
1032		channel->stats->reserve_free_swaps_cnt++;
1033
1034		goto _alloc_after_swap;
1035	}
1036
1037	channel->stats->full_cnt++;
1038
1039	*dtrh =	NULL;
1040	return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
1041}
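
/*
 * Worked example of the reserve/free swap above (hypothetical numbers):
 * with length = 4, reserve_ptr = reserve_top = 0 and free_ptr = 1 (three
 * DTRs already returned via vxge_hw_channel_dtr_free()), the arrays are
 * exchanged so that reserve_ptr becomes 4, reserve_top becomes 1 and
 * free_ptr becomes 4 - leaving 4 - 1 = 3 DTRs allocatable without taking
 * any lock shared with the free path.
 */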
1042
1043/*
1044 * vxge_hw_channel_dtr_post - Post a dtr to the channel
1045 * @channelh: Channel
1046 * @dtrh: DTR pointer
1047 *
1048 * Posts a dtr to work array.
1049 *
1050 */
1051static void
1052vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
1053{
1054	vxge_assert(channel->work_arr[channel->post_index] == NULL);
1055
1056	channel->work_arr[channel->post_index++] = dtrh;
1057
1058	/* wrap-around */
1059	if (channel->post_index	== channel->length)
1060		channel->post_index = 0;
1061}
1062
1063/*
1064 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
1065 * @channel: Channel
1066 * @dtr: Buffer to return the next completed DTR pointer
1067 *
 1068 * Returns the next completed dtr without removing it from the work array.
1069 *
1070 */
1071void
1072vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
1073{
1074	vxge_assert(channel->compl_index < channel->length);
1075
1076	*dtrh =	channel->work_arr[channel->compl_index];
1077	prefetch(*dtrh);
1078}
1079
1080/*
1081 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
1082 * @channel: Channel handle
1083 *
1084 * Removes the next completed dtr from work array
1085 *
1086 */
1087void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1088{
1089	channel->work_arr[channel->compl_index]	= NULL;
1090
1091	/* wrap-around */
1092	if (++channel->compl_index == channel->length)
1093		channel->compl_index = 0;
1094
1095	channel->stats->total_compl_cnt++;
1096}
1097
1098/*
1099 * vxge_hw_channel_dtr_free - Frees a dtr
1100 * @channel: Channel handle
1101 * @dtr:  DTR pointer
1102 *
 1103 * Returns the dtr to the free array
1104 *
1105 */
1106void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
1107{
1108	channel->free_arr[--channel->free_ptr] = dtrh;
1109}
1110
1111/*
1112 * vxge_hw_channel_dtr_count
1113 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
1114 *
 1115 * Retrieve the number of DTRs available. This function cannot be called
 1116 * from the data path. ring_initial_replenishi() is the only user.
1117 */
1118int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
1119{
1120	return (channel->reserve_ptr - channel->reserve_top) +
1121		(channel->length - channel->free_ptr);
1122}
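
/*
 * Worked example for the count above (hypothetical numbers): with
 * length = 8, reserve_ptr = 3, reserve_top = 0 and free_ptr = 6, the
 * channel still has (3 - 0) + (8 - 6) = 5 DTRs available.
 */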
1123
1124/**
1125 * vxge_hw_ring_rxd_reserve	- Reserve ring descriptor.
1126 * @ring: Handle to the ring object used for receive
1127 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
1128 * with a valid handle.
1129 *
 1130 * Reserve an Rx descriptor for subsequent filling-in by the driver
1131 * and posting on the corresponding channel (@channelh)
1132 * via vxge_hw_ring_rxd_post().
1133 *
1134 * Returns: VXGE_HW_OK - success.
1135 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
1136 *
1137 */
1138enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1139	void **rxdh)
1140{
1141	enum vxge_hw_status status;
1142	struct __vxge_hw_channel *channel;
1143
1144	channel = &ring->channel;
1145
1146	status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1147
1148	if (status == VXGE_HW_OK) {
1149		struct vxge_hw_ring_rxd_1 *rxdp =
1150			(struct vxge_hw_ring_rxd_1 *)*rxdh;
1151
1152		rxdp->control_0	= rxdp->control_1 = 0;
1153	}
1154
1155	return status;
1156}
1157
1158/**
1159 * vxge_hw_ring_rxd_free - Free descriptor.
1160 * @ring: Handle to the ring object used for receive
1161 * @rxdh: Descriptor handle.
1162 *
1163 * Free	the reserved descriptor. This operation is "symmetrical" to
1164 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
1165 * lifecycle.
1166 *
1167 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
1168 * be:
1169 *
1170 * - reserved (vxge_hw_ring_rxd_reserve);
1171 *
1172 * - posted	(vxge_hw_ring_rxd_post);
1173 *
1174 * - completed (vxge_hw_ring_rxd_next_completed);
1175 *
1176 * - and recycled again	(vxge_hw_ring_rxd_free).
1177 *
1178 * For alternative state transitions and more details please refer to
1179 * the design doc.
1180 *
1181 */
1182void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1183{
1184	struct __vxge_hw_channel *channel;
1185
1186	channel = &ring->channel;
1187
1188	vxge_hw_channel_dtr_free(channel, rxdh);
1189
1190}
1191
1192/**
1193 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
1194 * @ring: Handle to the ring object used for receive
1195 * @rxdh: Descriptor handle.
1196 *
1197 * This routine prepares a rxd and posts
1198 */
1199void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1200{
1201	struct __vxge_hw_channel *channel;
1202
1203	channel = &ring->channel;
1204
1205	vxge_hw_channel_dtr_post(channel, rxdh);
1206}
1207
1208/**
1209 * vxge_hw_ring_rxd_post_post - Process rxd after post.
1210 * @ring: Handle to the ring object used for receive
1211 * @rxdh: Descriptor handle.
1212 *
1213 * Processes rxd after post
1214 */
1215void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1216{
1217	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1218	struct __vxge_hw_channel *channel;
1219
1220	channel = &ring->channel;
1221
1222	rxdp->control_0	= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1223
1224	if (ring->stats->common_stats.usage_cnt > 0)
1225		ring->stats->common_stats.usage_cnt--;
1226}
1227
1228/**
1229 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
1230 * @ring: Handle to the ring object used for receive
1231 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
1232 *
1233 * Post	descriptor on the ring.
 1234 * Prior to posting, the descriptor should be filled in accordance with
1235 * Host/Titan interface specification for a given service (LL, etc.).
1236 *
1237 */
1238void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
1239{
1240	struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1241	struct __vxge_hw_channel *channel;
1242
1243	channel = &ring->channel;
1244
1245	wmb();
1246	rxdp->control_0	= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1247
1248	vxge_hw_channel_dtr_post(channel, rxdh);
1249
1250	if (ring->stats->common_stats.usage_cnt > 0)
1251		ring->stats->common_stats.usage_cnt--;
1252}
1253
1254/**
1255 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
1256 * @ring: Handle to the ring object used for receive
1257 * @rxdh: Descriptor handle.
1258 *
1259 * Processes rxd after post with memory barrier.
1260 */
1261void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
1262{
1263	wmb();
1264	vxge_hw_ring_rxd_post_post(ring, rxdh);
1265}
1266
1267/**
1268 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
1269 * @ring: Handle to the ring object used for receive
1270 * @rxdh: Descriptor handle. Returned by HW.
1271 * @t_code:	Transfer code, as per Titan User Guide,
1272 *	 Receive Descriptor Format. Returned by HW.
1273 *
1274 * Retrieve the	_next_ completed descriptor.
 1275 * HW uses ring callback (*vxge_hw_ring_callback_f) to notify
1276 * driver of new completed descriptors. After that
 1277 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest of the
1278 * completions (the very first completion is passed by HW via
1279 * vxge_hw_ring_callback_f).
1280 *
1281 * Implementation-wise, the driver is free to call
1282 * vxge_hw_ring_rxd_next_completed either immediately from inside the
1283 * ring callback, or in a deferred fashion and separate (from HW)
1284 * context.
1285 *
1286 * Non-zero @t_code means failure to fill-in receive buffer(s)
1287 * of the descriptor.
1288 * For instance, parity	error detected during the data transfer.
1289 * In this case	Titan will complete the descriptor and indicate
 1290 * to the host that the received data is not to be used.
1291 * For details please refer to Titan User Guide.
1292 *
1293 * Returns: VXGE_HW_OK - success.
1294 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1295 * are currently available for processing.
1296 *
1297 * See also: vxge_hw_ring_callback_f{},
1298 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
1299 */
1300enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1301	struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
1302{
1303	struct __vxge_hw_channel *channel;
1304	struct vxge_hw_ring_rxd_1 *rxdp;
1305	enum vxge_hw_status status = VXGE_HW_OK;
1306	u64 control_0, own;
1307
1308	channel = &ring->channel;
1309
1310	vxge_hw_channel_dtr_try_complete(channel, rxdh);
1311
1312	rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
1313	if (rxdp == NULL) {
1314		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1315		goto exit;
1316	}
1317
1318	control_0 = rxdp->control_0;
1319	own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1320	*t_code	= (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
1321
1322	/* check whether it is not the end */
1323	if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1324
1325		vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
1326				0);
1327
1328		++ring->cmpl_cnt;
1329		vxge_hw_channel_dtr_complete(channel);
1330
1331		vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
1332
1333		ring->stats->common_stats.usage_cnt++;
1334		if (ring->stats->common_stats.usage_max <
1335				ring->stats->common_stats.usage_cnt)
1336			ring->stats->common_stats.usage_max =
1337				ring->stats->common_stats.usage_cnt;
1338
1339		status = VXGE_HW_OK;
1340		goto exit;
1341	}
1342
1343	/* reset it. since we don't want to return
1344	 * garbage to the driver */
1345	*rxdh =	NULL;
1346	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1347exit:
1348	return status;
1349}
1350
1351/**
1352 * vxge_hw_ring_handle_tcode - Handle transfer code.
1353 * @ring: Handle to the ring object used for receive
1354 * @rxdh: Descriptor handle.
1355 * @t_code: One of the enumerated (and documented in the Titan user guide)
1356 * "transfer codes".
1357 *
1358 * Handle descriptor's transfer code. The latter comes with each completed
1359 * descriptor.
1360 *
1361 * Returns: one of the enum vxge_hw_status{} enumerated types.
1362 * VXGE_HW_OK			- for success.
 1363 * VXGE_HW_ERR_CRITICAL         - when a critical error is encountered.
1364 */
1365enum vxge_hw_status vxge_hw_ring_handle_tcode(
1366	struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1367{
1368	struct __vxge_hw_channel *channel;
1369	enum vxge_hw_status status = VXGE_HW_OK;
1370
1371	channel = &ring->channel;
1372
 1373	/* If the t_code is not supported, and it is not 0x5 (an
 1374	 * unparseable packet, e.g. one with an unknown IPv6 header),
 1375	 * drop the frame.
 1376	 */
1377
1378	if (t_code ==  VXGE_HW_RING_T_CODE_OK ||
1379		t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1380		status = VXGE_HW_OK;
1381		goto exit;
1382	}
1383
1384	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1385		status = VXGE_HW_ERR_INVALID_TCODE;
1386		goto exit;
1387	}
1388
1389	ring->stats->rxd_t_code_err_cnt[t_code]++;
1390exit:
1391	return status;
1392}
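
/*
 * The ring calls above are normally driven from a completion loop in the
 * LL driver.  A minimal sketch only (buffer refill and the hand-off to the
 * network stack are driver specific and elided; the function name is
 * hypothetical):
 */
#if 0
static void example_ring_poll(struct __vxge_hw_ring *ring)
{
	void *rxdh;
	u8 t_code;

	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
	       VXGE_HW_OK) {
		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) == VXGE_HW_OK) {
			/* ... hand the received buffer to the stack and
			 * attach a fresh buffer to the descriptor ... */
		}
		/* in either case the descriptor goes back on the ring */
		vxge_hw_ring_rxd_post(ring, rxdh);
	}
}
#endif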
1393
1394/**
1395 * __vxge_hw_non_offload_db_post - Post non offload doorbell
1396 *
1397 * @fifo: fifohandle
1398 * @txdl_ptr: The starting location of the TxDL in host memory
1399 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1400 * @no_snoop: No snoop flags
1401 *
1402 * This function posts a non-offload doorbell to doorbell FIFO
1403 *
1404 */
1405static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1406	u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1407{
1408	struct __vxge_hw_channel *channel;
1409
1410	channel = &fifo->channel;
1411
1412	writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1413		VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1414		VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1415		&fifo->nofl_db->control_0);
1416
1417	mmiowb();
1418
1419	writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1420
1421	mmiowb();
1422}
1423
1424/**
1425 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1426 * the fifo
1427 * @fifoh: Handle to the fifo object used for non offload send
1428 */
1429u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1430{
1431	return vxge_hw_channel_dtr_count(&fifoh->channel);
1432}
1433
1434/**
1435 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1436 * @fifoh: Handle to the fifo object used for non offload send
1437 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1438 *        with a valid handle.
1439 * @txdl_priv: Buffer to return the pointer to per txdl space
1440 *
 1441 * Reserve a single TxDL (that is, fifo descriptor)
 1442 * for subsequent filling-in by the driver
1443 * and posting on the corresponding channel (@channelh)
1444 * via vxge_hw_fifo_txdl_post().
1445 *
 1446 * Note: it is the responsibility of the driver to reserve multiple descriptors
1447 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1448 * carries up to configured number (fifo.max_frags) of contiguous buffers.
1449 *
1450 * Returns: VXGE_HW_OK - success;
1451 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1452 *
1453 */
1454enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1455	struct __vxge_hw_fifo *fifo,
1456	void **txdlh, void **txdl_priv)
1457{
1458	struct __vxge_hw_channel *channel;
1459	enum vxge_hw_status status;
1460	int i;
1461
1462	channel = &fifo->channel;
1463
1464	status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1465
1466	if (status == VXGE_HW_OK) {
1467		struct vxge_hw_fifo_txd *txdp =
1468			(struct vxge_hw_fifo_txd *)*txdlh;
1469		struct __vxge_hw_fifo_txdl_priv *priv;
1470
1471		priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1472
1473		/* reset the TxDL's private */
1474		priv->align_dma_offset = 0;
1475		priv->align_vaddr_start = priv->align_vaddr;
1476		priv->align_used_frags = 0;
1477		priv->frags = 0;
1478		priv->alloc_frags = fifo->config->max_frags;
1479		priv->next_txdl_priv = NULL;
1480
1481		*txdl_priv = (void *)(size_t)txdp->host_control;
1482
1483		for (i = 0; i < fifo->config->max_frags; i++) {
1484			txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1485			txdp->control_0 = txdp->control_1 = 0;
1486		}
1487	}
1488
1489	return status;
1490}
1491
1492/**
1493 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1494 * descriptor.
1495 * @fifo: Handle to the fifo object used for non offload send
1496 * @txdlh: Descriptor handle.
1497 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1498 *            (of buffers).
1499 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1500 * @size: Size of the data buffer (in bytes).
1501 *
1502 * This API is part of the preparation of the transmit descriptor for posting
1503 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1504 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1505 * All three APIs fill in the fields of the fifo descriptor,
1506 * in accordance with the Titan specification.
1507 *
1508 */
1509void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1510				  void *txdlh, u32 frag_idx,
1511				  dma_addr_t dma_pointer, u32 size)
1512{
1513	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1514	struct vxge_hw_fifo_txd *txdp, *txdp_last;
1515	struct __vxge_hw_channel *channel;
1516
1517	channel = &fifo->channel;
1518
1519	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1520	txdp = (struct vxge_hw_fifo_txd *)txdlh  +  txdl_priv->frags;
1521
1522	if (frag_idx != 0)
1523		txdp->control_0 = txdp->control_1 = 0;
1524	else {
1525		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1526			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1527		txdp->control_1 |= fifo->interrupt_type;
1528		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1529			fifo->tx_intr_num);
1530		if (txdl_priv->frags) {
1531			txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +
1532			(txdl_priv->frags - 1);
1533			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1534				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1535		}
1536	}
1537
1538	vxge_assert(frag_idx < txdl_priv->alloc_frags);
1539
1540	txdp->buffer_pointer = (u64)dma_pointer;
1541	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1542	fifo->stats->total_buffers++;
1543	txdl_priv->frags++;
1544}
1545
1546/**
1547 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1548 * @fifo: Handle to the fifo object used for non offload send
1549 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1550 * @frags: Number of contiguous buffers that are part of a single
1551 *         transmit operation.
1552 *
1553 * Post descriptor on the 'fifo' type channel for transmission.
 1554 * Prior to posting, the descriptor should be filled in accordance with
1555 * Host/Titan interface specification for a given service (LL, etc.).
1556 *
1557 */
1558void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1559{
1560	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1561	struct vxge_hw_fifo_txd *txdp_last;
1562	struct vxge_hw_fifo_txd *txdp_first;
1563	struct __vxge_hw_channel *channel;
1564
1565	channel = &fifo->channel;
1566
1567	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1568	txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
1569
1570	txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +  (txdl_priv->frags - 1);
1571	txdp_last->control_0 |=
1572	      VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1573	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1574
1575	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1576
1577	__vxge_hw_non_offload_db_post(fifo,
1578		(u64)txdl_priv->dma_addr,
1579		txdl_priv->frags - 1,
1580		fifo->no_snoop_bits);
1581
1582	fifo->stats->total_posts++;
1583	fifo->stats->common_stats.usage_cnt++;
1584	if (fifo->stats->common_stats.usage_max <
1585		fifo->stats->common_stats.usage_cnt)
1586		fifo->stats->common_stats.usage_max =
1587			fifo->stats->common_stats.usage_cnt;
1588}
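
/*
 * Putting the fifo calls together - a minimal single-fragment transmit
 * sketch only (DMA mapping and the skb plumbing that the LL driver performs
 * are elided; the function and variable names are hypothetical):
 */
#if 0
static enum vxge_hw_status example_fifo_xmit(struct __vxge_hw_fifo *fifo,
					     dma_addr_t dma_addr, u32 len)
{
	enum vxge_hw_status status;
	void *txdlh;
	void *txdl_priv;

	status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
	if (status != VXGE_HW_OK)
		return status;	/* out of descriptors */

	/* fragment 0: the only buffer of this TxDL */
	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);

	/* hand the TxDL to the adapter and ring the doorbell */
	vxge_hw_fifo_txdl_post(fifo, txdlh);

	return VXGE_HW_OK;
}
#endif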
1589
1590/**
1591 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1592 * @fifo: Handle to the fifo object used for non offload send
1593 * @txdlh: Descriptor handle. Returned by HW.
1594 * @t_code: Transfer code, as per Titan User Guide,
1595 *          Transmit Descriptor Format.
1596 *          Returned by HW.
1597 *
1598 * Retrieve the _next_ completed descriptor.
 1599 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
1600 * driver of new completed descriptors. After that
 1601 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest of the
1602 * completions (the very first completion is passed by HW via
1603 * vxge_hw_channel_callback_f).
1604 *
1605 * Implementation-wise, the driver is free to call
1606 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1607 * channel callback, or in a deferred fashion and separate (from HW)
1608 * context.
1609 *
1610 * Non-zero @t_code means failure to process the descriptor.
1611 * The failure could happen, for instance, when the link is
1612 * down, in which case Titan completes the descriptor because it
1613 * is not able to send the data out.
1614 *
1615 * For details please refer to Titan User Guide.
1616 *
1617 * Returns: VXGE_HW_OK - success.
1618 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1619 * are currently available for processing.
1620 *
1621 */
1622enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1623	struct __vxge_hw_fifo *fifo, void **txdlh,
1624	enum vxge_hw_fifo_tcode *t_code)
1625{
1626	struct __vxge_hw_channel *channel;
1627	struct vxge_hw_fifo_txd *txdp;
1628	enum vxge_hw_status status = VXGE_HW_OK;
1629
1630	channel = &fifo->channel;
1631
1632	vxge_hw_channel_dtr_try_complete(channel, txdlh);
1633
1634	txdp = (struct vxge_hw_fifo_txd *)*txdlh;
1635	if (txdp == NULL) {
1636		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1637		goto exit;
1638	}
1639
1640	/* check whether host owns it */
1641	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1642
1643		vxge_assert(txdp->host_control != 0);
1644
1645		vxge_hw_channel_dtr_complete(channel);
1646
1647		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1648
1649		if (fifo->stats->common_stats.usage_cnt > 0)
1650			fifo->stats->common_stats.usage_cnt--;
1651
1652		status = VXGE_HW_OK;
1653		goto exit;
1654	}
1655
1656	/* no more completions */
1657	*txdlh = NULL;
1658	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1659exit:
1660	return status;
1661}
1662
1663/**
1664 * vxge_hw_fifo_handle_tcode - Handle transfer code.
1665 * @fifo: Handle to the fifo object used for non offload send
1666 * @txdlh: Descriptor handle.
1667 * @t_code: One of the enumerated (and documented in the Titan user guide)
1668 *          "transfer codes".
1669 *
1670 * Handle descriptor's transfer code. The latter comes with each completed
1671 * descriptor.
1672 *
1673 * Returns: one of the enum vxge_hw_status{} enumerated types.
1674 * VXGE_HW_OK - for success.
 1675 * VXGE_HW_ERR_CRITICAL - when a critical error is encountered.
1676 */
1677enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1678					      void *txdlh,
1679					      enum vxge_hw_fifo_tcode t_code)
1680{
1681	struct __vxge_hw_channel *channel;
1682
1683	enum vxge_hw_status status = VXGE_HW_OK;
1684	channel = &fifo->channel;
1685
 1686	if ((t_code & 0x7) > 0x4) {
1687		status = VXGE_HW_ERR_INVALID_TCODE;
1688		goto exit;
1689	}
1690
1691	fifo->stats->txd_t_code_err_cnt[t_code]++;
1692exit:
1693	return status;
1694}
1695
1696/**
1697 * vxge_hw_fifo_txdl_free - Free descriptor.
1698 * @fifo: Handle to the fifo object used for non offload send
1699 * @txdlh: Descriptor handle.
1700 *
1701 * Free the reserved descriptor. This operation is "symmetrical" to
1702 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1703 * lifecycle.
1704 *
1705 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1706 * be:
1707 *
1708 * - reserved (vxge_hw_fifo_txdl_reserve);
1709 *
1710 * - posted (vxge_hw_fifo_txdl_post);
1711 *
1712 * - completed (vxge_hw_fifo_txdl_next_completed);
1713 *
1714 * - and recycled again (vxge_hw_fifo_txdl_free).
1715 *
1716 * For alternative state transitions and more details please refer to
1717 * the design doc.
1718 *
1719 */
1720void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1721{
1722	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1723	u32 max_frags;
1724	struct __vxge_hw_channel *channel;
1725
1726	channel = &fifo->channel;
1727
1728	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1729			(struct vxge_hw_fifo_txd *)txdlh);
1730
1731	max_frags = fifo->config->max_frags;
1732
1733	vxge_hw_channel_dtr_free(channel, txdlh);
1734}
1735
1736/**
1737 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1738 *               to MAC address table.
1739 * @vp: Vpath handle.
1740 * @macaddr: MAC address to be added for this vpath into the list
1741 * @macaddr_mask: MAC address mask for macaddr
1742 * @duplicate_mode: Duplicate MAC address add mode. Please see
1743 *             enum vxge_hw_vpath_mac_addr_add_mode{}
1744 *
1745 * Adds the given mac address and mac address mask into the list for this
1746 * vpath.
1747 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1748 * vxge_hw_vpath_mac_addr_get_next
1749 *
1750 */
1751enum vxge_hw_status
1752vxge_hw_vpath_mac_addr_add(
1753	struct __vxge_hw_vpath_handle *vp,
1754	u8 (macaddr)[ETH_ALEN],
1755	u8 (macaddr_mask)[ETH_ALEN],
1756	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1757{
1758	u32 i;
1759	u64 data1 = 0ULL;
1760	u64 data2 = 0ULL;
1761	enum vxge_hw_status status = VXGE_HW_OK;
1762
1763	if (vp == NULL) {
1764		status = VXGE_HW_ERR_INVALID_HANDLE;
1765		goto exit;
1766	}
1767
1768	for (i = 0; i < ETH_ALEN; i++) {
1769		data1 <<= 8;
1770		data1 |= (u8)macaddr[i];
1771
1772		data2 <<= 8;
1773		data2 |= (u8)macaddr_mask[i];
1774	}
1775
1776	switch (duplicate_mode) {
1777	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1778		i = 0;
1779		break;
1780	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1781		i = 1;
1782		break;
1783	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1784		i = 2;
1785		break;
1786	default:
1787		i = 0;
1788		break;
1789	}
1790
1791	status = __vxge_hw_vpath_rts_table_set(vp,
1792			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1793			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1794			0,
1795			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1796			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1797			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1798exit:
1799	return status;
1800}
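
/*
 * Worked example of the byte packing above (hypothetical address): for
 * macaddr = 00:0c:29:aa:bb:cc with an all-ones mask, the loop produces
 * data1 = 0x00000c29aabbcc and data2 = 0x0000ffffffffffff, i.e. the six
 * octets land MSB-first in the low 48 bits of each word before being
 * wrapped by the RTS_ACCESS_STEER_DATA macros.
 */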
1801
1802/**
1803 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1804 *               from MAC address table.
1805 * @vp: Vpath handle.
1806 * @macaddr: First MAC address entry for this vpath in the list
1807 * @macaddr_mask: MAC address mask for macaddr
1808 *
1809 * Returns the first mac address and mac address mask in the list for this
1810 * vpath.
1811 * see also: vxge_hw_vpath_mac_addr_get_next
1812 *
1813 */
1814enum vxge_hw_status
1815vxge_hw_vpath_mac_addr_get(
1816	struct __vxge_hw_vpath_handle *vp,
1817	

Large files are truncated; the remainder of vxge-traffic.c is not shown here.