/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-dev.h"
#include "iwl-agn.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-trans-int-pcie.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ
 * index and fire the RX interrupt. The driver can then query the READ index
 * and process as many packets as possible, moving the WRITE index forward as
 * it resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish the iwl->rxq->rx_free.
 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
 *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
 *   INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
 *   were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rx_queue_alloc()   Allocates rx_free
 * iwl_rx_replenish()     Replenishes rx_free list from rx_used, and calls
 *                        iwl_rx_queue_restock
 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
 *                        queue, updates firmware pointers, and updates
 *                        the WRITE index. If insufficient rx_free buffers
 *                        are available, schedules iwl_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx()         Detach iwl_rx_mem_buffers from pool up to the
 *                        READ INDEX, detaching the SKB from the pool.
 *                        Moves the packet buffer from queue to rx_used.
 *                        Calls iwl_rx_queue_restock to refill any empty
 *                        slots.
 * ...
 *
 */
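/*
 * A worked example of the index rules above (a sketch only; it assumes
 * RX_QUEUE_SIZE == 256, the value used by this hardware family):
 *
 *   read == 10, write == 9   -> WRITE == READ - 1, queue empty
 *   read == 10, write == 10  -> WRITE == READ, queue full
 *   read == 4,  write == 250 -> raw space is 4 - 250 = -246; wrapping
 *                               adds 256 to give 10 slots, and the
 *                               2-slot guard band kept by
 *                               iwl_rx_queue_space() below reports 8,
 *                               so full can never be mistaken for empty.
 */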
/**
 * iwl_rx_queue_space - Return number of free slots available in queue.
 */
static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
{
        int s = q->read - q->write;
        if (s <= 0)
                s += RX_QUEUE_SIZE;
        /* keep some buffer to not confuse full and empty queue */
        s -= 2;
        if (s < 0)
                s = 0;
        return s;
}

/**
 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
 */
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
                        struct iwl_rx_queue *q)
{
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&q->lock, flags);

        if (q->need_update == 0)
                goto exit_unlock;

        if (priv->cfg->base_params->shadow_reg_enable) {
                /* shadow register enabled */
                /* Device expects a multiple of 8 */
                q->write_actual = (q->write & ~0x7);
                iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write_actual);
        } else {
                /* If power-saving is in use, make sure device is awake */
                if (test_bit(STATUS_POWER_PMI, &priv->status)) {
                        reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

                        if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                                IWL_DEBUG_INFO(priv,
                                        "Rx queue requesting wakeup,"
                                        " GP1 = 0x%x\n", reg);
                                iwl_set_bit(priv, CSR_GP_CNTRL,
                                        CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                                goto exit_unlock;
                        }

                        q->write_actual = (q->write & ~0x7);
                        iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
                                        q->write_actual);

                /* Else device is assumed to be awake */
                } else {
                        /* Device expects a multiple of 8 */
                        q->write_actual = (q->write & ~0x7);
                        iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
                                q->write_actual);
                }
        }
        q->need_update = 0;

 exit_unlock:
        spin_unlock_irqrestore(&q->lock, flags);
}

/**
 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
                                          dma_addr_t dma_addr)
{
        return cpu_to_le32((u32)(dma_addr >> 8));
}
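/*
 * The RBD holds only dma_addr >> 8, so the conversion above is lossless
 * only because every Rx buffer is 256-byte aligned and its bus address
 * fits in 36 bits -- both properties are enforced by BUG_ON()s in
 * iwlagn_rx_allocate() below. A minimal sketch of the round-trip
 * invariant this relies on (illustrative fragment only, never compiled):
 */
#if 0
        dma_addr_t addr = rxb->page_dma;
        __le32 rbd = iwlagn_dma_addr2rbd_ptr(priv, addr);

        /* DMA_BIT_MASK(8) == 0xFF: a clear low byte means 256-byte
         * alignment, so shifting the stored value back up recovers
         * the original address */
        WARN_ON(((dma_addr_t)le32_to_cpu(rbd) << 8) != addr);
#endif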
/**
 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
{
        struct iwl_rx_queue *rxq = &priv->rxq;
        struct list_head *element;
        struct iwl_rx_mem_buffer *rxb;
        unsigned long flags;

        spin_lock_irqsave(&rxq->lock, flags);
        while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
                /* The overwritten rxb must be a used one */
                rxb = rxq->queue[rxq->write];
                BUG_ON(rxb && rxb->page);

                /* Get next free Rx buffer, remove from free list */
                element = rxq->rx_free.next;
                rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
                list_del(element);

                /* Point to Rx buffer via next RBD in circular buffer */
                rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
                                                              rxb->page_dma);
                rxq->queue[rxq->write] = rxb;
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
        }
        spin_unlock_irqrestore(&rxq->lock, flags);
        /* If the pre-allocated buffer pool is dropping low, schedule to
         * refill it */
        if (rxq->free_count <= RX_LOW_WATERMARK)
                queue_work(priv->workqueue, &priv->rx_replenish);


        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock_irqsave(&rxq->lock, flags);
                rxq->need_update = 1;
                spin_unlock_irqrestore(&rxq->lock, flags);
                iwl_rx_queue_update_write_ptr(priv, rxq);
        }
}
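/*
 * The "& ~0x7" masking used for write_actual above (and in
 * iwl_rx_queue_update_write_ptr()) exists because the device only
 * accepts write pointers in multiples of 8. As a sketch: write == 13
 * gives write_actual == 8, so the device is told about slots up to 7
 * only; the remaining slots are announced once write reaches 16.
 */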
/**
 * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free, an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
        struct iwl_rx_queue *rxq = &priv->rxq;
        struct list_head *element;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
        unsigned long flags;
        gfp_t gfp_mask = priority;

        while (1) {
                spin_lock_irqsave(&rxq->lock, flags);
                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&rxq->lock, flags);

                if (rxq->free_count > RX_LOW_WATERMARK)
                        gfp_mask |= __GFP_NOWARN;

                if (priv->hw_params.rx_page_order > 0)
                        gfp_mask |= __GFP_COMP;

                /* Alloc a new receive buffer */
                page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
                if (!page) {
                        if (net_ratelimit())
                                IWL_DEBUG_INFO(priv, "alloc_pages failed, "
                                           "order: %d\n",
                                           priv->hw_params.rx_page_order);

                        if ((rxq->free_count <= RX_LOW_WATERMARK) &&
                            net_ratelimit())
                                IWL_CRIT(priv, "Failed to alloc_pages with %s. "
                                         "Only %u free buffers remaining.\n",
                                         priority == GFP_ATOMIC ?
                                         "GFP_ATOMIC" : "GFP_KERNEL",
                                         rxq->free_count);
                        /* We don't reschedule replenish work here -- we will
                         * call the restock method and if it still needs
                         * more buffers it will schedule replenish */
                        return;
                }

                spin_lock_irqsave(&rxq->lock, flags);

                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        __free_pages(page, priv->hw_params.rx_page_order);
                        return;
                }
                element = rxq->rx_used.next;
                rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
                list_del(element);

                spin_unlock_irqrestore(&rxq->lock, flags);

                BUG_ON(rxb->page);
                rxb->page = page;
                /* Get physical address of the RB */
                rxb->page_dma = dma_map_page(priv->bus->dev, page, 0,
                                PAGE_SIZE << priv->hw_params.rx_page_order,
                                DMA_FROM_DEVICE);
                /* dma address must be no more than 36 bits */
                BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
                /* and also 256 byte aligned! */
                BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

                spin_lock_irqsave(&rxq->lock, flags);

                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;

                spin_unlock_irqrestore(&rxq->lock, flags);
        }
}

void iwlagn_rx_replenish(struct iwl_priv *priv)
{
        unsigned long flags;

        iwlagn_rx_allocate(priv, GFP_KERNEL);

        spin_lock_irqsave(&priv->lock, flags);
        iwlagn_rx_queue_restock(priv);
        spin_unlock_irqrestore(&priv->lock, flags);
}

static void iwlagn_rx_replenish_now(struct iwl_priv *priv)
{
        iwlagn_rx_allocate(priv, GFP_ATOMIC);

        iwlagn_rx_queue_restock(priv);
}

void iwl_bg_rx_replenish(struct work_struct *data)
{
        struct iwl_priv *priv =
            container_of(data, struct iwl_priv, rx_replenish);

        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;

        mutex_lock(&priv->mutex);
        iwlagn_rx_replenish(priv);
        mutex_unlock(&priv->mutex);
}
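/*
 * A note on the two replenish paths above: iwlagn_rx_replenish() runs
 * in process context (from the rx_replenish work item, under
 * priv->mutex) and may therefore sleep, so it allocates with
 * GFP_KERNEL. iwlagn_rx_replenish_now() is called from the interrupt
 * tasklet via iwl_rx_handle(), where sleeping is forbidden, so it must
 * use GFP_ATOMIC.
 */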
/**
 * iwl_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
static void iwl_rx_handle(struct iwl_priv *priv)
{
        struct iwl_rx_mem_buffer *rxb;
        struct iwl_rx_packet *pkt;
        struct iwl_rx_queue *rxq = &priv->rxq;
        u32 r, i;
        int reclaim;
        unsigned long flags;
        u8 fill_rx = 0;
        u32 count = 8;
        int total_empty;

        /* uCode's read index (stored in shared DRAM) indicates the last Rx
         * buffer that the driver may process (last buffer filled by ucode) */
        r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
        i = rxq->read;

        /* Rx interrupt, but nothing sent from uCode */
        if (i == r)
                IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);

        /* calculate the total number of frames that need to be restocked
         * after handling RX */
        total_empty = r - rxq->write_actual;
        if (total_empty < 0)
                total_empty += RX_QUEUE_SIZE;

        if (total_empty > (RX_QUEUE_SIZE / 2))
                fill_rx = 1;

        while (i != r) {
                int len;

                rxb = rxq->queue[i];

                /* If an RXB doesn't have a Rx queue slot associated with it,
                 * then a bug has been introduced in the queue refilling
                 * routines -- catch it here */
                if (WARN_ON(rxb == NULL)) {
                        i = (i + 1) & RX_QUEUE_MASK;
                        continue;
                }

                rxq->queue[i] = NULL;

                dma_unmap_page(priv->bus->dev, rxb->page_dma,
                               PAGE_SIZE << priv->hw_params.rx_page_order,
                               DMA_FROM_DEVICE);
                pkt = rxb_addr(rxb);

                IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
                        i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);

                len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
                len += sizeof(u32); /* account for status word */
                trace_iwlwifi_dev_rx(priv, pkt, len);

                /* Reclaim a command buffer only if this packet is a response
                 *   to a (driver-originated) command.
                 * If the packet (e.g. Rx frame) originated from uCode,
                 *   there is no command buffer to reclaim.
                 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
                 *   but apparently a few don't get set; catch them here. */
                reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
                        (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
                        (pkt->hdr.cmd != REPLY_RX) &&
                        (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
                        (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
                        (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
                        (pkt->hdr.cmd != REPLY_TX);

                iwl_rx_dispatch(priv, rxb);

                /*
                 * XXX: After here, we should always check rxb->page
                 * against NULL before touching it or its virtual
                 * memory (pkt). Because some rx_handler might have
                 * already taken or freed the pages.
                 */

                if (reclaim) {
                        /* Invoke any callbacks, transfer the buffer to caller,
                         * and fire off the (possibly) blocking
                         * trans_send_cmd()
                         * as we reclaim the driver command queue */
                        if (rxb->page)
                                iwl_tx_cmd_complete(priv, rxb);
                        else
                                IWL_WARN(priv, "Claim null rxb?\n");
                }

                /* Reuse the page if possible. For notification packets and
                 * SKBs that fail to Rx correctly, add them back into the
                 * rx_free list for reuse later. */
                spin_lock_irqsave(&rxq->lock, flags);
                if (rxb->page != NULL) {
                        rxb->page_dma = dma_map_page(priv->bus->dev, rxb->page,
                                0, PAGE_SIZE << priv->hw_params.rx_page_order,
                                DMA_FROM_DEVICE);
                        list_add_tail(&rxb->list, &rxq->rx_free);
                        rxq->free_count++;
                } else
                        list_add_tail(&rxb->list, &rxq->rx_used);

                spin_unlock_irqrestore(&rxq->lock, flags);

                i = (i + 1) & RX_QUEUE_MASK;
                /* If there are a lot of unused frames,
                 * restock the Rx queue so uCode won't assert */
                if (fill_rx) {
                        count++;
                        if (count >= 8) {
                                rxq->read = i;
                                iwlagn_rx_replenish_now(priv);
                                count = 0;
                        }
                }
        }

        /* Backtrack one entry */
        rxq->read = i;
        if (fill_rx)
                iwlagn_rx_replenish_now(priv);
        else
                iwlagn_rx_queue_restock(priv);
}
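/*
 * A worked example of the header fields iwl_rx_handle() inspects (a
 * sketch; the values are illustrative): a uCode-originated frame
 * carries SEQ_RX_FRAME (bit 15) in pkt->hdr.sequence, so reclaim
 * evaluates to 0 and no command-queue entry is touched. A response to
 * a driver-originated command arrives with SEQ_RX_FRAME clear and a
 * cmd id outside the exception list, so reclaim is 1 and
 * iwl_tx_cmd_complete() releases the command buffer. The payload
 * length is the low bits of len_n_flags
 * (len_n_flags & FH_RSCSR_FRAME_SIZE_MSK) plus sizeof(u32) for the
 * status word.
 */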
/* tasklet for iwlagn interrupt */
void iwl_irq_tasklet(struct iwl_priv *priv)
{
        u32 inta = 0;
        u32 handled = 0;
        unsigned long flags;
        u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
        u32 inta_mask;
#endif

        spin_lock_irqsave(&priv->lock, flags);

        /* Ack/clear/reset pending uCode interrupts.
         * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
         */
        /* There is a hardware bug in the interrupt mask function that some
         * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
         * they are disabled in the CSR_INT_MASK register. Furthermore the
         * ICT interrupt handling mechanism has another bug that might cause
         * these unmasked interrupts to fail to be detected. We work around
         * both hardware bugs here by ACKing all the possible interrupts so
         * that interrupt coalescing can still be achieved.
         */
        iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask);

        inta = priv->inta;

#ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
                /* just for debug */
                inta_mask = iwl_read32(priv, CSR_INT_MASK);
                IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n",
                                inta, inta_mask);
        }
#endif

        spin_unlock_irqrestore(&priv->lock, flags);

        /* the interrupt was saved in the inta variable; we can now reset
         * priv->inta */
        priv->inta = 0;

        /* Now service all interrupt bits discovered above. */
        if (inta & CSR_INT_BIT_HW_ERR) {
                IWL_ERR(priv, "Hardware error detected. Restarting.\n");

                /* Tell the device to stop sending interrupts */
                iwl_disable_interrupts(priv);

                priv->isr_stats.hw++;
                iwl_irq_handle_error(priv);

                handled |= CSR_INT_BIT_HW_ERR;

                return;
        }

#ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
                /* NIC fires this, but we don't use it, redundant with WAKEUP */
                if (inta & CSR_INT_BIT_SCD) {
                        IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
                                      "the frame/frames.\n");
                        priv->isr_stats.sch++;
                }

                /* Alive notification via Rx interrupt will do the real work */
                if (inta & CSR_INT_BIT_ALIVE) {
                        IWL_DEBUG_ISR(priv, "Alive interrupt\n");
                        priv->isr_stats.alive++;
                }
        }
#endif
        /* Safely ignore these bits for debug checks below */
        inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

        /* HW RF KILL switch toggled */
        if (inta & CSR_INT_BIT_RF_KILL) {
                int hw_rf_kill = 0;
                if (!(iwl_read32(priv, CSR_GP_CNTRL) &
                                CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
                        hw_rf_kill = 1;

                IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
                                hw_rf_kill ? "disable radio" : "enable radio");

                priv->isr_stats.rfkill++;

                /* The driver only loads the uCode when the interface is set
                 * up, but it allows loading the uCode even if the radio is
                 * killed. Hence update the killswitch state here; the rfkill
                 * handler will take care of restarting if needed.
                 */
                if (!test_bit(STATUS_ALIVE, &priv->status)) {
                        if (hw_rf_kill)
                                set_bit(STATUS_RF_KILL_HW, &priv->status);
                        else
                                clear_bit(STATUS_RF_KILL_HW, &priv->status);
                        wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
                }

                handled |= CSR_INT_BIT_RF_KILL;
        }
        /* Chip got too hot and stopped itself */
        if (inta & CSR_INT_BIT_CT_KILL) {
                IWL_ERR(priv, "Microcode CT kill error detected.\n");
                priv->isr_stats.ctkill++;
                handled |= CSR_INT_BIT_CT_KILL;
        }

        /* Error detected by uCode */
        if (inta & CSR_INT_BIT_SW_ERR) {
                IWL_ERR(priv, "Microcode SW error detected. "
                        "Restarting 0x%X.\n", inta);
                priv->isr_stats.sw++;
                iwl_irq_handle_error(priv);
                handled |= CSR_INT_BIT_SW_ERR;
        }

        /* uCode wakes up after power-down sleep */
        if (inta & CSR_INT_BIT_WAKEUP) {
                IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
                iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
                for (i = 0; i < priv->hw_params.max_txq_num; i++)
                        iwl_txq_update_write_ptr(priv, &priv->txq[i]);

                priv->isr_stats.wakeup++;

                handled |= CSR_INT_BIT_WAKEUP;
        }

        /* All uCode command responses, including Tx command responses,
         * Rx "responses" (frame-received notification), and other
         * notifications from uCode come through here */
        if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
                        CSR_INT_BIT_RX_PERIODIC)) {
                IWL_DEBUG_ISR(priv, "Rx interrupt\n");
                if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
                        handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
                        iwl_write32(priv, CSR_FH_INT_STATUS,
                                        CSR_FH_INT_RX_MASK);
                }
                if (inta & CSR_INT_BIT_RX_PERIODIC) {
                        handled |= CSR_INT_BIT_RX_PERIODIC;
                        iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
                }
                /* Sending an Rx interrupt requires many steps to be done in
                 * the device:
                 * 1- write interrupt to current index in ICT table.
                 * 2- dma RX frame.
                 * 3- update RX shared data to indicate last write index.
                 * 4- send interrupt.
                 * This could lead to an Rx race: the driver could receive the
                 * Rx interrupt before the shared data changes reflect it;
                 * the periodic interrupt will detect any dangling Rx activity.
                 */

                /* Disable periodic interrupt; we use it as just a one-shot. */
                iwl_write8(priv, CSR_INT_PERIODIC_REG,
                            CSR_INT_PERIODIC_DIS);
                iwl_rx_handle(priv);

                /*
                 * Enable periodic interrupt in 8 msec only if we received
                 * a real Rx interrupt (instead of just a periodic int), to
                 * catch any dangling Rx interrupt. If it was just the
                 * periodic interrupt, there was no dangling Rx activity,
                 * and no need to extend the periodic interrupt; one-shot
                 * is enough.
                 */
                if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
                        iwl_write8(priv, CSR_INT_PERIODIC_REG,
                                    CSR_INT_PERIODIC_ENA);

                priv->isr_stats.rx++;
        }

        /* This "Tx" DMA channel is used only for loading uCode */
        if (inta & CSR_INT_BIT_FH_TX) {
                iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
                IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
                priv->isr_stats.tx++;
                handled |= CSR_INT_BIT_FH_TX;
                /* Wake up uCode load routine, now that load is complete */
                priv->ucode_write_complete = 1;
                wake_up(&priv->wait_command_queue);
        }

        if (inta & ~handled) {
                IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
                priv->isr_stats.unhandled++;
        }

        if (inta & ~(priv->inta_mask)) {
                IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
                         inta & ~priv->inta_mask);
        }

        /* Re-enable all interrupts */
        /* only re-enable if disabled by irq */
        if (test_bit(STATUS_INT_ENABLED, &priv->status))
                iwl_enable_interrupts(priv);
        /* Re-enable RF_KILL if it occurred */
        else if (handled & CSR_INT_BIT_RF_KILL)
                iwl_enable_rfkill_int(priv);
}
/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/
#define ICT_COUNT       (PAGE_SIZE/sizeof(u32))

/* Free dram table */
void iwl_free_isr_ict(struct iwl_priv *priv)
{
        if (priv->ict_tbl_vir) {
                dma_free_coherent(priv->bus->dev,
                                  (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
                                  priv->ict_tbl_vir,
                                  priv->ict_tbl_dma);
                priv->ict_tbl_vir = NULL;
                memset(&priv->ict_tbl_dma, 0,
                        sizeof(priv->ict_tbl_dma));
                memset(&priv->aligned_ict_tbl_dma, 0,
                        sizeof(priv->aligned_ict_tbl_dma));
        }
}


/* allocate the dram shared table, PAGE_SIZE aligned;
 * also reset all data related to ICT table interrupt.
 */
int iwl_alloc_isr_ict(struct iwl_priv *priv)
{

        /* allocate shared data table */
        priv->ict_tbl_vir =
                dma_alloc_coherent(priv->bus->dev,
                                   (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
                                   &priv->ict_tbl_dma, GFP_KERNEL);
        if (!priv->ict_tbl_vir)
                return -ENOMEM;

        /* align table to PAGE_SIZE boundary */
        priv->aligned_ict_tbl_dma =
                ALIGN(priv->ict_tbl_dma, PAGE_SIZE);

        IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
                      (unsigned long long)priv->ict_tbl_dma,
                      (unsigned long long)priv->aligned_ict_tbl_dma,
                      (int)(priv->aligned_ict_tbl_dma -
                            priv->ict_tbl_dma));

        priv->ict_tbl = priv->ict_tbl_vir +
                        (priv->aligned_ict_tbl_dma -
                         priv->ict_tbl_dma);

        IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
                      priv->ict_tbl, priv->ict_tbl_vir,
                      (int)(priv->aligned_ict_tbl_dma -
                            priv->ict_tbl_dma));

        /* reset table and index to all 0 */
        memset(priv->ict_tbl_vir, 0,
               (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
        priv->ict_index = 0;

        /* add periodic RX interrupt */
        priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
        return 0;
}
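/*
 * A worked example of the alignment fix-up above (a sketch assuming
 * 4 KiB pages, so ICT_COUNT == 1024): the allocation is
 * sizeof(u32) * 1024 + 4096 == 8192 bytes, which is guaranteed to
 * contain one fully PAGE_SIZE-aligned run of ICT_COUNT entries. If
 * dma_alloc_coherent() returned, say, a bus address ending in 0x300,
 * ALIGN() rounds it up to the next page boundary and ict_tbl is
 * ict_tbl_vir advanced by that same 0xd00-byte offset.
 */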
/* Device is going up: inform it that we are using the ICT interrupt table,
 * and tell the driver to start using ICT interrupts.
 */
int iwl_reset_ict(struct iwl_priv *priv)
{
        u32 val;
        unsigned long flags;

        if (!priv->ict_tbl_vir)
                return 0;

        spin_lock_irqsave(&priv->lock, flags);
        iwl_disable_interrupts(priv);

        memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);

        val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;

        val |= CSR_DRAM_INT_TBL_ENABLE;
        val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

        IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
                        "aligned dma address %Lx\n",
                        val,
                        (unsigned long long)priv->aligned_ict_tbl_dma);

        iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
        priv->use_ict = true;
        priv->ict_index = 0;
        iwl_write32(priv, CSR_INT, priv->inta_mask);
        iwl_enable_interrupts(priv);
        spin_unlock_irqrestore(&priv->lock, flags);

        return 0;
}

/* Device is going down: disable ICT interrupt usage */
void iwl_disable_ict(struct iwl_priv *priv)
{
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        priv->use_ict = false;
        spin_unlock_irqrestore(&priv->lock, flags);
}
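/*
 * The value written to CSR_DRAM_INT_TBL_REG above packs the table base
 * and control flags into one register (a sketch of the layout as used
 * here, not a full register description): aligned_ict_tbl_dma is
 * PAGE_SIZE-aligned, so its low PAGE_SHIFT bits are zero and
 * "dma >> PAGE_SHIFT" is a bare page number, leaving the high bits
 * free for the CSR_DRAM_INT_TBL_ENABLE and CSR_DRAM_INIT_TBL_WRAP_CHECK
 * flags that are OR'ed in.
 */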
static irqreturn_t iwl_isr(int irq, void *data)
{
        struct iwl_priv *priv = data;
        u32 inta, inta_mask;
        unsigned long flags;
#ifdef CONFIG_IWLWIFI_DEBUG
        u32 inta_fh;
#endif
        if (!priv)
                return IRQ_NONE;

        spin_lock_irqsave(&priv->lock, flags);

        /* Disable (but don't clear!) interrupts here to avoid
         * back-to-back ISRs and sporadic interrupts from our NIC.
         * If we have something to service, the tasklet will re-enable ints.
         * If we *don't* have something, we'll re-enable before leaving here. */
        inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
        iwl_write32(priv, CSR_INT_MASK, 0x00000000);

        /* Discover which interrupts are active/pending */
        inta = iwl_read32(priv, CSR_INT);

        /* Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC. */
        if (!inta) {
                IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
                goto none;
        }

        if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
                /* Hardware disappeared. It might have already raised
                 * an interrupt */
                IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
                goto unplugged;
        }

#ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
                inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
                IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
                              "fh 0x%08x\n", inta, inta_mask, inta_fh);
        }
#endif

        priv->inta |= inta;
        /* iwl_irq_tasklet() will service interrupts and re-enable them */
        if (likely(inta))
                tasklet_schedule(&priv->irq_tasklet);
        else if (test_bit(STATUS_INT_ENABLED, &priv->status) &&
                        !priv->inta)
                iwl_enable_interrupts(priv);

 unplugged:
        spin_unlock_irqrestore(&priv->lock, flags);
        return IRQ_HANDLED;

 none:
        /* re-enable interrupts here since we don't have anything to service. */
        /* only re-enable if disabled by irq and no tasklet was scheduled. */
        if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
                iwl_enable_interrupts(priv);

        spin_unlock_irqrestore(&priv->lock, flags);
        return IRQ_NONE;
}

/* Interrupt handler using the ICT table. With this handler, the driver stops
 * using the INTA register to read the device's interrupts, since reading that
 * register is expensive. Instead, the device writes interrupt causes into the
 * ICT DRAM table, increments its index and fires an interrupt to the driver.
 * The driver ORs all ICT table entries from the current index up to the first
 * zero-valued entry; the result is the interrupt that needs to be serviced.
 * The driver then sets the read entries back to 0 and updates the index.
 */
irqreturn_t iwl_isr_ict(int irq, void *data)
{
        struct iwl_priv *priv = data;
        u32 inta, inta_mask;
        u32 val = 0;
        unsigned long flags;

        if (!priv)
                return IRQ_NONE;

        /* dram interrupt table not set yet,
         * use legacy interrupt.
         */
        if (!priv->use_ict)
                return iwl_isr(irq, data);

        spin_lock_irqsave(&priv->lock, flags);

        /* Disable (but don't clear!) interrupts here to avoid
         * back-to-back ISRs and sporadic interrupts from our NIC.
         * If we have something to service, the tasklet will re-enable ints.
         * If we *don't* have something, we'll re-enable before leaving here.
         */
        inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
        iwl_write32(priv, CSR_INT_MASK, 0x00000000);


        /* Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC. */
        if (!priv->ict_tbl[priv->ict_index]) {
                IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
                goto none;
        }

        /* read all entries that are not 0, starting from ict_index */
        while (priv->ict_tbl[priv->ict_index]) {

                val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
                IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
                                priv->ict_index,
                                le32_to_cpu(
                                  priv->ict_tbl[priv->ict_index]));
                priv->ict_tbl[priv->ict_index] = 0;
                priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
                                                     ICT_COUNT);

        }

        /* We should not get this value, just ignore it. */
        if (val == 0xffffffff)
                val = 0;

        /*
         * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
         * (bit 15 before shifting it to 31) to clear when using interrupt
         * coalescing. Fortunately, bits 18 and 19 stay set when this happens
         * so we use them to decide on the real state of the Rx bit.
         * In other words, bit 15 is set if bit 18 or bit 19 is set.
         */
        if (val & 0xC0000)
                val |= 0x8000;

        inta = (0xff & val) | ((0xff00 & val) << 16);
        IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
                        inta, inta_mask, val);

        inta &= priv->inta_mask;
        priv->inta |= inta;

        /* iwl_irq_tasklet() will service interrupts and re-enable them */
        if (likely(inta))
                tasklet_schedule(&priv->irq_tasklet);
        else if (test_bit(STATUS_INT_ENABLED, &priv->status) &&
                        !priv->inta) {
                /* Interrupts were disabled by this handler and no tasklet
                 * was scheduled, so it is safe to re-enable them here;
                 * otherwise the tasklet will re-enable them.
                 */
                iwl_enable_interrupts(priv);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
        return IRQ_HANDLED;

 none:
        /* re-enable interrupts here since we don't have anything to service.
         * only re-enable if disabled by irq.
         */
        if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
                iwl_enable_interrupts(priv);

        spin_unlock_irqrestore(&priv->lock, flags);
        return IRQ_NONE;
}
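/*
 * A worked example of the ICT-to-CSR_INT remapping in iwl_isr_ict()
 * (a sketch): the device compresses the interrupt cause into two bytes
 * of each ICT entry, so
 *
 *      inta = (0xff & val) | ((0xff00 & val) << 16);
 *
 * maps ICT bits 0..7 onto CSR_INT bits 0..7 and ICT bits 8..15 onto
 * CSR_INT bits 24..31; e.g. val == 0x8042 yields inta == 0x80000042.
 * The preceding "if (val & 0xC0000) val |= 0x8000" workaround forces
 * bit 15 when bit 18 or 19 is set, which the shift then turns into the
 * Rx bit at position 31.
 */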