/drivers/net/wireless/b43legacy/dma.c
/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>

/* 32bit DMA ops. */
static
struct b43legacy_dmadesc_generic *op32_idx2desc(
					struct b43legacy_dmaring *ring,
					int slot,
					struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43legacy_dmadesc_generic *)desc;
}

static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		   >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    | B43legacy_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    & ~B43legacy_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
	val &= B43legacy_DMA32_RXDPTR;

	return (val / sizeof(struct b43legacy_dmadesc32));
}

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static const struct b43legacy_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43legacy_dmadesc_generic *op64_idx2desc(
					struct b43legacy_dmaring *ring,
					int slot,
					struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43legacy_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0;
	u32 ctl1 = 0;
	u32 addrlo;
	u32 addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
		  >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= ring->dev->dma.translation;
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43legacy_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43legacy_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43legacy_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
		& B43legacy_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43legacy_DMA64_DCTL1_ADDREXT_SHIFT)
		& B43legacy_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
}

static void op64_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
			    | B43legacy_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
			    & ~B43legacy_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA64_RXSTATUS);
	val &= B43legacy_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43legacy_dmadesc64));
}

static void op64_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
}

static const struct b43legacy_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};


static inline int free_slots(struct b43legacy_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
		b43legacydbg(ring->dev->wl,
			     "max_used_slots increased to %d on %s ring %d\n",
			     ring->max_used_slots,
			     ring->tx ? "TX" : "RX",
			     ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{ }
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
	int slot;

	B43legacy_WARN_ON(!ring->tx);
	B43legacy_WARN_ON(ring->stopped);
	B43legacy_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
					struct b43legacy_wldev *dev,
					int queue_priority)
{
	struct b43legacy_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1 */
return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43legacy_WARN_ON(1);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

/* Bcm4301-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43legacy_dmaring *ring)
{
	static const u8 idx_to_prio[] =
		{ 3, 2, 1, 0, 4, 5, };

/*FIXME: have only one queue, for now */
return 0;

	return idx_to_prio[ring->index];
}


static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
					int controller_idx)
{
	static const u16 map64[] = {
		B43legacy_MMIO_DMA64_BASE0,
		B43legacy_MMIO_DMA64_BASE1,
		B43legacy_MMIO_DMA64_BASE2,
		B43legacy_MMIO_DMA64_BASE3,
		B43legacy_MMIO_DMA64_BASE4,
		B43legacy_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43legacy_MMIO_DMA32_BASE0,
		B43legacy_MMIO_DMA32_BASE1,
		B43legacy_MMIO_DMA32_BASE2,
		B43legacy_MMIO_DMA32_BASE3,
		B43legacy_MMIO_DMA32_BASE4,
		B43legacy_MMIO_DMA32_BASE5,
	};

	if (type == B43legacy_DMA_64BIT) {
		B43legacy_WARN_ON(!(controller_idx >= 0 &&
				    controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43legacy_WARN_ON(!(controller_idx >= 0 &&
			    controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx)
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len,
					 DMA_TO_DEVICE);
	else
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len,
					 DMA_FROM_DEVICE);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx)
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len,
				 DMA_TO_DEVICE);
	else
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len,
				 DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
			    struct b43legacy_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	/* GFP flags must match the flags in free_ringmemory()! */
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43legacy_DMA_RINGMEMSIZE,
					    &(ring->dmabase),
					    GFP_KERNEL);
	if (!ring->descbase) {
		b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
			     " failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43legacy_DMA_64BIT) ?
		 B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43legacy_DMA_64BIT) ?
			 B43legacy_DMA64_RXSTATUS : B43legacy_DMA32_RXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		if (type == B43legacy_DMA_64BIT) {
			value &= B43legacy_DMA64_RXSTAT;
			if (value == B43legacy_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43legacy_DMA32_RXSTATE;
			if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43legacy_DMA_64BIT) ?
			 B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		if (type == B43legacy_DMA_64BIT) {
			value &= B43legacy_DMA64_TXSTAT;
			if (value == B43legacy_DMA64_TXSTAT_DISABLED ||
			    value == B43legacy_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43legacy_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43legacy_DMA32_TXSTATE;
			if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
			    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43legacy_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_TXCTL :
		 B43legacy_DMA32_TXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43legacy_DMA_64BIT) ?
			 B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		if (type == B43legacy_DMA_64BIT) {
			value &= B43legacy_DMA64_TXSTAT;
			if (value == B43legacy_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43legacy_DMA32_TXSTATE;
			if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
					dma_addr_t addr,
					size_t buffersize,
					bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43legacy_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43legacy_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43legacy_DMA_64BIT:
		/* Currently we can't have addresses beyond 64 bits in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}

static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc_generic *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	int i;
	int err = -ENOMEM;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
				     "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();	/* all descbuffer setup before next line */
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;

	if (ring->tx) {
		if (ring->type == B43legacy_DMA_64BIT) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43legacy_DMA64_TXENABLE;
			value |= (addrext << B43legacy_DMA64_TXADDREXT_SHIFT)
				 & B43legacy_DMA64_TXADDREXT_MASK;
			b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
					    value);
			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO,
					    (ringbase & 0xFFFFFFFF));
			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI,
					    ((ringbase >> 32)
					    & ~SSB_DMA_TRANSLATION_MASK)
					    | trans);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43legacy_DMA32_TXENABLE;
			value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
				 & B43legacy_DMA32_TXADDREXT_MASK;
			b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
					    value);
			b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
					    (ringbase &
					    ~SSB_DMA_TRANSLATION_MASK)
					    | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43legacy_DMA_64BIT) {
			u64 ringbase = (u64)(ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset <<
				 B43legacy_DMA64_RXFROFF_SHIFT);
			value |= B43legacy_DMA64_RXENABLE;
			value |= (addrext << B43legacy_DMA64_RXADDREXT_SHIFT)
				 & B43legacy_DMA64_RXADDREXT_MASK;
			b43legacy_dma_write(ring, B43legacy_DMA64_RXCTL,
					    value);
			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO,
					    (ringbase & 0xFFFFFFFF));
			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI,
					    ((ringbase >> 32) &
					    ~SSB_DMA_TRANSLATION_MASK) |
					    trans);
			b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
					    200);
		} else {
			u32 ringbase = (u32)(ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
				  >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset <<
				 B43legacy_DMA32_RXFROFF_SHIFT);
			value |= B43legacy_DMA32_RXENABLE;
			value |= (addrext <<
				 B43legacy_DMA32_RXADDREXT_SHIFT)
				 & B43legacy_DMA32_RXADDREXT_MASK;
			b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL,
					    value);
			b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
					    (ringbase &
					    ~SSB_DMA_TRANSLATION_MASK)
					    | trans);
			b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
					    200);
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
	if (ring->tx) {
		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		if (ring->type == B43legacy_DMA_64BIT) {
			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0);
			b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0);
		} else
			b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
	} else {
		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		if (ring->type == B43legacy_DMA_64BIT) {
			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0);
			b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0);
		} else
			b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43legacy_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		free_descriptor_buffer(ring, meta, 0);
	}
}

static u64 supported_dma_mask(struct b43legacy_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43legacy_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_BIT_MASK(64);
	mmio_base = b43legacy_dmacontroller_base(0, 0);
	b43legacy_write32(dev,
			  mmio_base + B43legacy_DMA32_TXCTL,
			  B43legacy_DMA32_TXADDREXT_MASK);
	tmp = b43legacy_read32(dev, mmio_base +
			       B43legacy_DMA32_TXCTL);
	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43legacy_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43legacy_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43legacy_DMA_64BIT;
	B43legacy_WARN_ON(1);
	return B43legacy_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
						  int controller_index,
						  int for_tx,
						  enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;
	ring->dev = dev;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					    sizeof(struct b43legacy_txhdr_fw3),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					  sizeof(struct b43legacy_txhdr_fw3),
					  DMA_TO_DEVICE);

		if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43legacy_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
	if (!ring)
		return;

	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
		     ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma;

	if (b43legacy_using_pio(dev))
		return;
	dma = &dev->dma;

	b43legacy_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = 1;
			continue;
		}
		b43legacyerr(dev->wl, "The machine/kernel does not support "
			     "the required %u-bit DMA mask\n",
			     (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-"
			      "bit\n",
			      (unsigned int)dma_mask_to_engine_type(orig_mask),
			      (unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	int err;
	u64 dmamask;
	enum b43legacy_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43legacy_dma_set_mask(dev, dmamask);
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			      "Falling back to PIO\n");
		dev->__using_pio = 1;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
			     "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}
	dma->translation = ssb_dma_translation(dev->dev);

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
				       u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			   struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	const struct b43legacy_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				       skb->data, skb->len, info,
				       generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb();	/* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}

static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
				     "Injecting TX ring overflow on "
				     "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}

int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int err = 0;
	unsigned long flags;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out_unlock;
	}

	if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}

	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
	 * into the skb data or cb now. */
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 1;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
				     ring->index);
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}

void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				   const struct b43legacy_txstatus *status)
{
	const struct b43legacy_dma_ops *ops;
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_meta *meta;
	int retry_limit;
	int slot;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!irqs_disabled());
	spin_lock(&ring->lock);

	B43legacy_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;
			BUG_ON(!meta->skb);
			info = IEEE80211_SKB_CB(meta->skb);

			/* Preserve the configured retry limit before clearing the status.
			 * The xmit function has overwritten the rc's value with the actual
			 * retry limit done by the hardware. */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
				/*
				 * If the short retries (RTS, not data frame) have exceeded
				 * the limit, the hw will not have tried the selected rate,
				 * but will have used the fallback rate instead.
				 * Don't let the rate control count attempts for the selected
				 * rate in this case, otherwise the statistics will be off.
				 */
				info->status.rates[0].count = 0;
				info->status.rates[1].count = status->frame_count;
			} else {
				if (status->frame_count > retry_limit) {
					info->status.rates[0].count = retry_limit;
					info->status.rates[1].count = status->frame_count -
								      retry_limit;

				} else {
					info->status.rates[0].count = status->frame_count;
					info->status.rates[1].idx = -1;
				}
			}

			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
		ring->stopped = 0;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}

	spin_unlock(&ring->lock);
}

static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	const struct b43legacy_dma_ops *ops = ring->ops;
	struct b43legacy_dmadesc_generic *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw =
			(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
			     "(len: %u, buffer: %u, nr-dropped: %d)\n",
			     len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	const struct b43legacy_dma_ops *ops = ring->ops;
	int slot;
	int current_slot;
	int used_slots = 0;

	B43legacy_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
			    ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43legacy_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_power_saving_ctl_bits(dev, -1, 1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43legacy_power_saving_ctl_bits(dev, -1, -1);
}