/drivers/net/qla3xxx.c
C | 3970 lines | 3021 code | 608 blank | 341 comment | 417 complexity | 4888bbc497e2b144d18a0d09d967762a MD5 | raw file
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0
Large files are truncated, but you can click here to view the full file
1/* 2 * QLogic QLA3xxx NIC HBA Driver 3 * Copyright (c) 2003-2006 QLogic Corporation 4 * 5 * See LICENSE.qla3xxx for copyright and licensing details. 6 */ 7 8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 10#include <linux/kernel.h> 11#include <linux/init.h> 12#include <linux/types.h> 13#include <linux/module.h> 14#include <linux/list.h> 15#include <linux/pci.h> 16#include <linux/dma-mapping.h> 17#include <linux/sched.h> 18#include <linux/slab.h> 19#include <linux/dmapool.h> 20#include <linux/mempool.h> 21#include <linux/spinlock.h> 22#include <linux/kthread.h> 23#include <linux/interrupt.h> 24#include <linux/errno.h> 25#include <linux/ioport.h> 26#include <linux/ip.h> 27#include <linux/in.h> 28#include <linux/if_arp.h> 29#include <linux/if_ether.h> 30#include <linux/netdevice.h> 31#include <linux/etherdevice.h> 32#include <linux/ethtool.h> 33#include <linux/skbuff.h> 34#include <linux/rtnetlink.h> 35#include <linux/if_vlan.h> 36#include <linux/delay.h> 37#include <linux/mm.h> 38#include <linux/prefetch.h> 39 40#include "qla3xxx.h" 41 42#define DRV_NAME "qla3xxx" 43#define DRV_STRING "QLogic ISP3XXX Network Driver" 44#define DRV_VERSION "v2.03.00-k5" 45 46static const char ql3xxx_driver_name[] = DRV_NAME; 47static const char ql3xxx_driver_version[] = DRV_VERSION; 48 49#define TIMED_OUT_MSG \ 50"Timed out waiting for management port to get free before issuing command\n" 51 52MODULE_AUTHOR("QLogic Corporation"); 53MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " "); 54MODULE_LICENSE("GPL"); 55MODULE_VERSION(DRV_VERSION); 56 57static const u32 default_msg 58 = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK 59 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; 60 61static int debug = -1; /* defaults above */ 62module_param(debug, int, 0); 63MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 64 65static int msi; 66module_param(msi, int, 0); 67MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts."); 68 69static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = 
{ 70 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)}, 71 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)}, 72 /* required last entry */ 73 {0,} 74}; 75 76MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl); 77 78/* 79 * These are the known PHY's which are used 80 */ 81enum PHY_DEVICE_TYPE { 82 PHY_TYPE_UNKNOWN = 0, 83 PHY_VITESSE_VSC8211, 84 PHY_AGERE_ET1011C, 85 MAX_PHY_DEV_TYPES 86}; 87 88struct PHY_DEVICE_INFO { 89 const enum PHY_DEVICE_TYPE phyDevice; 90 const u32 phyIdOUI; 91 const u16 phyIdModel; 92 const char *name; 93}; 94 95static const struct PHY_DEVICE_INFO PHY_DEVICES[] = { 96 {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"}, 97 {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"}, 98 {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"}, 99}; 100 101 102/* 103 * Caller must take hw_lock. 104 */ 105static int ql_sem_spinlock(struct ql3_adapter *qdev, 106 u32 sem_mask, u32 sem_bits) 107{ 108 struct ql3xxx_port_registers __iomem *port_regs = 109 qdev->mem_map_registers; 110 u32 value; 111 unsigned int seconds = 3; 112 113 do { 114 writel((sem_mask | sem_bits), 115 &port_regs->CommonRegs.semaphoreReg); 116 value = readl(&port_regs->CommonRegs.semaphoreReg); 117 if ((value & (sem_mask >> 16)) == sem_bits) 118 return 0; 119 ssleep(1); 120 } while (--seconds); 121 return -1; 122} 123 124static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) 125{ 126 struct ql3xxx_port_registers __iomem *port_regs = 127 qdev->mem_map_registers; 128 writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); 129 readl(&port_regs->CommonRegs.semaphoreReg); 130} 131 132static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) 133{ 134 struct ql3xxx_port_registers __iomem *port_regs = 135 qdev->mem_map_registers; 136 u32 value; 137 138 writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); 139 value = readl(&port_regs->CommonRegs.semaphoreReg); 140 return ((value & (sem_mask >> 16)) == sem_bits); 141} 142 143/* 144 * Caller holds 
 * hw_lock.
 */
/* Poll for the inter-driver lock (up to ~10 s); 1 on success, 0 on timeout. */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	while (i < 10) {
		if (i)
			ssleep(1);

		if (ql_sem_lock(qdev,
				QL_DRVR_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				 * 2) << 1)) {
			netdev_printk(KERN_DEBUG, qdev->ndev,
				      "driver lock acquired\n");
			return 1;
		}
	}

	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
	return 0;
}

/* Switch the banked register window; caller holds hw_lock. */
static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

/* Locked (_l) read of a common (unbanked) register. */
static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

/* Unlocked read of a common register; caller supplies any needed locking. */
static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	return readl(reg);
}

/* Locked read of a page-0 register, switching pages if necessary. */
static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

/* Unlocked page-0 read; caller holds hw_lock. */
static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

/* Locked write to a common register; read-back flushes the posted write. */
static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

/* Unlocked write to a common register, flushed by read-back. */
static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

/* NVRAM write with a 1 us settle delay after the posted-write flush. */
static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

/* Write to a page-0 register, switching pages if necessary. */
static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

/* Mask all ISP interrupts. */
static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));

}

/* Unmask ISP interrupts. */
static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));

}

/*
 * Return a receive buffer control block to the free list, replenishing
 * its skb (and DMA mapping) if it was consumed.  On allocation or
 * mapping failure the cb is queued skb-less and lrg_buf_skb_check is
 * bumped so the deficit is retried later.
 */
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;
	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
				cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
				cpu_to_le32(MS_64BITS(map));
			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

/* Pop the head of the large-buffer free list, or NULL when empty. */
static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;

	if (lrg_buf_cb != NULL) {
		qdev->lrg_buf_free_head = lrg_buf_cb->next;
		if (qdev->lrg_buf_free_head == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

/* Bit-widths for the FM93C56A serial EEPROM protocol. */
static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
 */
/* Assert chip-select to the FM93C56A serial EEPROM; caller holds hw_lock. */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, spir,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Bit-bang a command opcode followed by an address to the EEPROM,
 * MSB first, clocking each bit with an explicit rise/fall pair.
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit = (cmd & mask)
			? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/* If the bit changed, change the DO state to match */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Deassert EEPROM chip-select.  Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
/* Clock one 16-bit word out of the EEPROM, MSB first; caller holds hw_lock. */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev, spir) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}

/*
 * Read one word from the EEPROM: select, issue READ+address, clock
 * data in, deselect.  Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

/* Copy three little-endian 16-bit words into the netdev MAC address. */
static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;
	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}

/*
 * Read the whole NVRAM image into qdev->nvram_data and validate its
 * additive checksum (which must come to zero).  Returns 0 on success,
 * -1 on semaphore timeout or checksum failure.
 */
static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 10)) {
		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
			   checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}

/* MII management addresses for the two MACs. */
static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

/* Busy-wait (up to 10 ms) for the MII management interface to go idle. */
static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

/* Re-enable hardware MII auto-scan of the PHY status register(s). */
static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

/* Stop MII auto-scan; returns 1 if it had been enabled (so the caller
 * knows to restore it), 0 otherwise. */
static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

/* Write one PHY register at an explicit MII address; 0 ok, -1 timeout. */
static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

/* Read one PHY register at an explicit MII address; 0 ok, -1 timeout. */
static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

/* Write one register of this port's own PHY; 0 ok, -1 timeout. */
static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

/* Read one register of this port's own PHY; 0 ok, -1 timeout. */
static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	ql_mii_enable_scan_mode(qdev);

return 0; 772} 773 774static void ql_petbi_reset(struct ql3_adapter *qdev) 775{ 776 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET); 777} 778 779static void ql_petbi_start_neg(struct ql3_adapter *qdev) 780{ 781 u16 reg; 782 783 /* Enable Auto-negotiation sense */ 784 ql_mii_read_reg(qdev, PETBI_TBI_CTRL, ®); 785 reg |= PETBI_TBI_AUTO_SENSE; 786 ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg); 787 788 ql_mii_write_reg(qdev, PETBI_NEG_ADVER, 789 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX); 790 791 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, 792 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | 793 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000); 794 795} 796 797static void ql_petbi_reset_ex(struct ql3_adapter *qdev) 798{ 799 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET, 800 PHYAddr[qdev->mac_index]); 801} 802 803static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev) 804{ 805 u16 reg; 806 807 /* Enable Auto-negotiation sense */ 808 ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, ®, 809 PHYAddr[qdev->mac_index]); 810 reg |= PETBI_TBI_AUTO_SENSE; 811 ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, 812 PHYAddr[qdev->mac_index]); 813 814 ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER, 815 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, 816 PHYAddr[qdev->mac_index]); 817 818 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, 819 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG | 820 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000, 821 PHYAddr[qdev->mac_index]); 822} 823 824static void ql_petbi_init(struct ql3_adapter *qdev) 825{ 826 ql_petbi_reset(qdev); 827 ql_petbi_start_neg(qdev); 828} 829 830static void ql_petbi_init_ex(struct ql3_adapter *qdev) 831{ 832 ql_petbi_reset_ex(qdev); 833 ql_petbi_start_neg_ex(qdev); 834} 835 836static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev) 837{ 838 u16 reg; 839 840 if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, ®) < 0) 841 return 0; 842 843 return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE; 844} 845 846static void 
phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) 847{ 848 netdev_info(qdev->ndev, "enabling Agere specific PHY\n"); 849 /* power down device bit 11 = 1 */ 850 ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); 851 /* enable diagnostic mode bit 2 = 1 */ 852 ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr); 853 /* 1000MB amplitude adjust (see Agere errata) */ 854 ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr); 855 /* 1000MB amplitude adjust (see Agere errata) */ 856 ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr); 857 /* 100MB amplitude adjust (see Agere errata) */ 858 ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr); 859 /* 100MB amplitude adjust (see Agere errata) */ 860 ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr); 861 /* 10MB amplitude adjust (see Agere errata) */ 862 ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr); 863 /* 10MB amplitude adjust (see Agere errata) */ 864 ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr); 865 /* point to hidden reg 0x2806 */ 866 ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); 867 /* Write new PHYAD w/bit 5 set */ 868 ql_mii_write_reg_ex(qdev, 0x11, 869 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); 870 /* 871 * Disable diagnostic mode bit 2 = 0 872 * Power up device bit 11 = 0 873 * Link up (on) and activity (blink) 874 */ 875 ql_mii_write_reg(qdev, 0x12, 0x840a); 876 ql_mii_write_reg(qdev, 0x00, 0x1140); 877 ql_mii_write_reg(qdev, 0x1c, 0xfaf0); 878} 879 880static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev, 881 u16 phyIdReg0, u16 phyIdReg1) 882{ 883 enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN; 884 u32 oui; 885 u16 model; 886 int i; 887 888 if (phyIdReg0 == 0xffff) 889 return result; 890 891 if (phyIdReg1 == 0xffff) 892 return result; 893 894 /* oui is split between two registers */ 895 oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10); 896 897 model = (phyIdReg1 & PHY_MODEL_MASK) >> 4; 898 899 /* Scan table for this PHY */ 900 for (i = 0; i < MAX_PHY_DEV_TYPES; i++) { 901 if 
((oui == PHY_DEVICES[i].phyIdOUI) && 902 (model == PHY_DEVICES[i].phyIdModel)) { 903 netdev_info(qdev->ndev, "Phy: %s\n", 904 PHY_DEVICES[i].name); 905 result = PHY_DEVICES[i].phyDevice; 906 break; 907 } 908 } 909 910 return result; 911} 912 913static int ql_phy_get_speed(struct ql3_adapter *qdev) 914{ 915 u16 reg; 916 917 switch (qdev->phyType) { 918 case PHY_AGERE_ET1011C: { 919 if (ql_mii_read_reg(qdev, 0x1A, ®) < 0) 920 return 0; 921 922 reg = (reg >> 8) & 3; 923 break; 924 } 925 default: 926 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) 927 return 0; 928 929 reg = (((reg & 0x18) >> 3) & 3); 930 } 931 932 switch (reg) { 933 case 2: 934 return SPEED_1000; 935 case 1: 936 return SPEED_100; 937 case 0: 938 return SPEED_10; 939 default: 940 return -1; 941 } 942} 943 944static int ql_is_full_dup(struct ql3_adapter *qdev) 945{ 946 u16 reg; 947 948 switch (qdev->phyType) { 949 case PHY_AGERE_ET1011C: { 950 if (ql_mii_read_reg(qdev, 0x1A, ®)) 951 return 0; 952 953 return ((reg & 0x0080) && (reg & 0x1000)) != 0; 954 } 955 case PHY_VITESSE_VSC8211: 956 default: { 957 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, ®) < 0) 958 return 0; 959 return (reg & PHY_AUX_DUPLEX_STAT) != 0; 960 } 961 } 962} 963 964static int ql_is_phy_neg_pause(struct ql3_adapter *qdev) 965{ 966 u16 reg; 967 968 if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, ®) < 0) 969 return 0; 970 971 return (reg & PHY_NEG_PAUSE) != 0; 972} 973 974static int PHY_Setup(struct ql3_adapter *qdev) 975{ 976 u16 reg1; 977 u16 reg2; 978 bool agereAddrChangeNeeded = false; 979 u32 miiAddr = 0; 980 int err; 981 982 /* Determine the PHY we are using by reading the ID's */ 983 err = ql_mii_read_reg(qdev, PHY_ID_0_REG, ®1); 984 if (err != 0) { 985 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n"); 986 return err; 987 } 988 989 err = ql_mii_read_reg(qdev, PHY_ID_1_REG, ®2); 990 if (err != 0) { 991 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n"); 992 return err; 993 } 994 995 /* Check if we 
have a Agere PHY */ 996 if ((reg1 == 0xffff) || (reg2 == 0xffff)) { 997 998 /* Determine which MII address we should be using 999 determined by the index of the card */ 1000 if (qdev->mac_index == 0) 1001 miiAddr = MII_AGERE_ADDR_1; 1002 else 1003 miiAddr = MII_AGERE_ADDR_2; 1004 1005 err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, ®1, miiAddr); 1006 if (err != 0) { 1007 netdev_err(qdev->ndev, 1008 "Could not read from reg PHY_ID_0_REG after Agere detected\n"); 1009 return err; 1010 } 1011 1012 err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, ®2, miiAddr); 1013 if (err != 0) { 1014 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); 1015 return err; 1016 } 1017 1018 /* We need to remember to initialize the Agere PHY */ 1019 agereAddrChangeNeeded = true; 1020 } 1021 1022 /* Determine the particular PHY we have on board to apply 1023 PHY specific initializations */ 1024 qdev->phyType = getPhyType(qdev, reg1, reg2); 1025 1026 if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { 1027 /* need this here so address gets changed */ 1028 phyAgereSpecificInit(qdev, miiAddr); 1029 } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { 1030 netdev_err(qdev->ndev, "PHY is unknown\n"); 1031 return -EIO; 1032 } 1033 1034 return 0; 1035} 1036 1037/* 1038 * Caller holds hw_lock. 1039 */ 1040static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) 1041{ 1042 struct ql3xxx_port_registers __iomem *port_regs = 1043 qdev->mem_map_registers; 1044 u32 value; 1045 1046 if (enable) 1047 value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16)); 1048 else 1049 value = (MAC_CONFIG_REG_PE << 16); 1050 1051 if (qdev->mac_index) 1052 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1053 else 1054 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1055} 1056 1057/* 1058 * Caller holds hw_lock. 
1059 */ 1060static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) 1061{ 1062 struct ql3xxx_port_registers __iomem *port_regs = 1063 qdev->mem_map_registers; 1064 u32 value; 1065 1066 if (enable) 1067 value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16)); 1068 else 1069 value = (MAC_CONFIG_REG_SR << 16); 1070 1071 if (qdev->mac_index) 1072 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1073 else 1074 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1075} 1076 1077/* 1078 * Caller holds hw_lock. 1079 */ 1080static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) 1081{ 1082 struct ql3xxx_port_registers __iomem *port_regs = 1083 qdev->mem_map_registers; 1084 u32 value; 1085 1086 if (enable) 1087 value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16)); 1088 else 1089 value = (MAC_CONFIG_REG_GM << 16); 1090 1091 if (qdev->mac_index) 1092 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1093 else 1094 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1095} 1096 1097/* 1098 * Caller holds hw_lock. 1099 */ 1100static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) 1101{ 1102 struct ql3xxx_port_registers __iomem *port_regs = 1103 qdev->mem_map_registers; 1104 u32 value; 1105 1106 if (enable) 1107 value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16)); 1108 else 1109 value = (MAC_CONFIG_REG_FD << 16); 1110 1111 if (qdev->mac_index) 1112 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1113 else 1114 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1115} 1116 1117/* 1118 * Caller holds hw_lock. 
1119 */ 1120static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) 1121{ 1122 struct ql3xxx_port_registers __iomem *port_regs = 1123 qdev->mem_map_registers; 1124 u32 value; 1125 1126 if (enable) 1127 value = 1128 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) | 1129 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16)); 1130 else 1131 value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16); 1132 1133 if (qdev->mac_index) 1134 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); 1135 else 1136 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); 1137} 1138 1139/* 1140 * Caller holds hw_lock. 1141 */ 1142static int ql_is_fiber(struct ql3_adapter *qdev) 1143{ 1144 struct ql3xxx_port_registers __iomem *port_regs = 1145 qdev->mem_map_registers; 1146 u32 bitToCheck = 0; 1147 u32 temp; 1148 1149 switch (qdev->mac_index) { 1150 case 0: 1151 bitToCheck = PORT_STATUS_SM0; 1152 break; 1153 case 1: 1154 bitToCheck = PORT_STATUS_SM1; 1155 break; 1156 } 1157 1158 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1159 return (temp & bitToCheck) != 0; 1160} 1161 1162static int ql_is_auto_cfg(struct ql3_adapter *qdev) 1163{ 1164 u16 reg; 1165 ql_mii_read_reg(qdev, 0x00, ®); 1166 return (reg & 0x1000) != 0; 1167} 1168 1169/* 1170 * Caller holds hw_lock. 
1171 */ 1172static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) 1173{ 1174 struct ql3xxx_port_registers __iomem *port_regs = 1175 qdev->mem_map_registers; 1176 u32 bitToCheck = 0; 1177 u32 temp; 1178 1179 switch (qdev->mac_index) { 1180 case 0: 1181 bitToCheck = PORT_STATUS_AC0; 1182 break; 1183 case 1: 1184 bitToCheck = PORT_STATUS_AC1; 1185 break; 1186 } 1187 1188 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1189 if (temp & bitToCheck) { 1190 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n"); 1191 return 1; 1192 } 1193 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n"); 1194 return 0; 1195} 1196 1197/* 1198 * ql_is_neg_pause() returns 1 if pause was negotiated to be on 1199 */ 1200static int ql_is_neg_pause(struct ql3_adapter *qdev) 1201{ 1202 if (ql_is_fiber(qdev)) 1203 return ql_is_petbi_neg_pause(qdev); 1204 else 1205 return ql_is_phy_neg_pause(qdev); 1206} 1207 1208static int ql_auto_neg_error(struct ql3_adapter *qdev) 1209{ 1210 struct ql3xxx_port_registers __iomem *port_regs = 1211 qdev->mem_map_registers; 1212 u32 bitToCheck = 0; 1213 u32 temp; 1214 1215 switch (qdev->mac_index) { 1216 case 0: 1217 bitToCheck = PORT_STATUS_AE0; 1218 break; 1219 case 1: 1220 bitToCheck = PORT_STATUS_AE1; 1221 break; 1222 } 1223 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); 1224 return (temp & bitToCheck) != 0; 1225} 1226 1227static u32 ql_get_link_speed(struct ql3_adapter *qdev) 1228{ 1229 if (ql_is_fiber(qdev)) 1230 return SPEED_1000; 1231 else 1232 return ql_phy_get_speed(qdev); 1233} 1234 1235static int ql_is_link_full_dup(struct ql3_adapter *qdev) 1236{ 1237 if (ql_is_fiber(qdev)) 1238 return 1; 1239 else 1240 return ql_is_full_dup(qdev); 1241} 1242 1243/* 1244 * Caller holds hw_lock. 
1245 */ 1246static int ql_link_down_detect(struct ql3_adapter *qdev) 1247{ 1248 struct ql3xxx_port_registers __iomem *port_regs = 1249 qdev->mem_map_registers; 1250 u32 bitToCheck = 0; 1251 u32 temp; 1252 1253 switch (qdev->mac_index) { 1254 case 0: 1255 bitToCheck = ISP_CONTROL_LINK_DN_0; 1256 break; 1257 case 1: 1258 bitToCheck = ISP_CONTROL_LINK_DN_1; 1259 break; 1260 } 1261 1262 temp = 1263 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); 1264 return (temp & bitToCheck) != 0; 1265} 1266 1267/* 1268 * Caller holds hw_lock. 1269 */ 1270static int ql_link_down_detect_clear(struct ql3_adapter *qdev) 1271{ 1272 struct ql3xxx_port_registers __iomem *port_regs = 1273 qdev->mem_map_registers; 1274 1275 switch (qdev->mac_index) { 1276 case 0: 1277 ql_write_common_reg(qdev, 1278 &port_regs->CommonRegs.ispControlStatus, 1279 (ISP_CONTROL_LINK_DN_0) | 1280 (ISP_CONTROL_LINK_DN_0 << 16)); 1281 break; 1282 1283 case 1: 1284 ql_write_common_reg(qdev, 1285 &port_regs->CommonRegs.ispControlStatus, 1286 (ISP_CONTROL_LINK_DN_1) | 1287 (ISP_CONTROL_LINK_DN_1 << 16)); 1288 break; 1289 1290 default: 1291 return 1; 1292 } 1293 1294 return 0; 1295} 1296 1297/* 1298 * Caller holds hw_lock. 
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	/*
	 * Returns 1 when this function (PCI function 1 or 3) owns the
	 * shared PHY for its port, 0 when the other function is master.
	 * Note the sense: a SET enable bit means the *other* function
	 * controls the port.
	 */
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}

/* Soft-reset the external PHY via the MII control register. */
static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

/*
 * Program the PHY's advertisement registers from the NVRAM port
 * configuration and (re)start auto-negotiation.  The MII register
 * sequence below is order-sensitive; do not reorder the accesses.
 */
static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C)
		ql_mii_write_reg(qdev, 0x13, 0x0000);
		/* turn off external loopback */

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/* Some HBA's in the field are set to 0 and they need to
	   be reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	/* NOTE(review): bit 0 set when gigabit is configured — presumably a
	 * PHY-specific advertisement bit; confirm against the PHY datasheet.
	 */
	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	/* Kick off (restart) auto-negotiation with the new advertisements. */
	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

/* Full PHY bring-up: reset, chip-specific setup, then start negotiation. */
static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	/* Read this port's link-up bit and map it to LS_UP/LS_DOWN. */
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

/*
 * Bring up the port's PHY/PETBI under the GIO hardware semaphore.
 * Returns 0 on success, -1 if the semaphore could not be taken.
 */
static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

/*
 * Called once auto-negotiation has completed: program the MAC to match
 * the negotiated speed/duplex/pause and enable it, or restart the port
 * if the remote end reported a negotiation error.  Takes and releases
 * the GIO semaphore; note the early-return path below releases it
 * before calling ql_port_start(), which re-acquires it internally.
 */
static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed
					(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup
					    (qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause
					 (qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on it's own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

/*
 * Periodic link-state machine, driven by the adapter timer.  Runs under
 * hw_lock and re-arms the timer on every exit path.
 */
static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);

	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		netif_info(qdev, link, qdev->ndev,
			   "Reset in progress, skip processing link state\n");

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 2 second interval. */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		/* Unknown state: (re)start the port, then treat as down. */
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			netif_info(qdev, link, qdev->ndev, "Link is up\n");
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if (curr_link_state == LS_DOWN) {
			netif_info(qdev, link, qdev->ndev, "Link is down\n");
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 2 second interval. */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	/* Record whether this function is the link master for its port. */
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	/* Enable MII scan mode, then init the PETBI or PHY if we own it. */
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset
 * so that the management interface clock speed can be set properly.
1615 * It would be better if we had a way to disable MDC until after the 1616 * PHY is out of reset, but we don't have that capability. 1617 */ 1618static int ql_mii_setup(struct ql3_adapter *qdev) 1619{ 1620 u32 reg; 1621 struct ql3xxx_port_registers __iomem *port_regs = 1622 qdev->mem_map_registers; 1623 1624 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1625 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1626 2) << 7)) 1627 return -1; 1628 1629 if (qdev->device_id == QL3032_DEVICE_ID) 1630 ql_write_page0_reg(qdev, 1631 &port_regs->macMIIMgmtControlReg, 0x0f00000); 1632 1633 /* Divide 125MHz clock by 28 to meet PHY timing requirements */ 1634 reg = MAC_MII_CONTROL_CLK_SEL_DIV28; 1635 1636 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 1637 reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); 1638 1639 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1640 return 0; 1641} 1642 1643#define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ 1644 SUPPORTED_FIBRE | \ 1645 SUPPORTED_Autoneg) 1646#define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ 1647 SUPPORTED_10baseT_Full | \ 1648 SUPPORTED_100baseT_Half | \ 1649 SUPPORTED_100baseT_Full | \ 1650 SUPPORTED_1000baseT_Half | \ 1651 SUPPORTED_1000baseT_Full | \ 1652 SUPPORTED_Autoneg | \ 1653 SUPPORTED_TP); \ 1654 1655static u32 ql_supported_modes(struct ql3_adapter *qdev) 1656{ 1657 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) 1658 return SUPPORTED_OPTICAL_MODES; 1659 1660 return SUPPORTED_TP_MODES; 1661} 1662 1663static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) 1664{ 1665 int status; 1666 unsigned long hw_flags; 1667 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1668 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1669 (QL_RESOURCE_BITS_BASE_CODE | 1670 (qdev->mac_index) * 2) << 7)) { 1671 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1672 return 0; 1673 } 1674 status = ql_is_auto_cfg(qdev); 1675 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1676 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1677 
return status; 1678} 1679 1680static u32 ql_get_speed(struct ql3_adapter *qdev) 1681{ 1682 u32 status; 1683 unsigned long hw_flags; 1684 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1685 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1686 (QL_RESOURCE_BITS_BASE_CODE | 1687 (qdev->mac_index) * 2) << 7)) { 1688 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1689 return 0; 1690 } 1691 status = ql_get_link_speed(qdev); 1692 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1693 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1694 return status; 1695} 1696 1697static int ql_get_full_dup(struct ql3_adapter *qdev) 1698{ 1699 int status; 1700 unsigned long hw_flags; 1701 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1702 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1703 (QL_RESOURCE_BITS_BASE_CODE | 1704 (qdev->mac_index) * 2) << 7)) { 1705 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1706 return 0; 1707 } 1708 status = ql_is_link_full_dup(qdev); 1709 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1710 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1711 return status; 1712} 1713 1714static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) 1715{ 1716 struct ql3_adapter *qdev = netdev_priv(ndev); 1717 1718 ecmd->transceiver = XCVR_INTERNAL; 1719 ecmd->supported = ql_supported_modes(qdev); 1720 1721 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { 1722 ecmd->port = PORT_FIBRE; 1723 } else { 1724 ecmd->port = PORT_TP; 1725 ecmd->phy_address = qdev->PHYAddr; 1726 } 1727 ecmd->advertising = ql_supported_modes(qdev); 1728 ecmd->autoneg = ql_get_auto_cfg_status(qdev); 1729 ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev)); 1730 ecmd->duplex = ql_get_full_dup(qdev); 1731 return 0; 1732} 1733 1734static void ql_get_drvinfo(struct net_device *ndev, 1735 struct ethtool_drvinfo *drvinfo) 1736{ 1737 struct ql3_adapter *qdev = netdev_priv(ndev); 1738 strncpy(drvinfo->driver, ql3xxx_driver_name, 32); 1739 strncpy(drvinfo->version, ql3xxx_driver_version, 32); 1740 
strncpy(drvinfo->fw_version, "N/A", 32); 1741 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); 1742 drvinfo->regdump_len = 0; 1743 drvinfo->eedump_len = 0; 1744} 1745 1746static u32 ql_get_msglevel(struct net_device *ndev) 1747{ 1748 struct ql3_adapter *qdev = netdev_priv(ndev); 1749 return qdev->msg_enable; 1750} 1751 1752static void ql_set_msglevel(struct net_device *ndev, u32 value) 1753{ 1754 struct ql3_adapter *qdev = netdev_priv(ndev); 1755 qdev->msg_enable = value; 1756} 1757 1758static void ql_get_pauseparam(struct net_device *ndev, 1759 struct ethtool_pauseparam *pause) 1760{ 1761 struct ql3_adapter *qdev = netdev_priv(ndev); 1762 struct ql3xxx_port_registers __iomem *port_regs = 1763 qdev->mem_map_registers; 1764 1765 u32 reg; 1766 if (qdev->mac_index == 0) 1767 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); 1768 else 1769 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); 1770 1771 pause->autoneg = ql_get_auto_cfg_status(qdev); 1772 pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2; 1773 pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1; 1774} 1775 1776static const struct ethtool_ops ql3xxx_ethtool_ops = { 1777 .get_settings = ql_get_settings, 1778 .get_drvinfo = ql_get_drvinfo, 1779 .get_link = ethtool_op_get_link, 1780 .get_msglevel = ql_get_msglevel, 1781 .set_msglevel = ql_set_msglevel, 1782 .get_pauseparam = ql_get_pauseparam, 1783}; 1784 1785static int ql_populate_free_queue(struct ql3_adapter *qdev) 1786{ 1787 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; 1788 dma_addr_t map; 1789 int err; 1790 1791 while (lrg_buf_cb) { 1792 if (!lrg_buf_cb->skb) { 1793 lrg_buf_cb->skb = 1794 netdev_alloc_skb(qdev->ndev, 1795 qdev->lrg_buffer_len); 1796 if (unlikely(!lrg_buf_cb->skb)) { 1797 netdev_printk(KERN_DEBUG, qdev->ndev, 1798 "Failed netdev_alloc_skb()\n"); 1799 break; 1800 } else { 1801 /* 1802 * We save some space to copy the ethhdr from 1803 * first buffer 1804 */ 1805 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); 
1806 map = pci_map_single(qdev->pdev, 1807 lrg_buf_cb->skb->data, 1808 qdev->lrg_buffer_len - 1809 QL_HEADER_SPACE, 1810 PCI_DMA_FROMDEVICE); 1811 1812 err = pci_dma_mapping_error(qdev->pdev, map); 1813 if (err) { 1814 netdev_err(qdev->ndev, 1815 "PCI mapping failed with error: %d\n", 1816 err); 1817 dev_kfree_skb(lrg_buf_cb->skb); 1818 lrg_buf_cb->skb = NULL; 1819 break; 1820 } 1821 1822 1823 lrg_buf_cb->buf_phy_addr_low = 1824 cpu_to_le32(LS_64BITS(map)); 1825 lrg_buf_cb->buf_phy_addr_high = 1826 cpu_to_le32(MS_64BITS(map)); 1827 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 1828 dma_unmap_len_set(lrg_buf_cb, maplen, 1829 qdev->lrg_buffer_len - 1830 QL_HEADER_SPACE); 1831 --qdev->lrg_buf_skb_check; 1832 if (!qdev->lrg_buf_skb_check) 1833 return 1; 1834 } 1835 } 1836 lrg_buf_cb = lrg_buf_cb->next; 1837 } 1838 return 0; 1839} 1840 1841/* 1842 * Caller holds hw_lock. 1843 */ 1844static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) 1845{ 1846 struct ql3xxx_port_registers __iomem *port_regs = 1847 qdev->mem_map_registers; 1848 1849 if (qdev->small_buf_release_cnt >= 16) { 1850 while (qdev->small_buf_release_cnt >= 16) { 1851 qdev->small_buf_q_producer_index++; 1852 1853 if (qdev->small_buf_q_producer_index == 1854 NUM_SBUFQ_ENTRIES) 1855 qdev->small_buf_q_producer_index = 0; 1856 qdev->small_buf_release_cnt -= 8; 1857 } 1858 wmb(); 1859 writel(qdev->small_buf_q_producer_index, 1860 &port_regs->CommonRegs.rxSmallQProducerIndex); 1861 } 1862} 1863 1864/* 1865 * Caller holds hw_lock. 
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	/*
	 * Refill the hardware large-buffer queue in batches of 8 entries
	 * and advance the producer index.  Bails out early if skbs are
	 * still missing and ql_populate_free_queue() cannot replenish them.
	 */
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			/* One queue entry batch = 8 buffer addresses. */
			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
					ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
					lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
					lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			/* Wrap the element pointer one entry early;
			 * NOTE(review): presumably the last entry is
			 * reserved/shadowed — confirm against the queue
			 * layout in qla3xxx.h.
			 */
			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		/* Ensure queue-entry writes land before the doorbell. */
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}

/*
 * Handle a transmit-completion IOCB: unmap the frame's DMA segments,
 * update stats, free the skb, and return the tx slot to the pool.
 */
static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	int retval = 0;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_warn(qdev->ndev,
			    "Frame too short but it was padded and sent\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_err(qdev->ndev,
			   "Frame too short to be legal, frame not sent\n");

		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
			   mac_rsp->transaction_id);

		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto invalid_seg_count;
	}

	/* Segment 0 was mapped with pci_map_single(), the rest as pages. */
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}

/* Advance the small-buffer consumer index (wraps) and count the release. */
static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

/* Take the next large-buffer control block (wraps) and count the release. */
static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to the buffer two.  Buffer two is the sent up while
 * buffer one is returned to the hardware to be reused.
2000 * 3032 receives all of it's data and headers in one buffer for a 2001 * simpler process. 3032 also supports checksum verification as 2002 * can be seen in ql_process_macip_rx_intr(). 2003 */ 2004stat…
Large files files are truncated, but you can click here to view the full file