
/drivers/staging/slicoss/slicoss.c

https://bitbucket.org/cyanogenmod/android_kernel_asus_tf300t
  1. /**************************************************************************
  2. *
  3. * Copyright 2000-2006 Alacritech, Inc. All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions
  7. * are met:
  8. *
  9. * 1. Redistributions of source code must retain the above copyright
  10. * notice, this list of conditions and the following disclaimer.
  11. * 2. Redistributions in binary form must reproduce the above
  12. * copyright notice, this list of conditions and the following
  13. * disclaimer in the documentation and/or other materials provided
  14. * with the distribution.
  15. *
  16. * Alternatively, this software may be distributed under the terms of the
  17. * GNU General Public License ("GPL") version 2 as published by the Free
  18. * Software Foundation.
  19. *
  20. * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
  21. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  22. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  23. * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
  24. * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  25. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  26. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  27. * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  28. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  29. * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
  30. * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  31. * SUCH DAMAGE.
  32. *
  33. * The views and conclusions contained in the software and documentation
  34. * are those of the authors and should not be interpreted as representing
  35. * official policies, either expressed or implied, of Alacritech, Inc.
  36. *
  37. **************************************************************************/
  38. /*
  39. * FILENAME: slicoss.c
  40. *
  41. * The SLICOSS driver for Alacritech's IS-NIC products.
  42. *
  43. * This driver is supposed to support:
  44. *
  45. * Mojave cards (single port PCI Gigabit) both copper and fiber
  46. * Oasis cards (single and dual port PCI-x Gigabit) copper and fiber
  47. * Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
  48. *
  49. * The driver was actually tested on Oasis and Kalahari cards.
  50. *
  51. *
  52. * NOTE: This is the standard, non-accelerated version of Alacritech's
  53. * IS-NIC driver.
  54. */
  55. #define KLUDGE_FOR_4GB_BOUNDARY 1
  56. #define DEBUG_MICROCODE 1
  57. #define DBG 1
  58. #define SLIC_INTERRUPT_PROCESS_LIMIT 1
  59. #define SLIC_OFFLOAD_IP_CHECKSUM 1
  60. #define STATS_TIMER_INTERVAL 2
  61. #define PING_TIMER_INTERVAL 1
  62. #include <linux/kernel.h>
  63. #include <linux/string.h>
  64. #include <linux/errno.h>
  65. #include <linux/ioport.h>
  66. #include <linux/slab.h>
  67. #include <linux/interrupt.h>
  68. #include <linux/timer.h>
  69. #include <linux/pci.h>
  70. #include <linux/spinlock.h>
  71. #include <linux/init.h>
  72. #include <linux/bitops.h>
  73. #include <linux/io.h>
  74. #include <linux/netdevice.h>
  75. #include <linux/etherdevice.h>
  76. #include <linux/skbuff.h>
  77. #include <linux/delay.h>
  78. #include <linux/debugfs.h>
  79. #include <linux/seq_file.h>
  80. #include <linux/kthread.h>
  81. #include <linux/module.h>
  82. #include <linux/moduleparam.h>
  83. #include <linux/firmware.h>
  84. #include <linux/types.h>
  85. #include <linux/dma-mapping.h>
  86. #include <linux/mii.h>
  87. #include <linux/if_vlan.h>
  88. #include <asm/unaligned.h>
  89. #include <linux/ethtool.h>
  90. #include <linux/uaccess.h>
  91. #include "slichw.h"
  92. #include "slic.h"
  93. static uint slic_first_init = 1;
  94. static char *slic_banner = "Alacritech SLIC Technology(tm) Server "\
  95. "and Storage Accelerator (Non-Accelerated)";
  96. static char *slic_proc_version = "2.0.351 2006/07/14 12:26:00";
  97. static char *slic_product_name = "SLIC Technology(tm) Server "\
  98. "and Storage Accelerator (Non-Accelerated)";
  99. static char *slic_vendor = "Alacritech, Inc.";
  100. static int slic_debug = 1;
  101. static int debug = -1;
  102. static struct net_device *head_netdevice;
  103. static struct base_driver slic_global = { {}, 0, 0, 0, 1, NULL, NULL };
  104. static int intagg_delay = 100;
  105. static u32 dynamic_intagg;
  106. static unsigned int rcv_count;
  107. static struct dentry *slic_debugfs;
  108. #define DRV_NAME "slicoss"
  109. #define DRV_VERSION "2.0.1"
  110. #define DRV_AUTHOR "Alacritech, Inc. Engineering"
  111. #define DRV_DESCRIPTION "Alacritech SLIC Technology(tm) "\
  112. "Non-Accelerated Driver"
  113. #define DRV_COPYRIGHT "Copyright 2000-2006 Alacritech, Inc. "\
  114. "All rights reserved."
  115. #define PFX DRV_NAME " "
  116. MODULE_AUTHOR(DRV_AUTHOR);
  117. MODULE_DESCRIPTION(DRV_DESCRIPTION);
  118. MODULE_LICENSE("Dual BSD/GPL");
  119. module_param(dynamic_intagg, int, 0);
  120. MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
  121. module_param(intagg_delay, int, 0);
  122. MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");
  123. static DEFINE_PCI_DEVICE_TABLE(slic_pci_tbl) = {
  124. { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_1GB_DEVICE_ID) },
  125. { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_2GB_DEVICE_ID) },
  126. { 0 }
  127. };
  128. MODULE_DEVICE_TABLE(pci, slic_pci_tbl);
  129. #ifdef ASSERT
  130. #undef ASSERT
  131. #endif
  132. static void slic_assert_fail(void)
  133. {
  134. u32 cpuid;
  135. u32 curr_pid;
  136. cpuid = smp_processor_id();
  137. curr_pid = current->pid;
  138. printk(KERN_ERR "%s CPU # %d ---- PID # %d\n",
  139. __func__, cpuid, curr_pid);
  140. }
  141. #ifndef ASSERT
  142. #define ASSERT(a) do { \
  143. if (!(a)) { \
  144. printk(KERN_ERR "slicoss ASSERT() Failure: function %s " \
  145. "line %d\n", __func__, __LINE__); \
  146. slic_assert_fail(); \
  147. } \
  148. } while (0)
  149. #endif
  150. #define SLIC_GET_SLIC_HANDLE(_adapter, _pslic_handle) \
  151. { \
  152. spin_lock_irqsave(&_adapter->handle_lock.lock, \
  153. _adapter->handle_lock.flags); \
  154. _pslic_handle = _adapter->pfree_slic_handles; \
  155. if (_pslic_handle) { \
  156. ASSERT(_pslic_handle->type == SLIC_HANDLE_FREE); \
  157. _adapter->pfree_slic_handles = _pslic_handle->next; \
  158. } \
  159. spin_unlock_irqrestore(&_adapter->handle_lock.lock, \
  160. _adapter->handle_lock.flags); \
  161. }
  162. #define SLIC_FREE_SLIC_HANDLE(_adapter, _pslic_handle) \
  163. { \
  164. _pslic_handle->type = SLIC_HANDLE_FREE; \
  165. spin_lock_irqsave(&_adapter->handle_lock.lock, \
  166. _adapter->handle_lock.flags); \
  167. _pslic_handle->next = _adapter->pfree_slic_handles; \
  168. _adapter->pfree_slic_handles = _pslic_handle; \
  169. spin_unlock_irqrestore(&_adapter->handle_lock.lock, \
  170. _adapter->handle_lock.flags); \
  171. }
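/*
 * MMIO write helpers: slic_reg32_write() is a plain 32-bit register write
 * with an optional memory barrier; slic_reg64_write() writes a 64-bit
 * quantity as two 32-bit writes under bit64reglock, caching the upper
 * address half in adapter->curaddrupper so it is only rewritten when it
 * changes.
 */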
  172. static inline void slic_reg32_write(void __iomem *reg, u32 value, bool flush)
  173. {
  174. writel(value, reg);
  175. if (flush)
  176. mb();
  177. }
  178. static inline void slic_reg64_write(struct adapter *adapter, void __iomem *reg,
  179. u32 value, void __iomem *regh, u32 paddrh,
  180. bool flush)
  181. {
  182. spin_lock_irqsave(&adapter->bit64reglock.lock,
  183. adapter->bit64reglock.flags);
  184. if (paddrh != adapter->curaddrupper) {
  185. adapter->curaddrupper = paddrh;
  186. writel(paddrh, regh);
  187. }
  188. writel(value, reg);
  189. if (flush)
  190. mb();
  191. spin_unlock_irqrestore(&adapter->bit64reglock.lock,
  192. adapter->bit64reglock.flags);
  193. }
  194. /*
  195. * Functions to obtain the CRC corresponding to the destination mac address.
  196. * This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
  197. * the polynomial:
  198. * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5 +
  199. * x^4 + x^2 + x + 1.
  200. *
  201. * After the CRC for the 6 bytes is generated (but before the value is
  202. * complemented),
  203. * we must then transpose the value and return bits 30-23.
  204. *
  205. */
  206. static u32 slic_crc_table[256]; /* Table of CRCs for all possible byte values */
  207. static u32 slic_crc_init; /* Is table initialized */
  208. /*
  209. * Construct the CRC32 table
  210. */
  211. static void slic_mcast_init_crc32(void)
  212. {
  213. u32 c; /* CRC shift reg */
  214. u32 e = 0; /* Poly X-or pattern */
  215. int i; /* counter */
  216. int k; /* byte being shifted into crc */
  217. static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
  218. for (i = 0; i < ARRAY_SIZE(p); i++)
  219. e |= 1L << (31 - p[i]);
  220. for (i = 1; i < 256; i++) {
  221. c = i;
  222. for (k = 8; k; k--)
  223. c = c & 1 ? (c >> 1) ^ e : c >> 1;
  224. slic_crc_table[i] = c;
  225. }
  226. }
  227. /*
  228. * Return the MAC hash as described above.
  229. */
  230. static unsigned char slic_mcast_get_mac_hash(char *macaddr)
  231. {
  232. u32 crc;
  233. char *p;
  234. int i;
  235. unsigned char machash = 0;
  236. if (!slic_crc_init) {
  237. slic_mcast_init_crc32();
  238. slic_crc_init = 1;
  239. }
  240. crc = 0xFFFFFFFF; /* Preload shift register, per crc-32 spec */
  241. for (i = 0, p = macaddr; i < 6; ++p, ++i)
  242. crc = (crc >> 8) ^ slic_crc_table[(crc ^ *p) & 0xFF];
  243. /* Return bits 1-8, transposed */
  244. for (i = 1; i < 9; i++)
  245. machash |= (((crc >> i) & 1) << (8 - i));
  246. return machash;
  247. }
  248. static void slic_mcast_set_bit(struct adapter *adapter, char *address)
  249. {
  250. unsigned char crcpoly;
  251. /* Get the CRC polynomial for the mac address */
  252. crcpoly = slic_mcast_get_mac_hash(address);
  253. /* We only have space on the SLIC for 64 entries. Lop
  254. * off the top two bits. (2^6 = 64)
  255. */
  256. crcpoly &= 0x3F;
  257. /* OR in the new bit into our 64 bit mask. */
  258. adapter->mcastmask |= (u64) 1 << crcpoly;
  259. }
  260. static void slic_mcast_set_mask(struct adapter *adapter)
  261. {
  262. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  263. if (adapter->macopts & (MAC_ALLMCAST | MAC_PROMISC)) {
  264. /* Turn on all multicast addresses. We have to do this for
  265. * promiscuous mode as well as ALLMCAST mode. It saves the
  266. * Microcode from having to keep state about the MAC
  267. * configuration.
  268. */
  269. slic_reg32_write(&slic_regs->slic_mcastlow, 0xFFFFFFFF, FLUSH);
  270. slic_reg32_write(&slic_regs->slic_mcasthigh, 0xFFFFFFFF,
  271. FLUSH);
  272. } else {
  273. /* Commit our multicast mask to the SLIC by writing to the
  274. * multicast address mask registers
  275. */
  276. slic_reg32_write(&slic_regs->slic_mcastlow,
  277. (u32)(adapter->mcastmask & 0xFFFFFFFF), FLUSH);
  278. slic_reg32_write(&slic_regs->slic_mcasthigh,
  279. (u32)((adapter->mcastmask >> 32) & 0xFFFFFFFF), FLUSH);
  280. }
  281. }
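/* Ping timer callback: simply re-arms itself every PING_TIMER_INTERVAL seconds. */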
  282. static void slic_timer_ping(ulong dev)
  283. {
  284. struct adapter *adapter;
  285. struct sliccard *card;
  286. ASSERT(dev);
  287. adapter = netdev_priv((struct net_device *)dev);
  288. ASSERT(adapter);
  289. card = adapter->card;
  290. ASSERT(card);
  291. adapter->pingtimer.expires = jiffies + (PING_TIMER_INTERVAL * HZ);
  292. add_timer(&adapter->pingtimer);
  293. }
  294. static void slic_unmap_mmio_space(struct adapter *adapter)
  295. {
  296. if (adapter->slic_regs)
  297. iounmap(adapter->slic_regs);
  298. adapter->slic_regs = NULL;
  299. }
  300. /*
  301. * slic_link_config
  302. *
  303. * Write phy control to configure link duplex/speed
  304. *
  305. */
  306. static void slic_link_config(struct adapter *adapter,
  307. u32 linkspeed, u32 linkduplex)
  308. {
  309. u32 __iomem *wphy;
  310. u32 speed;
  311. u32 duplex;
  312. u32 phy_config;
  313. u32 phy_advreg;
  314. u32 phy_gctlreg;
  315. if (adapter->state != ADAPT_UP)
  316. return;
  317. ASSERT((adapter->devid == SLIC_1GB_DEVICE_ID)
  318. || (adapter->devid == SLIC_2GB_DEVICE_ID));
  319. if (linkspeed > LINK_1000MB)
  320. linkspeed = LINK_AUTOSPEED;
  321. if (linkduplex > LINK_AUTOD)
  322. linkduplex = LINK_AUTOD;
  323. wphy = &adapter->slic_regs->slic_wphy;
  324. if ((linkspeed == LINK_AUTOSPEED) || (linkspeed == LINK_1000MB)) {
  325. if (adapter->flags & ADAPT_FLAGS_FIBERMEDIA) {
  326. /* We've got a fiber gigabit interface, and register
  327. * 4 is different in fiber mode than in copper mode
  328. */
  329. /* advertise FD only @1000 Mb */
  330. phy_advreg = (MIICR_REG_4 | (PAR_ADV1000XFD));
  331. /* enable PAUSE frames */
  332. phy_advreg |= PAR_ASYMPAUSE_FIBER;
  333. slic_reg32_write(wphy, phy_advreg, FLUSH);
  334. if (linkspeed == LINK_AUTOSPEED) {
  335. /* reset phy, enable auto-neg */
  336. phy_config =
  337. (MIICR_REG_PCR |
  338. (PCR_RESET | PCR_AUTONEG |
  339. PCR_AUTONEG_RST));
  340. slic_reg32_write(wphy, phy_config, FLUSH);
  341. } else { /* forced 1000 Mb FD */
  342. /* power down phy to break link
  343. (this may not work) */
  344. phy_config = (MIICR_REG_PCR | PCR_POWERDOWN);
  345. slic_reg32_write(wphy, phy_config, FLUSH);
  346. /* wait, Marvell says 1 sec,
  347. try to get away with 10 ms */
  348. mdelay(10);
  349. /* disable auto-neg, set speed/duplex,
  350. soft reset phy, powerup */
  351. phy_config =
  352. (MIICR_REG_PCR |
  353. (PCR_RESET | PCR_SPEED_1000 |
  354. PCR_DUPLEX_FULL));
  355. slic_reg32_write(wphy, phy_config, FLUSH);
  356. }
  357. } else { /* copper gigabit */
  358. /* Auto-Negotiate or 1000 Mb must be auto negotiated
  359. * We've got a copper gigabit interface, and
  360. * register 4 is different in copper mode than
  361. * in fiber mode
  362. */
  363. if (linkspeed == LINK_AUTOSPEED) {
  364. /* advertise 10/100 Mb modes */
  365. phy_advreg =
  366. (MIICR_REG_4 |
  367. (PAR_ADV100FD | PAR_ADV100HD | PAR_ADV10FD
  368. | PAR_ADV10HD));
  369. } else {
  370. /* linkspeed == LINK_1000MB -
  371. don't advertise 10/100 Mb modes */
  372. phy_advreg = MIICR_REG_4;
  373. }
  374. /* enable PAUSE frames */
  375. phy_advreg |= PAR_ASYMPAUSE;
  376. /* required by the Cicada PHY */
  377. phy_advreg |= PAR_802_3;
  378. slic_reg32_write(wphy, phy_advreg, FLUSH);
  379. /* advertise FD only @1000 Mb */
  380. phy_gctlreg = (MIICR_REG_9 | (PGC_ADV1000FD));
  381. slic_reg32_write(wphy, phy_gctlreg, FLUSH);
  382. if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
  383. /* if a Marvell PHY
  384. enable auto crossover */
  385. phy_config =
  386. (MIICR_REG_16 | (MRV_REG16_XOVERON));
  387. slic_reg32_write(wphy, phy_config, FLUSH);
  388. /* reset phy, enable auto-neg */
  389. phy_config =
  390. (MIICR_REG_PCR |
  391. (PCR_RESET | PCR_AUTONEG |
  392. PCR_AUTONEG_RST));
  393. slic_reg32_write(wphy, phy_config, FLUSH);
  394. } else { /* it's a Cicada PHY */
  395. /* enable and restart auto-neg (don't reset) */
  396. phy_config =
  397. (MIICR_REG_PCR |
  398. (PCR_AUTONEG | PCR_AUTONEG_RST));
  399. slic_reg32_write(wphy, phy_config, FLUSH);
  400. }
  401. }
  402. } else {
  403. /* Forced 10/100 */
  404. if (linkspeed == LINK_10MB)
  405. speed = 0;
  406. else
  407. speed = PCR_SPEED_100;
  408. if (linkduplex == LINK_HALFD)
  409. duplex = 0;
  410. else
  411. duplex = PCR_DUPLEX_FULL;
  412. if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
  413. /* if a Marvell PHY
  414. disable auto crossover */
  415. phy_config = (MIICR_REG_16 | (MRV_REG16_XOVEROFF));
  416. slic_reg32_write(wphy, phy_config, FLUSH);
  417. }
  418. /* power down phy to break link (this may not work) */
  419. phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN | speed | duplex));
  420. slic_reg32_write(wphy, phy_config, FLUSH);
  421. /* wait, Marvell says 1 sec, try to get away with 10 ms */
  422. mdelay(10);
  423. if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
  424. /* if a Marvell PHY
  425. disable auto-neg, set speed,
  426. soft reset phy, powerup */
  427. phy_config =
  428. (MIICR_REG_PCR | (PCR_RESET | speed | duplex));
  429. slic_reg32_write(wphy, phy_config, FLUSH);
  430. } else { /* it's a Cicada PHY */
  431. /* disable auto-neg, set speed, powerup */
  432. phy_config = (MIICR_REG_PCR | (speed | duplex));
  433. slic_reg32_write(wphy, phy_config, FLUSH);
  434. }
  435. }
  436. }
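/*
 * Download the receive sequencer microcode: select the firmware image by
 * device ID, check its length, then feed each instruction (address, low
 * 32 bits, high byte) to the card through the slic_rcv_wcs register.
 */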
  437. static int slic_card_download_gbrcv(struct adapter *adapter)
  438. {
  439. const struct firmware *fw;
  440. const char *file = "";
  441. int ret;
  442. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  443. u32 codeaddr;
  444. u32 instruction;
  445. int index = 0;
  446. u32 rcvucodelen = 0;
  447. switch (adapter->devid) {
  448. case SLIC_2GB_DEVICE_ID:
  449. file = "slicoss/oasisrcvucode.sys";
  450. break;
  451. case SLIC_1GB_DEVICE_ID:
  452. file = "slicoss/gbrcvucode.sys";
  453. break;
  454. default:
  455. ASSERT(0);
  456. break;
  457. }
  458. ret = request_firmware(&fw, file, &adapter->pcidev->dev);
  459. if (ret) {
  460. dev_err(&adapter->pcidev->dev,
  461. "SLICOSS: Failed to load firmware %s\n", file);
  462. return ret;
  463. }
  464. rcvucodelen = *(u32 *)(fw->data + index);
  465. index += 4;
  466. switch (adapter->devid) {
  467. case SLIC_2GB_DEVICE_ID:
  468. if (rcvucodelen != OasisRcvUCodeLen)
  469. return -EINVAL;
  470. break;
  471. case SLIC_1GB_DEVICE_ID:
  472. if (rcvucodelen != GBRcvUCodeLen)
  473. return -EINVAL;
  474. break;
  475. default:
  476. ASSERT(0);
  477. break;
  478. }
  479. /* start download */
  480. slic_reg32_write(&slic_regs->slic_rcv_wcs, SLIC_RCVWCS_BEGIN, FLUSH);
  481. /* download the rcv sequencer ucode */
  482. for (codeaddr = 0; codeaddr < rcvucodelen; codeaddr++) {
  483. /* write out instruction address */
  484. slic_reg32_write(&slic_regs->slic_rcv_wcs, codeaddr, FLUSH);
  485. instruction = *(u32 *)(fw->data + index);
  486. index += 4;
  487. /* write out the instruction data low addr */
  488. slic_reg32_write(&slic_regs->slic_rcv_wcs, instruction, FLUSH);
  489. instruction = *(u8 *)(fw->data + index);
  490. index++;
  491. /* write out the instruction data high addr */
  492. slic_reg32_write(&slic_regs->slic_rcv_wcs, (u8)instruction,
  493. FLUSH);
  494. }
  495. /* download finished */
  496. release_firmware(fw);
  497. slic_reg32_write(&slic_regs->slic_rcv_wcs, SLIC_RCVWCS_FINISH, FLUSH);
  498. return 0;
  499. }
  500. MODULE_FIRMWARE("slicoss/oasisrcvucode.sys");
  501. MODULE_FIRMWARE("slicoss/gbrcvucode.sys");
  502. static int slic_card_download(struct adapter *adapter)
  503. {
  504. const struct firmware *fw;
  505. const char *file = "";
  506. int ret;
  507. u32 section;
  508. int thissectionsize;
  509. int codeaddr;
  510. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  511. u32 instruction;
  512. u32 baseaddress;
  513. u32 i;
  514. u32 numsects = 0;
  515. u32 sectsize[3];
  516. u32 sectstart[3];
  517. int ucode_start, index = 0;
  518. switch (adapter->devid) {
  519. case SLIC_2GB_DEVICE_ID:
  520. file = "slicoss/oasisdownload.sys";
  521. break;
  522. case SLIC_1GB_DEVICE_ID:
  523. file = "slicoss/gbdownload.sys";
  524. break;
  525. default:
  526. ASSERT(0);
  527. break;
  528. }
  529. ret = request_firmware(&fw, file, &adapter->pcidev->dev);
  530. if (ret) {
  531. dev_err(&adapter->pcidev->dev,
  532. "SLICOSS: Failed to load firmware %s\n", file);
  533. return ret;
  534. }
  535. numsects = *(u32 *)(fw->data + index);
  536. index += 4;
  537. ASSERT(numsects <= 3);
  538. for (i = 0; i < numsects; i++) {
  539. sectsize[i] = *(u32 *)(fw->data + index);
  540. index += 4;
  541. }
  542. for (i = 0; i < numsects; i++) {
  543. sectstart[i] = *(u32 *)(fw->data + index);
  544. index += 4;
  545. }
  546. ucode_start = index;
  547. instruction = *(u32 *)(fw->data + index);
  548. index += 4;
  549. for (section = 0; section < numsects; section++) {
  550. baseaddress = sectstart[section];
  551. thissectionsize = sectsize[section] >> 3;
  552. for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) {
  553. /* Write out instruction address */
  554. slic_reg32_write(&slic_regs->slic_wcs,
  555. baseaddress + codeaddr, FLUSH);
  556. /* Write out instruction to low addr */
  557. slic_reg32_write(&slic_regs->slic_wcs, instruction, FLUSH);
  558. instruction = *(u32 *)(fw->data + index);
  559. index += 4;
  560. /* Write out instruction to high addr */
  561. slic_reg32_write(&slic_regs->slic_wcs, instruction, FLUSH);
  562. instruction = *(u32 *)(fw->data + index);
  563. index += 4;
  564. }
  565. }
  566. index = ucode_start;
  567. for (section = 0; section < numsects; section++) {
  568. instruction = *(u32 *)(fw->data + index);
  569. baseaddress = sectstart[section];
  570. if (baseaddress < 0x8000)
  571. continue;
  572. thissectionsize = sectsize[section] >> 3;
  573. for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) {
  574. /* Write out instruction address */
  575. slic_reg32_write(&slic_regs->slic_wcs,
  576. SLIC_WCS_COMPARE | (baseaddress + codeaddr),
  577. FLUSH);
  578. /* Write out instruction to low addr */
  579. slic_reg32_write(&slic_regs->slic_wcs, instruction,
  580. FLUSH);
  581. instruction = *(u32 *)(fw->data + index);
  582. index += 4;
  583. /* Write out instruction to high addr */
  584. slic_reg32_write(&slic_regs->slic_wcs, instruction,
  585. FLUSH);
  586. instruction = *(u32 *)(fw->data + index);
  587. index += 4;
  588. /* Check SRAM location zero. If it is non-zero, abort. */
  589. /* failure = readl((u32 __iomem *)&slic_regs->slic_reset);
  590. if (failure) {
  591. release_firmware(fw);
  592. return -EIO;
  593. }*/
  594. }
  595. }
  596. release_firmware(fw);
  597. /* Everything OK, kick off the card */
  598. mdelay(10);
  599. slic_reg32_write(&slic_regs->slic_wcs, SLIC_WCS_START, FLUSH);
  600. /* stall for 20 ms, long enough for ucode to init card
  601. and reach mainloop */
  602. mdelay(20);
  603. return 0;
  604. }
  605. MODULE_FIRMWARE("slicoss/oasisdownload.sys");
  606. MODULE_FIRMWARE("slicoss/gbdownload.sys");
  607. static void slic_adapter_set_hwaddr(struct adapter *adapter)
  608. {
  609. struct sliccard *card = adapter->card;
  610. if ((adapter->card) && (card->config_set)) {
  611. memcpy(adapter->macaddr,
  612. card->config.MacInfo[adapter->functionnumber].macaddrA,
  613. sizeof(struct slic_config_mac));
  614. if (!(adapter->currmacaddr[0] || adapter->currmacaddr[1] ||
  615. adapter->currmacaddr[2] || adapter->currmacaddr[3] ||
  616. adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
  617. memcpy(adapter->currmacaddr, adapter->macaddr, 6);
  618. }
  619. if (adapter->netdev) {
  620. memcpy(adapter->netdev->dev_addr, adapter->currmacaddr,
  621. 6);
  622. }
  623. }
  624. }
  625. static void slic_intagg_set(struct adapter *adapter, u32 value)
  626. {
  627. slic_reg32_write(&adapter->slic_regs->slic_intagg, value, FLUSH);
  628. adapter->card->loadlevel_current = value;
  629. }
  630. static void slic_soft_reset(struct adapter *adapter)
  631. {
  632. if (adapter->card->state == CARD_UP) {
  633. slic_reg32_write(&adapter->slic_regs->slic_quiesce, 0, FLUSH);
  634. mdelay(1);
  635. }
  636. slic_reg32_write(&adapter->slic_regs->slic_reset, SLIC_RESET_MAGIC,
  637. FLUSH);
  638. mdelay(1);
  639. }
  640. static void slic_mac_address_config(struct adapter *adapter)
  641. {
  642. u32 value;
  643. u32 value2;
  644. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  645. value = *(u32 *) &adapter->currmacaddr[2];
  646. value = ntohl(value);
  647. slic_reg32_write(&slic_regs->slic_wraddral, value, FLUSH);
  648. slic_reg32_write(&slic_regs->slic_wraddrbl, value, FLUSH);
  649. value2 = (u32) ((adapter->currmacaddr[0] << 8 |
  650. adapter->currmacaddr[1]) & 0xFFFF);
  651. slic_reg32_write(&slic_regs->slic_wraddrah, value2, FLUSH);
  652. slic_reg32_write(&slic_regs->slic_wraddrbh, value2, FLUSH);
  653. /* Write our multicast mask out to the card. This is done */
  654. /* here in addition to the slic_mcast_addr_set routine */
  655. /* because ALL_MCAST may have been enabled or disabled */
  656. slic_mcast_set_mask(adapter);
  657. }
  658. static void slic_mac_config(struct adapter *adapter)
  659. {
  660. u32 value;
  661. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  662. /* Setup GMAC gaps */
  663. if (adapter->linkspeed == LINK_1000MB) {
  664. value = ((GMCR_GAPBB_1000 << GMCR_GAPBB_SHIFT) |
  665. (GMCR_GAPR1_1000 << GMCR_GAPR1_SHIFT) |
  666. (GMCR_GAPR2_1000 << GMCR_GAPR2_SHIFT));
  667. } else {
  668. value = ((GMCR_GAPBB_100 << GMCR_GAPBB_SHIFT) |
  669. (GMCR_GAPR1_100 << GMCR_GAPR1_SHIFT) |
  670. (GMCR_GAPR2_100 << GMCR_GAPR2_SHIFT));
  671. }
  672. /* enable GMII */
  673. if (adapter->linkspeed == LINK_1000MB)
  674. value |= GMCR_GBIT;
  675. /* enable fullduplex */
  676. if ((adapter->linkduplex == LINK_FULLD)
  677. || (adapter->macopts & MAC_LOOPBACK)) {
  678. value |= GMCR_FULLD;
  679. }
  680. /* write mac config */
  681. slic_reg32_write(&slic_regs->slic_wmcfg, value, FLUSH);
  682. /* setup mac addresses */
  683. slic_mac_address_config(adapter);
  684. }
  685. static void slic_config_set(struct adapter *adapter, bool linkchange)
  686. {
  687. u32 value;
  688. u32 RcrReset;
  689. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  690. if (linkchange) {
  691. /* Setup MAC */
  692. slic_mac_config(adapter);
  693. RcrReset = GRCR_RESET;
  694. } else {
  695. slic_mac_address_config(adapter);
  696. RcrReset = 0;
  697. }
  698. if (adapter->linkduplex == LINK_FULLD) {
  699. /* setup xmtcfg */
  700. value = (GXCR_RESET | /* Always reset */
  701. GXCR_XMTEN | /* Enable transmit */
  702. GXCR_PAUSEEN); /* Enable pause */
  703. slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH);
  704. /* Setup rcvcfg last */
  705. value = (RcrReset | /* Reset, if linkchange */
  706. GRCR_CTLEN | /* Enable CTL frames */
  707. GRCR_ADDRAEN | /* Address A enable */
  708. GRCR_RCVBAD | /* Rcv bad frames */
  709. (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
  710. } else {
  711. /* setup xmtcfg */
  712. value = (GXCR_RESET | /* Always reset */
  713. GXCR_XMTEN); /* Enable transmit */
  714. slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH);
  715. /* Setup rcvcfg last */
  716. value = (RcrReset | /* Reset, if linkchange */
  717. GRCR_ADDRAEN | /* Address A enable */
  718. GRCR_RCVBAD | /* Rcv bad frames */
  719. (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
  720. }
  721. if (adapter->state != ADAPT_DOWN) {
  722. /* Only enable receive if we are restarting or running */
  723. value |= GRCR_RCVEN;
  724. }
  725. if (adapter->macopts & MAC_PROMISC)
  726. value |= GRCR_RCVALL;
  727. slic_reg32_write(&slic_regs->slic_wrcfg, value, FLUSH);
  728. }
  729. /*
  730. * Turn off RCV and XMT, power down PHY
  731. */
  732. static void slic_config_clear(struct adapter *adapter)
  733. {
  734. u32 value;
  735. u32 phy_config;
  736. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  737. /* Setup xmtcfg */
  738. value = (GXCR_RESET | /* Always reset */
  739. GXCR_PAUSEEN); /* Enable pause */
  740. slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH);
  741. value = (GRCR_RESET | /* Always reset */
  742. GRCR_CTLEN | /* Enable CTL frames */
  743. GRCR_ADDRAEN | /* Address A enable */
  744. (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
  745. slic_reg32_write(&slic_regs->slic_wrcfg, value, FLUSH);
  746. /* power down phy */
  747. phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN));
  748. slic_reg32_write(&slic_regs->slic_wphy, phy_config, FLUSH);
  749. }
  750. static bool slic_mac_filter(struct adapter *adapter,
  751. struct ether_header *ether_frame)
  752. {
  753. struct net_device *netdev = adapter->netdev;
  754. u32 opts = adapter->macopts;
  755. u32 *dhost4 = (u32 *)&ether_frame->ether_dhost[0];
  756. u16 *dhost2 = (u16 *)&ether_frame->ether_dhost[4];
  757. if (opts & MAC_PROMISC)
  758. return true;
  759. if ((*dhost4 == 0xFFFFFFFF) && (*dhost2 == 0xFFFF)) {
  760. if (opts & MAC_BCAST) {
  761. adapter->rcv_broadcasts++;
  762. return true;
  763. } else {
  764. return false;
  765. }
  766. }
  767. if (ether_frame->ether_dhost[0] & 0x01) {
  768. if (opts & MAC_ALLMCAST) {
  769. adapter->rcv_multicasts++;
  770. netdev->stats.multicast++;
  771. return true;
  772. }
  773. if (opts & MAC_MCAST) {
  774. struct mcast_address *mcaddr = adapter->mcastaddrs;
  775. while (mcaddr) {
  776. if (!compare_ether_addr(mcaddr->address,
  777. ether_frame->ether_dhost)) {
  778. adapter->rcv_multicasts++;
  779. netdev->stats.multicast++;
  780. return true;
  781. }
  782. mcaddr = mcaddr->next;
  783. }
  784. return false;
  785. } else {
  786. return false;
  787. }
  788. }
  789. if (opts & MAC_DIRECTED) {
  790. adapter->rcv_unicasts++;
  791. return true;
  792. }
  793. return false;
  794. }
  795. static int slic_mac_set_address(struct net_device *dev, void *ptr)
  796. {
  797. struct adapter *adapter = netdev_priv(dev);
  798. struct sockaddr *addr = ptr;
  799. if (netif_running(dev))
  800. return -EBUSY;
  801. if (!adapter)
  802. return -EBUSY;
  803. if (!is_valid_ether_addr(addr->sa_data))
  804. return -EINVAL;
  805. memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
  806. memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
  807. slic_config_set(adapter, true);
  808. return 0;
  809. }
  810. static void slic_timer_load_check(ulong cardaddr)
  811. {
  812. struct sliccard *card = (struct sliccard *)cardaddr;
  813. struct adapter *adapter = card->master;
  814. u32 __iomem *intagg;
  815. u32 load = card->events;
  816. u32 level = 0;
  817. intagg = &adapter->slic_regs->slic_intagg;
  818. if ((adapter) && (adapter->state == ADAPT_UP) &&
  819. (card->state == CARD_UP) && (slic_global.dynamic_intagg)) {
  820. if (adapter->devid == SLIC_1GB_DEVICE_ID) {
  821. if (adapter->linkspeed == LINK_1000MB)
  822. level = 100;
  823. else {
  824. if (load > SLIC_LOAD_5)
  825. level = SLIC_INTAGG_5;
  826. else if (load > SLIC_LOAD_4)
  827. level = SLIC_INTAGG_4;
  828. else if (load > SLIC_LOAD_3)
  829. level = SLIC_INTAGG_3;
  830. else if (load > SLIC_LOAD_2)
  831. level = SLIC_INTAGG_2;
  832. else if (load > SLIC_LOAD_1)
  833. level = SLIC_INTAGG_1;
  834. else
  835. level = SLIC_INTAGG_0;
  836. }
  837. if (card->loadlevel_current != level) {
  838. card->loadlevel_current = level;
  839. slic_reg32_write(intagg, level, FLUSH);
  840. }
  841. } else {
  842. if (load > SLIC_LOAD_5)
  843. level = SLIC_INTAGG_5;
  844. else if (load > SLIC_LOAD_4)
  845. level = SLIC_INTAGG_4;
  846. else if (load > SLIC_LOAD_3)
  847. level = SLIC_INTAGG_3;
  848. else if (load > SLIC_LOAD_2)
  849. level = SLIC_INTAGG_2;
  850. else if (load > SLIC_LOAD_1)
  851. level = SLIC_INTAGG_1;
  852. else
  853. level = SLIC_INTAGG_0;
  854. if (card->loadlevel_current != level) {
  855. card->loadlevel_current = level;
  856. slic_reg32_write(intagg, level, FLUSH);
  857. }
  858. }
  859. }
  860. card->events = 0;
  861. card->loadtimer.expires = jiffies + (SLIC_LOADTIMER_PERIOD * HZ);
  862. add_timer(&card->loadtimer);
  863. }
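/*
 * Utility processor request (UPR) support: requests are appended to
 * adapter->upr_list and issued to the card one at a time by slic_upr_start(),
 * which picks the register to write based on the request type.
 */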
  864. static int slic_upr_queue_request(struct adapter *adapter,
  865. u32 upr_request,
  866. u32 upr_data,
  867. u32 upr_data_h,
  868. u32 upr_buffer, u32 upr_buffer_h)
  869. {
  870. struct slic_upr *upr;
  871. struct slic_upr *uprqueue;
  872. upr = kmalloc(sizeof(struct slic_upr), GFP_ATOMIC);
  873. if (!upr)
  874. return -ENOMEM;
  875. upr->adapter = adapter->port;
  876. upr->upr_request = upr_request;
  877. upr->upr_data = upr_data;
  878. upr->upr_buffer = upr_buffer;
  879. upr->upr_data_h = upr_data_h;
  880. upr->upr_buffer_h = upr_buffer_h;
  881. upr->next = NULL;
  882. if (adapter->upr_list) {
  883. uprqueue = adapter->upr_list;
  884. while (uprqueue->next)
  885. uprqueue = uprqueue->next;
  886. uprqueue->next = upr;
  887. } else {
  888. adapter->upr_list = upr;
  889. }
  890. return 0;
  891. }
  892. static void slic_upr_start(struct adapter *adapter)
  893. {
  894. struct slic_upr *upr;
  895. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  896. /*
  897. char * ptr1;
  898. char * ptr2;
  899. uint cmdoffset;
  900. */
  901. upr = adapter->upr_list;
  902. if (!upr)
  903. return;
  904. if (adapter->upr_busy)
  905. return;
  906. adapter->upr_busy = 1;
  907. switch (upr->upr_request) {
  908. case SLIC_UPR_STATS:
  909. if (upr->upr_data_h == 0) {
  910. slic_reg32_write(&slic_regs->slic_stats, upr->upr_data,
  911. FLUSH);
  912. } else {
  913. slic_reg64_write(adapter, &slic_regs->slic_stats64,
  914. upr->upr_data,
  915. &slic_regs->slic_addr_upper,
  916. upr->upr_data_h, FLUSH);
  917. }
  918. break;
  919. case SLIC_UPR_RLSR:
  920. slic_reg64_write(adapter, &slic_regs->slic_rlsr, upr->upr_data,
  921. &slic_regs->slic_addr_upper, upr->upr_data_h,
  922. FLUSH);
  923. break;
  924. case SLIC_UPR_RCONFIG:
  925. slic_reg64_write(adapter, &slic_regs->slic_rconfig,
  926. upr->upr_data, &slic_regs->slic_addr_upper,
  927. upr->upr_data_h, FLUSH);
  928. break;
  929. case SLIC_UPR_PING:
  930. slic_reg32_write(&slic_regs->slic_ping, 1, FLUSH);
  931. break;
  932. default:
  933. ASSERT(0);
  934. }
  935. }
  936. static int slic_upr_request(struct adapter *adapter,
  937. u32 upr_request,
  938. u32 upr_data,
  939. u32 upr_data_h,
  940. u32 upr_buffer, u32 upr_buffer_h)
  941. {
  942. int rc;
  943. spin_lock_irqsave(&adapter->upr_lock.lock, adapter->upr_lock.flags);
  944. rc = slic_upr_queue_request(adapter,
  945. upr_request,
  946. upr_data,
  947. upr_data_h, upr_buffer, upr_buffer_h);
  948. if (rc)
  949. goto err_unlock_irq;
  950. slic_upr_start(adapter);
  951. err_unlock_irq:
  952. spin_unlock_irqrestore(&adapter->upr_lock.lock,
  953. adapter->upr_lock.flags);
  954. return rc;
  955. }
  956. static void slic_link_upr_complete(struct adapter *adapter, u32 isr)
  957. {
  958. u32 linkstatus = adapter->pshmem->linkstatus;
  959. uint linkup;
  960. unsigned char linkspeed;
  961. unsigned char linkduplex;
  962. if ((isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
  963. struct slic_shmem *pshmem;
  964. pshmem = (struct slic_shmem *)adapter->phys_shmem;
  965. #if BITS_PER_LONG == 64
  966. slic_upr_queue_request(adapter,
  967. SLIC_UPR_RLSR,
  968. SLIC_GET_ADDR_LOW(&pshmem->linkstatus),
  969. SLIC_GET_ADDR_HIGH(&pshmem->linkstatus),
  970. 0, 0);
  971. #else
  972. slic_upr_queue_request(adapter,
  973. SLIC_UPR_RLSR,
  974. (u32) &pshmem->linkstatus,
  975. SLIC_GET_ADDR_HIGH(pshmem), 0, 0);
  976. #endif
  977. return;
  978. }
  979. if (adapter->state != ADAPT_UP)
  980. return;
  981. ASSERT((adapter->devid == SLIC_1GB_DEVICE_ID)
  982. || (adapter->devid == SLIC_2GB_DEVICE_ID));
  983. linkup = linkstatus & GIG_LINKUP ? LINK_UP : LINK_DOWN;
  984. if (linkstatus & GIG_SPEED_1000)
  985. linkspeed = LINK_1000MB;
  986. else if (linkstatus & GIG_SPEED_100)
  987. linkspeed = LINK_100MB;
  988. else
  989. linkspeed = LINK_10MB;
  990. if (linkstatus & GIG_FULLDUPLEX)
  991. linkduplex = LINK_FULLD;
  992. else
  993. linkduplex = LINK_HALFD;
  994. if ((adapter->linkstate == LINK_DOWN) && (linkup == LINK_DOWN))
  995. return;
  996. /* link up event, but nothing has changed */
  997. if ((adapter->linkstate == LINK_UP) &&
  998. (linkup == LINK_UP) &&
  999. (adapter->linkspeed == linkspeed) &&
  1000. (adapter->linkduplex == linkduplex))
  1001. return;
  1002. /* link has changed at this point */
  1003. /* link has gone from up to down */
  1004. if (linkup == LINK_DOWN) {
  1005. adapter->linkstate = LINK_DOWN;
  1006. return;
  1007. }
  1008. /* link has gone from down to up */
  1009. adapter->linkspeed = linkspeed;
  1010. adapter->linkduplex = linkduplex;
  1011. if (adapter->linkstate != LINK_UP) {
  1012. /* setup the mac */
  1013. slic_config_set(adapter, true);
  1014. adapter->linkstate = LINK_UP;
  1015. netif_start_queue(adapter->netdev);
  1016. }
  1017. }
  1018. static void slic_upr_request_complete(struct adapter *adapter, u32 isr)
  1019. {
  1020. struct sliccard *card = adapter->card;
  1021. struct slic_upr *upr;
  1022. spin_lock_irqsave(&adapter->upr_lock.lock, adapter->upr_lock.flags);
  1023. upr = adapter->upr_list;
  1024. if (!upr) {
  1025. ASSERT(0);
  1026. spin_unlock_irqrestore(&adapter->upr_lock.lock,
  1027. adapter->upr_lock.flags);
  1028. return;
  1029. }
  1030. adapter->upr_list = upr->next;
  1031. upr->next = NULL;
  1032. adapter->upr_busy = 0;
  1033. ASSERT(adapter->port == upr->adapter);
  1034. switch (upr->upr_request) {
  1035. case SLIC_UPR_STATS:
  1036. {
  1037. struct slic_stats *slicstats =
  1038. (struct slic_stats *) &adapter->pshmem->inicstats;
  1039. struct slic_stats *newstats = slicstats;
  1040. struct slic_stats *old = &adapter->inicstats_prev;
  1041. struct slicnet_stats *stst = &adapter->slic_stats;
  1042. if (isr & ISR_UPCERR) {
  1043. dev_err(&adapter->netdev->dev,
  1044. "SLIC_UPR_STATS command failed isr[%x]\n",
  1045. isr);
  1046. break;
  1047. }
  1048. UPDATE_STATS_GB(stst->tcp.xmit_tcp_segs,
  1049. newstats->xmit_tcp_segs_gb,
  1050. old->xmit_tcp_segs_gb);
  1051. UPDATE_STATS_GB(stst->tcp.xmit_tcp_bytes,
  1052. newstats->xmit_tcp_bytes_gb,
  1053. old->xmit_tcp_bytes_gb);
  1054. UPDATE_STATS_GB(stst->tcp.rcv_tcp_segs,
  1055. newstats->rcv_tcp_segs_gb,
  1056. old->rcv_tcp_segs_gb);
  1057. UPDATE_STATS_GB(stst->tcp.rcv_tcp_bytes,
  1058. newstats->rcv_tcp_bytes_gb,
  1059. old->rcv_tcp_bytes_gb);
  1060. UPDATE_STATS_GB(stst->iface.xmt_bytes,
  1061. newstats->xmit_bytes_gb,
  1062. old->xmit_bytes_gb);
  1063. UPDATE_STATS_GB(stst->iface.xmt_ucast,
  1064. newstats->xmit_unicasts_gb,
  1065. old->xmit_unicasts_gb);
  1066. UPDATE_STATS_GB(stst->iface.rcv_bytes,
  1067. newstats->rcv_bytes_gb,
  1068. old->rcv_bytes_gb);
  1069. UPDATE_STATS_GB(stst->iface.rcv_ucast,
  1070. newstats->rcv_unicasts_gb,
  1071. old->rcv_unicasts_gb);
  1072. UPDATE_STATS_GB(stst->iface.xmt_errors,
  1073. newstats->xmit_collisions_gb,
  1074. old->xmit_collisions_gb);
  1075. UPDATE_STATS_GB(stst->iface.xmt_errors,
  1076. newstats->xmit_excess_collisions_gb,
  1077. old->xmit_excess_collisions_gb);
  1078. UPDATE_STATS_GB(stst->iface.xmt_errors,
  1079. newstats->xmit_other_error_gb,
  1080. old->xmit_other_error_gb);
  1081. UPDATE_STATS_GB(stst->iface.rcv_errors,
  1082. newstats->rcv_other_error_gb,
  1083. old->rcv_other_error_gb);
  1084. UPDATE_STATS_GB(stst->iface.rcv_discards,
  1085. newstats->rcv_drops_gb,
  1086. old->rcv_drops_gb);
  1087. if (newstats->rcv_drops_gb > old->rcv_drops_gb) {
  1088. adapter->rcv_drops +=
  1089. (newstats->rcv_drops_gb -
  1090. old->rcv_drops_gb);
  1091. }
  1092. memcpy(old, newstats, sizeof(struct slic_stats));
  1093. break;
  1094. }
  1095. case SLIC_UPR_RLSR:
  1096. slic_link_upr_complete(adapter, isr);
  1097. break;
  1098. case SLIC_UPR_RCONFIG:
  1099. break;
  1100. case SLIC_UPR_RPHY:
  1101. ASSERT(0);
  1102. break;
  1103. case SLIC_UPR_ENLB:
  1104. ASSERT(0);
  1105. break;
  1106. case SLIC_UPR_ENCT:
  1107. ASSERT(0);
  1108. break;
  1109. case SLIC_UPR_PDWN:
  1110. ASSERT(0);
  1111. break;
  1112. case SLIC_UPR_PING:
  1113. card->pingstatus |= (isr & ISR_PINGDSMASK);
  1114. break;
  1115. default:
  1116. ASSERT(0);
  1117. }
  1118. kfree(upr);
  1119. slic_upr_start(adapter);
  1120. spin_unlock_irqrestore(&adapter->upr_lock.lock,
  1121. adapter->upr_lock.flags);
  1122. }
  1123. static void slic_config_get(struct adapter *adapter, u32 config,
  1124. u32 config_h)
  1125. {
  1126. int status;
  1127. status = slic_upr_request(adapter,
  1128. SLIC_UPR_RCONFIG,
  1129. (u32) config, (u32) config_h, 0, 0);
  1130. ASSERT(status == 0);
  1131. }
  1132. /*
  1133. * This is here to checksum the EEPROM; there is some ucode bug
  1134. * which prevents us from using the ucode result.
  1135. * Remove this once the ucode is fixed.
  1136. */
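/* (Essentially the classic BSD in_cksum 16-bit one's-complement sum, run over the EEPROM image.) */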
  1137. static ushort slic_eeprom_cksum(char *m, int len)
  1138. {
  1139. #define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
  1140. #define REDUCE {l_util.l = sum; sum = l_util.s[0] + l_util.s[1]; ADDCARRY(sum);\
  1141. }
  1142. u16 *w;
  1143. u32 sum = 0;
  1144. u32 byte_swapped = 0;
  1145. u32 w_int;
  1146. union {
  1147. char c[2];
  1148. ushort s;
  1149. } s_util;
  1150. union {
  1151. ushort s[2];
  1152. int l;
  1153. } l_util;
  1154. l_util.l = 0;
  1155. s_util.s = 0;
  1156. w = (u16 *)m;
  1157. #if BITS_PER_LONG == 64
  1158. w_int = (u32) ((ulong) w & 0x00000000FFFFFFFF);
  1159. #else
  1160. w_int = (u32) (w);
  1161. #endif
  1162. if ((1 & w_int) && (len > 0)) {
  1163. REDUCE;
  1164. sum <<= 8;
  1165. s_util.c[0] = *(unsigned char *)w;
  1166. w = (u16 *)((char *)w + 1);
  1167. len--;
  1168. byte_swapped = 1;
  1169. }
  1170. /* Unroll the loop to make overhead from branches &c small. */
  1171. while ((len -= 32) >= 0) {
  1172. sum += w[0];
  1173. sum += w[1];
  1174. sum += w[2];
  1175. sum += w[3];
  1176. sum += w[4];
  1177. sum += w[5];
  1178. sum += w[6];
  1179. sum += w[7];
  1180. sum += w[8];
  1181. sum += w[9];
  1182. sum += w[10];
  1183. sum += w[11];
  1184. sum += w[12];
  1185. sum += w[13];
  1186. sum += w[14];
  1187. sum += w[15];
  1188. w = (u16 *)((ulong) w + 16); /* verify */
  1189. }
  1190. len += 32;
  1191. while ((len -= 8) >= 0) {
  1192. sum += w[0];
  1193. sum += w[1];
  1194. sum += w[2];
  1195. sum += w[3];
  1196. w = (u16 *)((ulong) w + 4); /* verify */
  1197. }
  1198. len += 8;
  1199. if (len != 0 || byte_swapped != 0) {
  1200. REDUCE;
  1201. while ((len -= 2) >= 0)
  1202. sum += *w++; /* verify */
  1203. if (byte_swapped) {
  1204. REDUCE;
  1205. sum <<= 8;
  1206. byte_swapped = 0;
  1207. if (len == -1) {
  1208. s_util.c[1] = *(char *) w;
  1209. sum += s_util.s;
  1210. len = 0;
  1211. } else {
  1212. len = -1;
  1213. }
  1214. } else if (len == -1) {
  1215. s_util.c[0] = *(char *) w;
  1216. }
  1217. if (len == -1) {
  1218. s_util.c[1] = 0;
  1219. sum += s_util.s;
  1220. }
  1221. }
  1222. REDUCE;
  1223. return (ushort) sum;
  1224. }
  1225. static void slic_rspqueue_free(struct adapter *adapter)
  1226. {
  1227. int i;
  1228. struct slic_rspqueue *rspq = &adapter->rspqueue;
  1229. for (i = 0; i < rspq->num_pages; i++) {
  1230. if (rspq->vaddr[i]) {
  1231. pci_free_consistent(adapter->pcidev, PAGE_SIZE,
  1232. rspq->vaddr[i], rspq->paddr[i]);
  1233. }
  1234. rspq->vaddr[i] = NULL;
  1235. rspq->paddr[i] = 0;
  1236. }
  1237. rspq->offset = 0;
  1238. rspq->pageindex = 0;
  1239. rspq->rspbuf = NULL;
  1240. }
  1241. static int slic_rspqueue_init(struct adapter *adapter)
  1242. {
  1243. int i;
  1244. struct slic_rspqueue *rspq = &adapter->rspqueue;
  1245. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  1246. u32 paddrh = 0;
  1247. ASSERT(adapter->state == ADAPT_DOWN);
  1248. memset(rspq, 0, sizeof(struct slic_rspqueue));
  1249. rspq->num_pages = SLIC_RSPQ_PAGES_GB;
  1250. for (i = 0; i < rspq->num_pages; i++) {
  1251. rspq->vaddr[i] = pci_alloc_consistent(adapter->pcidev,
  1252. PAGE_SIZE,
  1253. &rspq->paddr[i]);
  1254. if (!rspq->vaddr[i]) {
  1255. dev_err(&adapter->pcidev->dev,
  1256. "pci_alloc_consistent failed\n");
  1257. slic_rspqueue_free(adapter);
  1258. return -ENOMEM;
  1259. }
  1260. /* FIXME:
  1261. * do we really need these assertions (4K PAGE_SIZE aligned addr)? */
  1262. #if 0
  1263. #ifndef CONFIG_X86_64
  1264. ASSERT(((u32) rspq->vaddr[i] & 0xFFFFF000) ==
  1265. (u32) rspq->vaddr[i]);
  1266. ASSERT(((u32) rspq->paddr[i] & 0xFFFFF000) ==
  1267. (u32) rspq->paddr[i]);
  1268. #endif
  1269. #endif
  1270. memset(rspq->vaddr[i], 0, PAGE_SIZE);
  1271. if (paddrh == 0) {
  1272. slic_reg32_write(&slic_regs->slic_rbar,
  1273. (rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE),
  1274. DONT_FLUSH);
  1275. } else {
  1276. slic_reg64_write(adapter, &slic_regs->slic_rbar64,
  1277. (rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE),
  1278. &slic_regs->slic_addr_upper,
  1279. paddrh, DONT_FLUSH);
  1280. }
  1281. }
  1282. rspq->offset = 0;
  1283. rspq->pageindex = 0;
  1284. rspq->rspbuf = (struct slic_rspbuf *)rspq->vaddr[0];
  1285. return 0;
  1286. }
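/*
 * Return the next completed response buffer, or NULL if none is ready.
 * When the end of a page is reached, that page is handed back to the card
 * via slic_rbar64 and the queue advances to the next page.
 */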
  1287. static struct slic_rspbuf *slic_rspqueue_getnext(struct adapter *adapter)
  1288. {
  1289. struct slic_rspqueue *rspq = &adapter->rspqueue;
  1290. struct slic_rspbuf *buf;
  1291. if (!(rspq->rspbuf->status))
  1292. return NULL;
  1293. buf = rspq->rspbuf;
  1294. #if BITS_PER_LONG == 32
  1295. ASSERT((buf->status & 0xFFFFFFE0) == 0);
  1296. #endif
  1297. ASSERT(buf->hosthandle);
  1298. if (++rspq->offset < SLIC_RSPQ_BUFSINPAGE) {
  1299. rspq->rspbuf++;
  1300. #if BITS_PER_LONG == 32
  1301. ASSERT(((u32) rspq->rspbuf & 0xFFFFFFE0) ==
  1302. (u32) rspq->rspbuf);
  1303. #endif
  1304. } else {
  1305. ASSERT(rspq->offset == SLIC_RSPQ_BUFSINPAGE);
  1306. slic_reg64_write(adapter, &adapter->slic_regs->slic_rbar64,
  1307. (rspq->paddr[rspq->pageindex] | SLIC_RSPQ_BUFSINPAGE),
  1308. &adapter->slic_regs->slic_addr_upper, 0, DONT_FLUSH);
  1309. rspq->pageindex = (rspq->pageindex + 1) % rspq->num_pages;
  1310. rspq->offset = 0;
  1311. rspq->rspbuf = (struct slic_rspbuf *)
  1312. rspq->vaddr[rspq->pageindex];
  1313. #if BITS_PER_LONG == 32
  1314. ASSERT(((u32) rspq->rspbuf & 0xFFFFF000) ==
  1315. (u32) rspq->rspbuf);
  1316. #endif
  1317. }
  1318. #if BITS_PER_LONG == 32
  1319. ASSERT(((u32) buf & 0xFFFFFFE0) == (u32) buf);
  1320. #endif
  1321. return buf;
  1322. }
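/*
 * Command queue memory pool: DMA-coherent pages are allocated on demand
 * (up to SLIC_CMDQ_MAXPAGES) and later carved into host commands by
 * slic_cmdq_addcmdpage().
 */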
  1323. static void slic_cmdqmem_init(struct adapter *adapter)
  1324. {
  1325. struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem;
  1326. memset(cmdqmem, 0, sizeof(struct slic_cmdqmem));
  1327. }
  1328. static void slic_cmdqmem_free(struct adapter *adapter)
  1329. {
  1330. struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem;
  1331. int i;
  1332. for (i = 0; i < SLIC_CMDQ_MAXPAGES; i++) {
  1333. if (cmdqmem->pages[i]) {
  1334. pci_free_consistent(adapter->pcidev,
  1335. PAGE_SIZE,
  1336. (void *) cmdqmem->pages[i],
  1337. cmdqmem->dma_pages[i]);
  1338. }
  1339. }
  1340. memset(cmdqmem, 0, sizeof(struct slic_cmdqmem));
  1341. }
  1342. static u32 *slic_cmdqmem_addpage(struct adapter *adapter)
  1343. {
  1344. struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem;
  1345. u32 *pageaddr;
  1346. if (cmdqmem->pagecnt >= SLIC_CMDQ_MAXPAGES)
  1347. return NULL;
  1348. pageaddr = pci_alloc_consistent(adapter->pcidev,
  1349. PAGE_SIZE,
  1350. &cmdqmem->dma_pages[cmdqmem->pagecnt]);
  1351. if (!pageaddr)
  1352. return NULL;
  1353. #if BITS_PER_LONG == 32
  1354. ASSERT(((u32) pageaddr & 0xFFFFF000) == (u32) pageaddr);
  1355. #endif
  1356. cmdqmem->pages[cmdqmem->pagecnt] = pageaddr;
  1357. cmdqmem->pagecnt++;
  1358. return pageaddr;
  1359. }
  1360. static void slic_cmdq_free(struct adapter *adapter)
  1361. {
  1362. struct slic_hostcmd *cmd;
  1363. cmd = adapter->cmdq_all.head;
  1364. while (cmd) {
  1365. if (cmd->busy) {
  1366. struct sk_buff *tempskb;
  1367. tempskb = cmd->skb;
  1368. if (tempskb) {
  1369. cmd->skb = NULL;
  1370. dev_kfree_skb_irq(tempskb);
  1371. }
  1372. }
  1373. cmd = cmd->next_all;
  1374. }
  1375. memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue));
  1376. memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue));
  1377. memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue));
  1378. slic_cmdqmem_free(adapter);
  1379. }
  1380. static void slic_cmdq_addcmdpage(struct adapter *adapter, u32 *page)
  1381. {
  1382. struct slic_hostcmd *cmd;
  1383. struct slic_hostcmd *prev;
  1384. struct slic_hostcmd *tail;
  1385. struct slic_cmdqueue *cmdq;
  1386. int cmdcnt;
  1387. void *cmdaddr;
  1388. ulong phys_addr;
  1389. u32 phys_addrl;
  1390. u32 phys_addrh;
  1391. struct slic_handle *pslic_handle;
  1392. cmdaddr = page;
  1393. cmd = (struct slic_hostcmd *)cmdaddr;
  1394. cmdcnt = 0;
  1395. phys_addr = virt_to_bus((void *)page);
  1396. phys_addrl = SLIC_GET_ADDR_LOW(phys_addr);
  1397. phys_addrh = SLIC_GET_ADDR_HIGH(phys_addr);
  1398. prev = NULL;
  1399. tail = cmd;
  1400. while ((cmdcnt < SLIC_CMDQ_CMDSINPAGE) &&
  1401. (adapter->slic_handle_ix < 256)) {
  1402. /* Allocate and initialize a SLIC_HANDLE for this command */
  1403. SLIC_GET_SLIC_HANDLE(adapter, pslic_handle);
  1404. if (pslic_handle == NULL)
  1405. ASSERT(0);
  1406. ASSERT(pslic_handle ==
  1407. &adapter->slic_handles[pslic_handle->token.
  1408. handle_index]);
  1409. pslic_handle->type = SLIC_HANDLE_CMD;
  1410. pslic_handle->address = (void *) cmd;
  1411. pslic_handle->offset = (ushort) adapter->slic_handle_ix++;
  1412. pslic_handle->other_handle = NULL;
  1413. pslic_handle->next = NULL;
  1414. cmd->pslic_handle = pslic_handle;
  1415. cmd->cmd64.hosthandle = pslic_handle->token.handle_token;
  1416. cmd->busy = false;
  1417. cmd->paddrl = phys_addrl;
  1418. cmd->paddrh = phys_addrh;
  1419. cmd->next_all = prev;
  1420. cmd->next = prev;
  1421. prev = cmd;
  1422. phys_addrl += SLIC_HOSTCMD_SIZE;
  1423. cmdaddr += SLIC_HOSTCMD_SIZE;
  1424. cmd = (struct slic_hostcmd *)cmdaddr;
  1425. cmdcnt++;
  1426. }
  1427. cmdq = &adapter->cmdq_all;
  1428. cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */
  1429. tail->next_all = cmdq->head;
  1430. cmdq->head = prev;
  1431. cmdq = &adapter->cmdq_free;
  1432. spin_lock_irqsave(&cmdq->lock.lock, cmdq->lock.flags);
  1433. cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */
  1434. tail->next = cmdq->head;
  1435. cmdq->head = prev;
  1436. spin_unlock_irqrestore(&cmdq->lock.lock, cmdq->lock.flags);
  1437. }
  1438. static int slic_cmdq_init(struct adapter *adapter)
  1439. {
  1440. int i;
  1441. u32 *pageaddr;
  1442. ASSERT(adapter->state == ADAPT_DOWN);
  1443. memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue));
  1444. memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue));
  1445. memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue));
  1446. spin_lock_init(&adapter->cmdq_all.lock.lock);
  1447. spin_lock_init(&adapter->cmdq_free.lock.lock);
  1448. spin_lock_init(&adapter->cmdq_done.lock.lock);
  1449. slic_cmdqmem_init(adapter);
  1450. adapter->slic_handle_ix = 1;
  1451. for (i = 0; i < SLIC_CMDQ_INITPAGES; i++) {
  1452. pageaddr = slic_cmdqmem_addpage(adapter);
  1453. #if BITS_PER_LONG == 32
  1454. ASSERT(((u32) pageaddr & 0xFFFFF000) == (u32) pageaddr);
  1455. #endif
  1456. if (!pageaddr) {
  1457. slic_cmdq_free(adapter);
  1458. return -ENOMEM;
  1459. }
  1460. slic_cmdq_addcmdpage(adapter, pageaddr);
  1461. }
  1462. adapter->slic_handle_ix = 1;
  1463. return 0;
  1464. }
  1465. static void slic_cmdq_reset(struct adapter *adapter)
  1466. {
  1467. struct slic_hostcmd *hcmd;
  1468. struct sk_buff *skb;
  1469. u32 outstanding;
  1470. spin_lock_irqsave(&adapter->cmdq_free.lock.lock,
  1471. adapter->cmdq_free.lock.flags);
  1472. spin_lock_irqsave(&adapter->cmdq_done.lock.lock,
  1473. adapter->cmdq_done.lock.flags);
  1474. outstanding = adapter->cmdq_all.count - adapter->cmdq_done.count;
  1475. outstanding -= adapter->cmdq_free.count;
  1476. hcmd = adapter->cmdq_all.head;
  1477. while (hcmd) {
  1478. if (hcmd->busy) {
  1479. skb = hcmd->skb;
  1480. ASSERT(skb);
  1481. hcmd->busy = 0;
  1482. hcmd->skb = NULL;
  1483. dev_kfree_skb_irq(skb);
  1484. }
  1485. hcmd = hcmd->next_all;
  1486. }
  1487. adapter->cmdq_free.count = 0;
  1488. adapter->cmdq_free.head = NULL;
  1489. adapter->cmdq_free.tail = NULL;
  1490. adapter->cmdq_done.count = 0;
  1491. adapter->cmdq_done.head = NULL;
  1492. adapter->cmdq_done.tail = NULL;
  1493. adapter->cmdq_free.head = adapter->cmdq_all.head;
  1494. hcmd = adapter->cmdq_all.head;
  1495. while (hcmd) {
  1496. adapter->cmdq_free.count++;
  1497. hcmd->next = hcmd->next_all;
  1498. hcmd = hcmd->next_all;
  1499. }
  1500. if (adapter->cmdq_free.count != adapter->cmdq_all.count) {
  1501. dev_err(&adapter->netdev->dev,
  1502. "free_count %d != all count %d\n",
  1503. adapter->cmdq_free.count, adapter->cmdq_all.count);
  1504. }
  1505. spin_unlock_irqrestore(&adapter->cmdq_done.lock.lock,
  1506. adapter->cmdq_done.lock.flags);
  1507. spin_unlock_irqrestore(&adapter->cmdq_free.lock.lock,
  1508. adapter->cmdq_free.lock.flags);
  1509. }
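/* Move the whole "done" command list onto the free list in one operation. */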
  1510. static void slic_cmdq_getdone(struct adapter *adapter)
  1511. {
  1512. struct slic_cmdqueue *done_cmdq = &adapter->cmdq_done;
  1513. struct slic_cmdqueue *free_cmdq = &adapter->cmdq_free;
  1514. ASSERT(free_cmdq->head == NULL);
  1515. spin_lock_irqsave(&done_cmdq->lock.lock, done_cmdq->lock.flags);
  1516. free_cmdq->head = done_cmdq->head;
  1517. free_cmdq->count = done_cmdq->count;
  1518. done_cmdq->head = NULL;
  1519. done_cmdq->tail = NULL;
  1520. done_cmdq->count = 0;
  1521. spin_unlock_irqrestore(&done_cmdq->lock.lock, done_cmdq->lock.flags);
  1522. }
  1523. static struct slic_hostcmd *slic_cmdq_getfree(struct adapter *adapter)
  1524. {
  1525. struct slic_cmdqueue *cmdq = &adapter->cmdq_free;
  1526. struct slic_hostcmd *cmd = NULL;
  1527. lock_and_retry:
  1528. spin_lock_irqsave(&cmdq->lock.lock, cmdq->lock.flags);
  1529. retry:
  1530. cmd = cmdq->head;
  1531. if (cmd) {
  1532. cmdq->head = cmd->next;
  1533. cmdq->count--;
  1534. spin_unlock_irqrestore(&cmdq->lock.lock, cmdq->lock.flags);
  1535. } else {
  1536. slic_cmdq_getdone(adapter);
  1537. cmd = cmdq->head;
  1538. if (cmd) {
  1539. goto retry;
  1540. } else {
  1541. u32 *pageaddr;
  1542. spin_unlock_irqrestore(&cmdq->lock.lock,
  1543. cmdq->lock.flags);
  1544. pageaddr = slic_cmdqmem_addpage(adapter);
  1545. if (pageaddr) {
  1546. slic_cmdq_addcmdpage(adapter, pageaddr);
  1547. goto lock_and_retry;
  1548. }
  1549. }
  1550. }
  1551. return cmd;
  1552. }
  1553. static void slic_cmdq_putdone_irq(struct adapter *adapter,
  1554. struct slic_hostcmd *cmd)
  1555. {
  1556. struct slic_cmdqueue *cmdq = &adapter->cmdq_done;
  1557. spin_lock(&cmdq->lock.lock);
  1558. cmd->busy = 0;
  1559. cmd->next = cmdq->head;
  1560. cmdq->head = cmd;
  1561. cmdq->count++;
  1562. if ((adapter->xmitq_full) && (cmdq->count > 10))
  1563. netif_wake_queue(adapter->netdev);
  1564. spin_unlock(&cmdq->lock.lock);
  1565. }
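/*
 * Allocate up to SLIC_RCVQ_FILLENTRIES receive skbs, map them for DMA and
 * post their bus addresses to the card through the hbar/hbar64 registers;
 * returns the number of buffers actually posted.
 */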
  1566. static int slic_rcvqueue_fill(struct adapter *adapter)
  1567. {
  1568. void *paddr;
  1569. u32 paddrl;
  1570. u32 paddrh;
  1571. struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
  1572. int i = 0;
  1573. struct device *dev = &adapter->netdev->dev;
  1574. while (i < SLIC_RCVQ_FILLENTRIES) {
  1575. struct slic_rcvbuf *rcvbuf;
  1576. struct sk_buff *skb;
  1577. #ifdef KLUDGE_FOR_4GB_BOUNDARY
  1578. retry_rcvqfill:
  1579. #endif
  1580. skb = alloc_skb(SLIC_RCVQ_RCVBUFSIZE, GFP_ATOMIC);
  1581. if (skb) {
  1582. paddr = (void *)pci_map_single(adapter->pcidev,
  1583. skb->data,
  1584. SLIC_RCVQ_RCVBUFSIZE,
  1585. PCI_DMA_FROMDEVICE);
  1586. paddrl = SLIC_GET_ADDR_LOW(paddr);
  1587. paddrh = SLIC_GET_ADDR_HIGH(paddr);
  1588. skb->len = SLIC_RCVBUF_HEADSIZE;
  1589. rcvbuf = (struct slic_rcvbuf *)skb->head;
  1590. rcvbuf->status = 0;
  1591. skb->next = NULL;
  1592. #ifdef KLUDGE_FOR_4GB_BOUNDARY
  1593. if (paddrl == 0) {
  1594. dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
  1595. __func__);
  1596. dev_err(dev, "skb[%p] PROBLEM\n", skb);
  1597. dev_err(dev, " skbdata[%p]\n", skb->data);
  1598. dev_err(dev, " skblen[%x]\n", skb->len);
  1599. dev_err(dev, " paddr[%p]\n", paddr);
  1600. dev_err(dev, " paddrl[%x]\n", paddrl);
  1601. dev_err(dev, " paddrh[%x]\n", paddrh);
  1602. dev_err(dev, " rcvq->head[%p]\n", rcvq->head);
  1603. dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail);
  1604. dev_err(dev, " rcvq->count[%x]\n", rcvq->count);
  1605. dev_err(dev, "SKIP THIS SKB!!!!!!!!\n");
  1606. goto retry_rcvqfill;
  1607. }
  1608. #else
  1609. if (paddrl == 0) {
  1610. dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
  1611. __func__);
  1612. dev_err(dev, "skb[%p] PROBLEM\n", skb);
  1613. dev_err(dev, " skbdata[%p]\n", skb->data);
  1614. dev_err(dev, " skblen[%x]\n", skb->len);
  1615. dev_err(dev, " paddr[%p]\n", paddr);
  1616. dev_err(dev, " paddrl[%x]\n", paddrl);
  1617. dev_err(dev, " paddrh[%x]\n", paddrh);
  1618. dev_err(dev, " rcvq->head[%p]\n", rcvq->head);
  1619. dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail);
  1620. dev_err(dev, " rcvq->count[%x]\n", rcvq->count);
  1621. dev_err(dev, "GIVE TO CARD ANYWAY\n");
  1622. }
  1623. #endif
  1624. if (paddrh == 0) {
  1625. slic_reg32_write(&adapter->slic_regs->slic_hbar,
  1626. (u32)paddrl, DONT_FLUSH);
  1627. } else {
  1628. slic_reg64_write(adapter,
  1629. &adapter->slic_regs->slic_hbar64,
  1630. paddrl,
  1631. &adapter->slic_regs->slic_addr_upper,
  1632. paddrh, DONT_FLUSH);
  1633. }
  1634. if (rcvq->head)
  1635. rcvq->tail->next = skb;
  1636. else
  1637. rcvq->head = skb;
  1638. rcvq->tail = skb;
  1639. rcvq->count++;
  1640. i++;
  1641. } else {
  1642. dev_err(&adapter->netdev->dev,
  1643. "slic_rcvqueue_fill could only get [%d] skbuffs\n",
  1644. i);
  1645. break;
  1646. }
  1647. }
  1648. return i;
  1649. }
  1650. static void slic_rcvqueue_free(struct adapter *adapter)
  1651. {
  1652. struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
  1653. struct sk_buff *skb;
  1654. while (rcvq->head) {
  1655. skb = rcvq->head;
  1656. rcvq->head = rcvq->head->next;
  1657. dev_kfree_skb(skb);
  1658. }
  1659. rcvq->tail = NULL;
  1660. rcvq->head = NULL;
  1661. rcvq->count = 0;
  1662. }
  1663. static int slic_rcvqueue_init(struct adapter *adapter)
  1664. {
  1665. int i, count;
  1666. struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
  1667. ASSERT(adapter->state == ADAPT_DOWN);
  1668. rcvq->tail = NULL;
  1669. rcvq->head = NULL;
  1670. rcvq->size = SLIC_RCVQ_ENTRIES;
  1671. rcvq->errors = 0;
  1672. rcvq->count = 0;
  1673. i = (SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES);
  1674. count = 0;
  1675. while (i) {
  1676. count += slic_rcvqueue_fill(adapter);
  1677. i--;
  1678. }
  1679. if (rcvq->count < SLIC_RCVQ_MINENTRIES) {
  1680. slic_rcvqueue_free(adapter);
  1681. return -ENOMEM;
  1682. }
  1683. return 0;
  1684. }
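/*
 * slic_rcvqueue_getnext -
 *
 * Remove the buffer at the head of the receive queue if the card has
 * marked it valid (IRHDDR_SVALID), and replenish the queue whenever it
 * drops below SLIC_RCVQ_FILLTHRESH. Returns NULL if no completed
 * receive buffer is available.
 */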
  1685. static struct sk_buff *slic_rcvqueue_getnext(struct adapter *adapter)
  1686. {
  1687. struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
  1688. struct sk_buff *skb;
  1689. struct slic_rcvbuf *rcvbuf;
  1690. int count;
  1691. if (rcvq->count) {
  1692. skb = rcvq->head;
  1693. rcvbuf = (struct slic_rcvbuf *)skb->head;
  1694. ASSERT(rcvbuf);
  1695. if (rcvbuf->status & IRHDDR_SVALID) {
  1696. rcvq->head = rcvq->head->next;
  1697. skb->next = NULL;
  1698. rcvq->count--;
  1699. } else {
  1700. skb = NULL;
  1701. }
  1702. } else {
  1703. dev_err(&adapter->netdev->dev,
  1704. "RcvQ Empty!! rcvq[%p] count[%x]\n", rcvq, rcvq->count);
  1705. skb = NULL;
  1706. }
  1707. while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
  1708. count = slic_rcvqueue_fill(adapter);
  1709. if (!count)
  1710. break;
  1711. }
  1712. if (skb)
  1713. rcvq->errors = 0;
  1714. return skb;
  1715. }
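/*
 * slic_rcvqueue_reinsert -
 *
 * Re-map a receive buffer, hand it back to the card, and append it to
 * the tail of the receive queue. Used for frames the driver dropped
 * (receive errors or MAC filter). Returns the new queue depth.
 */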
  1716. static u32 slic_rcvqueue_reinsert(struct adapter *adapter, struct sk_buff *skb)
  1717. {
  1718. struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
  1719. void *paddr;
  1720. u32 paddrl;
  1721. u32 paddrh;
  1722. struct slic_rcvbuf *rcvbuf = (struct slic_rcvbuf *)skb->head;
  1723. struct device *dev;
  1724. ASSERT(skb->len == SLIC_RCVBUF_HEADSIZE);
  1725. paddr = (void *)pci_map_single(adapter->pcidev, skb->head,
  1726. SLIC_RCVQ_RCVBUFSIZE, PCI_DMA_FROMDEVICE);
  1727. rcvbuf->status = 0;
  1728. skb->next = NULL;
  1729. paddrl = SLIC_GET_ADDR_LOW(paddr);
  1730. paddrh = SLIC_GET_ADDR_HIGH(paddr);
  1731. if (paddrl == 0) {
  1732. dev = &adapter->netdev->dev;
  1733. dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
  1734. __func__);
  1735. dev_err(dev, "skb[%p] PROBLEM\n", skb);
  1736. dev_err(dev, " skbdata[%p]\n", skb->data);
  1737. dev_err(dev, " skblen[%x]\n", skb->len);
  1738. dev_err(dev, " paddr[%p]\n", paddr);
  1739. dev_err(dev, " paddrl[%x]\n", paddrl);
  1740. dev_err(dev, " paddrh[%x]\n", paddrh);
  1741. dev_err(dev, " rcvq->head[%p]\n", rcvq->head);
  1742. dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail);
  1743. dev_err(dev, " rcvq->count[%x]\n", rcvq->count);
  1744. }
  1745. if (paddrh == 0) {
  1746. slic_reg32_write(&adapter->slic_regs->slic_hbar, (u32)paddrl,
  1747. DONT_FLUSH);
  1748. } else {
  1749. slic_reg64_write(adapter, &adapter->slic_regs->slic_hbar64,
  1750. paddrl, &adapter->slic_regs->slic_addr_upper,
  1751. paddrh, DONT_FLUSH);
  1752. }
  1753. if (rcvq->head)
  1754. rcvq->tail->next = skb;
  1755. else
  1756. rcvq->head = skb;
  1757. rcvq->tail = skb;
  1758. rcvq->count++;
  1759. return rcvq->count;
  1760. }
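/*
 * slic_debug_card_show -
 *
 * debugfs "cardinfo" seq_file handler. Most of the detailed EEPROM/FRU
 * output is compiled out behind MOOKTODO; only the driver and microcode
 * versions plus the vendor strings are currently emitted.
 */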
  1761. static int slic_debug_card_show(struct seq_file *seq, void *v)
  1762. {
  1763. #ifdef MOOKTODO
  1764. int i;
  1765. struct sliccard *card = seq->private;
  1766. struct slic_config *config = &card->config;
  1767. unsigned char *fru = (unsigned char *)(&card->config.atk_fru);
  1768. unsigned char *oemfru = (unsigned char *)(&card->config.OemFru);
  1769. #endif
  1770. seq_printf(seq, "driver_version : %s\n", slic_proc_version);
  1771. seq_printf(seq, "Microcode versions: \n");
  1772. seq_printf(seq, " Gigabit (gb) : %s %s\n",
  1773. MOJAVE_UCODE_VERS_STRING, MOJAVE_UCODE_VERS_DATE);
  1774. seq_printf(seq, " Gigabit Receiver : %s %s\n",
  1775. GB_RCVUCODE_VERS_STRING, GB_RCVUCODE_VERS_DATE);
  1776. seq_printf(seq, "Vendor : %s\n", slic_vendor);
  1777. seq_printf(seq, "Product Name : %s\n", slic_product_name);
  1778. #ifdef MOOKTODO
  1779. seq_printf(seq, "VendorId : %4.4X\n",
  1780. config->VendorId);
  1781. seq_printf(seq, "DeviceId : %4.4X\n",
  1782. config->DeviceId);
  1783. seq_printf(seq, "RevisionId : %2.2x\n",
  1784. config->RevisionId);
  1785. seq_printf(seq, "Bus # : %d\n", card->busnumber);
  1786. seq_printf(seq, "Device # : %d\n", card->slotnumber);
  1787. seq_printf(seq, "Interfaces : %d\n", card->card_size);
  1788. seq_printf(seq, " Initialized : %d\n",
  1789. card->adapters_activated);
  1790. seq_printf(seq, " Allocated : %d\n",
  1791. card->adapters_allocated);
  1792. ASSERT(card->card_size <= SLIC_NBR_MACS);
  1793. for (i = 0; i < card->card_size; i++) {
  1794. seq_printf(seq,
  1795. " MAC%d : %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
  1796. i, config->macinfo[i].macaddrA[0],
  1797. config->macinfo[i].macaddrA[1],
  1798. config->macinfo[i].macaddrA[2],
  1799. config->macinfo[i].macaddrA[3],
  1800. config->macinfo[i].macaddrA[4],
  1801. config->macinfo[i].macaddrA[5]);
  1802. }
  1803. seq_printf(seq, " IF Init State Duplex/Speed irq\n");
  1804. seq_printf(seq, " -------------------------------\n");
  1805. for (i = 0; i < card->adapters_allocated; i++) {
  1806. struct adapter *adapter;
  1807. adapter = card->adapter[i];
  1808. if (adapter) {
  1809. seq_printf(seq,
  1810. " %d %d %s %s %s 0x%X\n",
  1811. adapter->physport, adapter->state,
  1812. SLIC_LINKSTATE(adapter->linkstate),
  1813. SLIC_DUPLEX(adapter->linkduplex),
  1814. SLIC_SPEED(adapter->linkspeed),
  1815. (uint) adapter->irq);
  1816. }
  1817. }
  1818. seq_printf(seq, "Generation # : %4.4X\n", card->gennumber);
  1819. seq_printf(seq, "RcvQ max entries : %4.4X\n",
  1820. SLIC_RCVQ_ENTRIES);
  1821. seq_printf(seq, "Ping Status : %8.8X\n",
  1822. card->pingstatus);
  1823. seq_printf(seq, "Minimum grant : %2.2x\n",
  1824. config->MinGrant);
  1825. seq_printf(seq, "Maximum Latency : %2.2x\n", config->MaxLat);
  1826. seq_printf(seq, "PciStatus : %4.4x\n",
  1827. config->Pcistatus);
  1828. seq_printf(seq, "Debug Device Id : %4.4x\n",
  1829. config->DbgDevId);
  1830. seq_printf(seq, "DRAM ROM Function : %4.4x\n",
  1831. config->DramRomFn);
  1832. seq_printf(seq, "Network interface Pin 1 : %2.2x\n",
  1833. config->NetIntPin1);
  1834. seq_printf(seq, "Network interface Pin 2 : %2.2x\n",
  1835. config->NetIntPin1);
  1836. seq_printf(seq, "Network interface Pin 3 : %2.2x\n",
  1837. config->NetIntPin1);
  1838. seq_printf(seq, "PM capabilities : %4.4X\n",
  1839. config->PMECapab);
  1840. seq_printf(seq, "Network Clock Controls : %4.4X\n",
  1841. config->NwClkCtrls);
  1842. switch (config->FruFormat) {
  1843. case ATK_FRU_FORMAT:
  1844. {
  1845. seq_printf(seq,
  1846. "Vendor : Alacritech, Inc.\n");
  1847. seq_printf(seq,
  1848. "Assembly # : %c%c%c%c%c%c\n",
  1849. fru[0], fru[1], fru[2], fru[3], fru[4],
  1850. fru[5]);
  1851. seq_printf(seq,
  1852. "Revision # : %c%c\n",
  1853. fru[6], fru[7]);
  1854. if (config->OEMFruFormat == VENDOR4_FRU_FORMAT) {
  1855. seq_printf(seq,
  1856. "Serial # : "
  1857. "%c%c%c%c%c%c%c%c%c%c%c%c\n",
  1858. fru[8], fru[9], fru[10],
  1859. fru[11], fru[12], fru[13],
  1860. fru[16], fru[17], fru[18],
  1861. fru[19], fru[20], fru[21]);
  1862. } else {
  1863. seq_printf(seq,
  1864. "Serial # : "
  1865. "%c%c%c%c%c%c%c%c%c%c%c%c%c%c\n",
  1866. fru[8], fru[9], fru[10],
  1867. fru[11], fru[12], fru[13],
  1868. fru[14], fru[15], fru[16],
  1869. fru[17], fru[18], fru[19],
  1870. fru[20], fru[21]);
  1871. }
  1872. break;
  1873. }
  1874. default:
  1875. {
  1876. seq_printf(seq,
  1877. "Vendor : Alacritech, Inc.\n");
  1878. seq_printf(seq,
  1879. "Serial # : Empty FRU\n");
  1880. break;
  1881. }
  1882. }
  1883. switch (config->OEMFruFormat) {
  1884. case VENDOR1_FRU_FORMAT:
  1885. {
  1886. seq_printf(seq, "FRU Information:\n");
  1887. seq_printf(seq, " Commodity # : %c\n",
  1888. oemfru[0]);
  1889. seq_printf(seq,
  1890. " Assembly # : %c%c%c%c\n",
  1891. oemfru[1], oemfru[2], oemfru[3], oemfru[4]);
  1892. seq_printf(seq,
  1893. " Revision # : %c%c\n",
  1894. oemfru[5], oemfru[6]);
  1895. seq_printf(seq,
  1896. " Supplier # : %c%c\n",
  1897. oemfru[7], oemfru[8]);
  1898. seq_printf(seq,
  1899. " Date : %c%c\n",
  1900. oemfru[9], oemfru[10]);
  1901. seq_printf(seq,
  1902. " Sequence # : %c%c%c\n",
  1903. oemfru[11], oemfru[12], oemfru[13]);
  1904. break;
  1905. }
  1906. case VENDOR2_FRU_FORMAT:
  1907. {
  1908. seq_printf(seq, "FRU Information:\n");
  1909. seq_printf(seq,
  1910. " Part # : "
  1911. "%c%c%c%c%c%c%c%c\n",
  1912. oemfru[0], oemfru[1], oemfru[2],
  1913. oemfru[3], oemfru[4], oemfru[5],
  1914. oemfru[6], oemfru[7]);
  1915. seq_printf(seq,
  1916. " Supplier # : %c%c%c%c%c\n",
  1917. oemfru[8], oemfru[9], oemfru[10],
  1918. oemfru[11], oemfru[12]);
  1919. seq_printf(seq,
  1920. " Date : %c%c%c\n",
  1921. oemfru[13], oemfru[14], oemfru[15]);
  1922. seq_printf(seq,
  1923. " Sequence # : %c%c%c%c\n",
  1924. oemfru[16], oemfru[17], oemfru[18],
  1925. oemfru[19]);
  1926. break;
  1927. }
  1928. case VENDOR3_FRU_FORMAT:
  1929. {
  1930. seq_printf(seq, "FRU Information:\n");
  1931. }
  1932. case VENDOR4_FRU_FORMAT:
  1933. {
  1934. seq_printf(seq, "FRU Information:\n");
  1935. seq_printf(seq,
  1936. " FRU Number : "
  1937. "%c%c%c%c%c%c%c%c\n",
  1938. oemfru[0], oemfru[1], oemfru[2],
  1939. oemfru[3], oemfru[4], oemfru[5],
  1940. oemfru[6], oemfru[7]);
  1941. seq_printf(seq,
  1942. " Part Number : "
  1943. "%c%c%c%c%c%c%c%c\n",
  1944. oemfru[8], oemfru[9], oemfru[10],
  1945. oemfru[11], oemfru[12], oemfru[13],
  1946. oemfru[14], oemfru[15]);
  1947. seq_printf(seq,
  1948. " EC Level : "
  1949. "%c%c%c%c%c%c%c%c\n",
  1950. oemfru[16], oemfru[17], oemfru[18],
  1951. oemfru[19], oemfru[20], oemfru[21],
  1952. oemfru[22], oemfru[23]);
  1953. break;
  1954. }
  1955. default:
  1956. break;
  1957. }
  1958. #endif
  1959. return 0;
  1960. }
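/*
 * slic_debug_adapter_show -
 *
 * debugfs per-port seq_file handler: dumps link state, receive queue
 * depth, receive/transmit statistics, interrupt counters, and the
 * per-interface error event counts.
 */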
  1961. static int slic_debug_adapter_show(struct seq_file *seq, void *v)
  1962. {
  1963. struct adapter *adapter = seq->private;
  1964. struct net_device *netdev = adapter->netdev;
  1965. seq_printf(seq, "info: interface : %s\n",
  1966. adapter->netdev->name);
  1967. seq_printf(seq, "info: status : %s\n",
  1968. SLIC_LINKSTATE(adapter->linkstate));
  1969. seq_printf(seq, "info: port : %d\n",
  1970. adapter->physport);
  1971. seq_printf(seq, "info: speed : %s\n",
  1972. SLIC_SPEED(adapter->linkspeed));
  1973. seq_printf(seq, "info: duplex : %s\n",
  1974. SLIC_DUPLEX(adapter->linkduplex));
  1975. seq_printf(seq, "info: irq : 0x%X\n",
  1976. (uint) adapter->irq);
  1977. seq_printf(seq, "info: Interrupt Agg Delay: %d usec\n",
  1978. adapter->card->loadlevel_current);
  1979. seq_printf(seq, "info: RcvQ max entries : %4.4X\n",
  1980. SLIC_RCVQ_ENTRIES);
  1981. seq_printf(seq, "info: RcvQ current : %4.4X\n",
  1982. adapter->rcvqueue.count);
  1983. seq_printf(seq, "rx stats: packets : %8.8lX\n",
  1984. netdev->stats.rx_packets);
  1985. seq_printf(seq, "rx stats: bytes : %8.8lX\n",
  1986. netdev->stats.rx_bytes);
  1987. seq_printf(seq, "rx stats: broadcasts : %8.8X\n",
  1988. adapter->rcv_broadcasts);
  1989. seq_printf(seq, "rx stats: multicasts : %8.8X\n",
  1990. adapter->rcv_multicasts);
  1991. seq_printf(seq, "rx stats: unicasts : %8.8X\n",
  1992. adapter->rcv_unicasts);
  1993. seq_printf(seq, "rx stats: errors : %8.8X\n",
  1994. (u32) adapter->slic_stats.iface.rcv_errors);
  1995. seq_printf(seq, "rx stats: Missed errors : %8.8X\n",
  1996. (u32) adapter->slic_stats.iface.rcv_discards);
  1997. seq_printf(seq, "rx stats: drops : %8.8X\n",
  1998. (u32) adapter->rcv_drops);
  1999. seq_printf(seq, "tx stats: packets : %8.8lX\n",
  2000. netdev->stats.tx_packets);
  2001. seq_printf(seq, "tx stats: bytes : %8.8lX\n",
  2002. netdev->stats.tx_bytes);
  2003. seq_printf(seq, "tx stats: errors : %8.8X\n",
  2004. (u32) adapter->slic_stats.iface.xmt_errors);
  2005. seq_printf(seq, "rx stats: multicasts : %8.8lX\n",
  2006. netdev->stats.multicast);
  2007. seq_printf(seq, "tx stats: collision errors : %8.8X\n",
  2008. (u32) adapter->slic_stats.iface.xmit_collisions);
  2009. seq_printf(seq, "perf: Max rcv frames/isr : %8.8X\n",
  2010. adapter->max_isr_rcvs);
  2011. seq_printf(seq, "perf: Rcv interrupt yields : %8.8X\n",
  2012. adapter->rcv_interrupt_yields);
  2013. seq_printf(seq, "perf: Max xmit complete/isr : %8.8X\n",
  2014. adapter->max_isr_xmits);
  2015. seq_printf(seq, "perf: error interrupts : %8.8X\n",
  2016. adapter->error_interrupts);
  2017. seq_printf(seq, "perf: error rmiss interrupts : %8.8X\n",
  2018. adapter->error_rmiss_interrupts);
  2019. seq_printf(seq, "perf: rcv interrupts : %8.8X\n",
  2020. adapter->rcv_interrupts);
  2021. seq_printf(seq, "perf: xmit interrupts : %8.8X\n",
  2022. adapter->xmit_interrupts);
  2023. seq_printf(seq, "perf: link event interrupts : %8.8X\n",
  2024. adapter->linkevent_interrupts);
  2025. seq_printf(seq, "perf: UPR interrupts : %8.8X\n",
  2026. adapter->upr_interrupts);
  2027. seq_printf(seq, "perf: interrupt count : %8.8X\n",
  2028. adapter->num_isrs);
  2029. seq_printf(seq, "perf: false interrupts : %8.8X\n",
  2030. adapter->false_interrupts);
  2031. seq_printf(seq, "perf: All register writes : %8.8X\n",
  2032. adapter->all_reg_writes);
  2033. seq_printf(seq, "perf: ICR register writes : %8.8X\n",
  2034. adapter->icr_reg_writes);
  2035. seq_printf(seq, "perf: ISR register writes : %8.8X\n",
  2036. adapter->isr_reg_writes);
  2037. seq_printf(seq, "ifevents: overflow 802 errors : %8.8X\n",
  2038. adapter->if_events.oflow802);
  2039. seq_printf(seq, "ifevents: transport overflow errors: %8.8X\n",
  2040. adapter->if_events.Tprtoflow);
  2041. seq_printf(seq, "ifevents: underflow errors : %8.8X\n",
  2042. adapter->if_events.uflow802);
  2043. seq_printf(seq, "ifevents: receive early : %8.8X\n",
  2044. adapter->if_events.rcvearly);
  2045. seq_printf(seq, "ifevents: buffer overflows : %8.8X\n",
  2046. adapter->if_events.Bufov);
  2047. seq_printf(seq, "ifevents: carrier errors : %8.8X\n",
  2048. adapter->if_events.Carre);
  2049. seq_printf(seq, "ifevents: Long : %8.8X\n",
  2050. adapter->if_events.Longe);
  2051. seq_printf(seq, "ifevents: invalid preambles : %8.8X\n",
  2052. adapter->if_events.Invp);
  2053. seq_printf(seq, "ifevents: CRC errors : %8.8X\n",
  2054. adapter->if_events.Crc);
  2055. seq_printf(seq, "ifevents: dribble nibbles : %8.8X\n",
  2056. adapter->if_events.Drbl);
  2057. seq_printf(seq, "ifevents: Code violations : %8.8X\n",
  2058. adapter->if_events.Code);
  2059. seq_printf(seq, "ifevents: TCP checksum errors : %8.8X\n",
  2060. adapter->if_events.TpCsum);
  2061. seq_printf(seq, "ifevents: TCP header short errors : %8.8X\n",
  2062. adapter->if_events.TpHlen);
  2063. seq_printf(seq, "ifevents: IP checksum errors : %8.8X\n",
  2064. adapter->if_events.IpCsum);
  2065. seq_printf(seq, "ifevents: IP frame incompletes : %8.8X\n",
  2066. adapter->if_events.IpLen);
  2067. seq_printf(seq, "ifevents: IP headers shorts : %8.8X\n",
  2068. adapter->if_events.IpHlen);
  2069. return 0;
  2070. }
  2071. static int slic_debug_adapter_open(struct inode *inode, struct file *file)
  2072. {
  2073. return single_open(file, slic_debug_adapter_show, inode->i_private);
  2074. }
  2075. static int slic_debug_card_open(struct inode *inode, struct file *file)
  2076. {
  2077. return single_open(file, slic_debug_card_show, inode->i_private);
  2078. }
  2079. static const struct file_operations slic_debug_adapter_fops = {
  2080. .owner = THIS_MODULE,
  2081. .open = slic_debug_adapter_open,
  2082. .read = seq_read,
  2083. .llseek = seq_lseek,
  2084. .release = single_release,
  2085. };
  2086. static const struct file_operations slic_debug_card_fops = {
  2087. .owner = THIS_MODULE,
  2088. .open = slic_debug_card_open,
  2089. .read = seq_read,
  2090. .llseek = seq_lseek,
  2091. .release = single_release,
  2092. };
  2093. static void slic_debug_adapter_create(struct adapter *adapter)
  2094. {
  2095. struct dentry *d;
  2096. char name[7];
  2097. struct sliccard *card = adapter->card;
  2098. if (!card->debugfs_dir)
  2099. return;
  2100. sprintf(name, "port%d", adapter->port);
  2101. d = debugfs_create_file(name, S_IRUGO,
  2102. card->debugfs_dir, adapter,
  2103. &slic_debug_adapter_fops);
  2104. if (!d || IS_ERR(d))
  2105. pr_info(PFX "%s: debugfs create failed\n", name);
  2106. else
  2107. adapter->debugfs_entry = d;
  2108. }
  2109. static void slic_debug_adapter_destroy(struct adapter *adapter)
  2110. {
  2111. debugfs_remove(adapter->debugfs_entry);
  2112. adapter->debugfs_entry = NULL;
  2113. }
  2114. static void slic_debug_card_create(struct sliccard *card)
  2115. {
  2116. struct dentry *d;
  2117. char name[IFNAMSIZ];
  2118. snprintf(name, sizeof(name), "slic%d", card->cardnum);
  2119. d = debugfs_create_dir(name, slic_debugfs);
  2120. if (!d || IS_ERR(d))
  2121. pr_info(PFX "%s: debugfs create dir failed\n",
  2122. name);
  2123. else {
  2124. card->debugfs_dir = d;
  2125. d = debugfs_create_file("cardinfo", S_IRUGO,
  2126. slic_debugfs, card,
  2127. &slic_debug_card_fops);
  2128. if (!d || IS_ERR(d))
  2129. pr_info(PFX "%s: debugfs create failed\n",
  2130. name);
  2131. else
  2132. card->debugfs_cardinfo = d;
  2133. }
  2134. }
  2135. static void slic_debug_card_destroy(struct sliccard *card)
  2136. {
  2137. int i;
  2138. for (i = 0; i < card->card_size; i++) {
  2139. struct adapter *adapter;
  2140. adapter = card->adapter[i];
  2141. if (adapter)
  2142. slic_debug_adapter_destroy(adapter);
  2143. }
  2144. if (card->debugfs_cardinfo) {
  2145. debugfs_remove(card->debugfs_cardinfo);
  2146. card->debugfs_cardinfo = NULL;
  2147. }
  2148. if (card->debugfs_dir) {
  2149. debugfs_remove(card->debugfs_dir);
  2150. card->debugfs_dir = NULL;
  2151. }
  2152. }
  2153. static void slic_debug_init(void)
  2154. {
  2155. struct dentry *ent;
  2156. ent = debugfs_create_dir("slic", NULL);
  2157. if (!ent || IS_ERR(ent)) {
  2158. pr_info(PFX "debugfs create directory failed\n");
  2159. return;
  2160. }
  2161. slic_debugfs = ent;
  2162. }
  2163. static void slic_debug_cleanup(void)
  2164. {
  2165. if (slic_debugfs) {
  2166. debugfs_remove(slic_debugfs);
  2167. slic_debugfs = NULL;
  2168. }
  2169. }
  2170. /*
  2171. * slic_link_event_handler -
  2172. *
  2173. * Initiate a link configuration sequence. The link configuration begins
  2174. * by issuing a READ_LINK_STATUS command to the Utility Processor on the
  2175. * SLIC. Since the command finishes asynchronously, the slic_upr_request_complete
  2176. * routine will follow it up with a UP configuration write command, which
  2177. * will also complete asynchronously.
  2178. *
  2179. */
  2180. static void slic_link_event_handler(struct adapter *adapter)
  2181. {
  2182. int status;
  2183. struct slic_shmem *pshmem;
  2184. if (adapter->state != ADAPT_UP) {
  2185. /* Adapter is not operational. Ignore. */
  2186. return;
  2187. }
  2188. pshmem = (struct slic_shmem *)adapter->phys_shmem;
  2189. #if BITS_PER_LONG == 64
  2190. status = slic_upr_request(adapter,
  2191. SLIC_UPR_RLSR,
  2192. SLIC_GET_ADDR_LOW(&pshmem->linkstatus),
  2193. SLIC_GET_ADDR_HIGH(&pshmem->linkstatus),
  2194. 0, 0);
  2195. #else
  2196. status = slic_upr_request(adapter, SLIC_UPR_RLSR,
  2197. (u32) &pshmem->linkstatus, /* no 4GB wrap guaranteed */
  2198. 0, 0, 0);
  2199. #endif
  2200. ASSERT(status == 0);
  2201. }
  2202. static void slic_init_cleanup(struct adapter *adapter)
  2203. {
  2204. if (adapter->intrregistered) {
  2205. adapter->intrregistered = 0;
  2206. free_irq(adapter->netdev->irq, adapter->netdev);
  2207. }
  2208. if (adapter->pshmem) {
  2209. pci_free_consistent(adapter->pcidev,
  2210. sizeof(struct slic_shmem),
  2211. adapter->pshmem, adapter->phys_shmem);
  2212. adapter->pshmem = NULL;
  2213. adapter->phys_shmem = (dma_addr_t) NULL;
  2214. }
  2215. if (adapter->pingtimerset) {
  2216. adapter->pingtimerset = 0;
  2217. del_timer(&adapter->pingtimer);
  2218. }
  2219. slic_rspqueue_free(adapter);
  2220. slic_cmdq_free(adapter);
  2221. slic_rcvqueue_free(adapter);
  2222. }
  2223. /*
  2224. * Allocate a mcast_address structure to hold the multicast address.
  2225. * Link it in.
  2226. */
  2227. static int slic_mcast_add_list(struct adapter *adapter, char *address)
  2228. {
  2229. struct mcast_address *mcaddr, *mlist;
  2230. /* Check to see if it already exists */
  2231. mlist = adapter->mcastaddrs;
  2232. while (mlist) {
  2233. if (!compare_ether_addr(mlist->address, address))
  2234. return 0;
  2235. mlist = mlist->next;
  2236. }
  2237. /* Doesn't already exist. Allocate a structure to hold it */
  2238. mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC);
  2239. if (mcaddr == NULL)
  2240. return 1;
  2241. memcpy(mcaddr->address, address, 6);
  2242. mcaddr->next = adapter->mcastaddrs;
  2243. adapter->mcastaddrs = mcaddr;
  2244. return 0;
  2245. }
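/*
 * slic_mcast_set_list -
 *
 * Multicast list handler. Adds each hardware multicast address to the
 * driver's list and sets its hash bit, then either pushes a full
 * configuration update when the device flags changed or simply
 * reprograms the multicast mask.
 */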
  2246. static void slic_mcast_set_list(struct net_device *dev)
  2247. {
  2248. struct adapter *adapter = netdev_priv(dev);
  2249. int status = 0;
  2250. char *addresses;
  2251. struct netdev_hw_addr *ha;
  2252. ASSERT(adapter);
  2253. netdev_for_each_mc_addr(ha, dev) {
  2254. addresses = (char *) &ha->addr;
  2255. status = slic_mcast_add_list(adapter, addresses);
  2256. if (status != 0)
  2257. break;
  2258. slic_mcast_set_bit(adapter, addresses);
  2259. }
  2260. if (adapter->devflags_prev != dev->flags) {
  2261. adapter->macopts = MAC_DIRECTED;
  2262. if (dev->flags) {
  2263. if (dev->flags & IFF_BROADCAST)
  2264. adapter->macopts |= MAC_BCAST;
  2265. if (dev->flags & IFF_PROMISC)
  2266. adapter->macopts |= MAC_PROMISC;
  2267. if (dev->flags & IFF_ALLMULTI)
  2268. adapter->macopts |= MAC_ALLMCAST;
  2269. if (dev->flags & IFF_MULTICAST)
  2270. adapter->macopts |= MAC_MCAST;
  2271. }
  2272. adapter->devflags_prev = dev->flags;
  2273. slic_config_set(adapter, true);
  2274. } else {
  2275. if (status == 0)
  2276. slic_mcast_set_mask(adapter);
  2277. }
  2278. return;
  2279. }
  2280. #define XMIT_FAIL_LINK_STATE 1
  2281. #define XMIT_FAIL_ZERO_LENGTH 2
  2282. #define XMIT_FAIL_HOSTCMD_FAIL 3
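/*
 * slic_xmit_build_request -
 *
 * Fill in the 64-bit transmit command for a single skb: set the port in
 * the flags, mark it as an XMT request, map the skb data for DMA, and
 * record the buffer address and length. cmdsize is the command length
 * in 32-byte units.
 */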
  2283. static void slic_xmit_build_request(struct adapter *adapter,
  2284. struct slic_hostcmd *hcmd, struct sk_buff *skb)
  2285. {
  2286. struct slic_host64_cmd *ihcmd;
  2287. ulong phys_addr;
  2288. ihcmd = &hcmd->cmd64;
  2289. ihcmd->flags = (adapter->port << IHFLG_IFSHFT);
  2290. ihcmd->command = IHCMD_XMT_REQ;
  2291. ihcmd->u.slic_buffers.totlen = skb->len;
  2292. phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
  2293. PCI_DMA_TODEVICE);
  2294. ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr);
  2295. ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr);
  2296. ihcmd->u.slic_buffers.bufs[0].length = skb->len;
  2297. #if BITS_PER_LONG == 64
  2298. hcmd->cmdsize = (u32) ((((u64)&ihcmd->u.slic_buffers.bufs[1] -
  2299. (u64) hcmd) + 31) >> 5);
  2300. #else
  2301. hcmd->cmdsize = ((((u32) &ihcmd->u.slic_buffers.bufs[1] -
  2302. (u32) hcmd) + 31) >> 5);
  2303. #endif
  2304. }
  2305. static void slic_xmit_fail(struct adapter *adapter,
  2306. struct sk_buff *skb,
  2307. void *cmd, u32 skbtype, u32 status)
  2308. {
  2309. if (adapter->xmitq_full)
  2310. netif_stop_queue(adapter->netdev);
  2311. if ((cmd == NULL) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) {
  2312. switch (status) {
  2313. case XMIT_FAIL_LINK_STATE:
  2314. dev_err(&adapter->netdev->dev,
  2315. "reject xmit skb[%p: %x] linkstate[%s] "
  2316. "adapter[%s:%d] card[%s:%d]\n",
  2317. skb, skb->pkt_type,
  2318. SLIC_LINKSTATE(adapter->linkstate),
  2319. SLIC_ADAPTER_STATE(adapter->state),
  2320. adapter->state,
  2321. SLIC_CARD_STATE(adapter->card->state),
  2322. adapter->card->state);
  2323. break;
  2324. case XMIT_FAIL_ZERO_LENGTH:
  2325. dev_err(&adapter->netdev->dev,
  2326. "xmit_start skb->len == 0 skb[%p] type[%x]\n",
  2327. skb, skb->pkt_type);
  2328. break;
  2329. case XMIT_FAIL_HOSTCMD_FAIL:
  2330. dev_err(&adapter->netdev->dev,
  2331. "xmit_start skb[%p] type[%x] No host commands "
  2332. "available\n", skb, skb->pkt_type);
  2333. break;
  2334. default:
  2335. ASSERT(0);
  2336. }
  2337. }
  2338. dev_kfree_skb(skb);
  2339. adapter->netdev->stats.tx_dropped++;
  2340. }
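/*
 * slic_rcv_handle_error -
 *
 * Decode the receive status word supplied by the card and bump the
 * matching per-interface error counters. Gigabit (SLIC_1GB_DEVICE_ID)
 * and non-gigabit parts report errors in different formats, hence the
 * two branches.
 */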
  2341. static void slic_rcv_handle_error(struct adapter *adapter,
  2342. struct slic_rcvbuf *rcvbuf)
  2343. {
  2344. struct slic_hddr_wds *hdr = (struct slic_hddr_wds *)rcvbuf->data;
  2345. struct net_device *netdev = adapter->netdev;
  2346. if (adapter->devid != SLIC_1GB_DEVICE_ID) {
  2347. if (hdr->frame_status14 & VRHSTAT_802OE)
  2348. adapter->if_events.oflow802++;
  2349. if (hdr->frame_status14 & VRHSTAT_TPOFLO)
  2350. adapter->if_events.Tprtoflow++;
  2351. if (hdr->frame_status_b14 & VRHSTATB_802UE)
  2352. adapter->if_events.uflow802++;
  2353. if (hdr->frame_status_b14 & VRHSTATB_RCVE) {
  2354. adapter->if_events.rcvearly++;
  2355. netdev->stats.rx_fifo_errors++;
  2356. }
  2357. if (hdr->frame_status_b14 & VRHSTATB_BUFF) {
  2358. adapter->if_events.Bufov++;
  2359. netdev->stats.rx_over_errors++;
  2360. }
  2361. if (hdr->frame_status_b14 & VRHSTATB_CARRE) {
  2362. adapter->if_events.Carre++;
  2363. netdev->stats.tx_carrier_errors++;
  2364. }
  2365. if (hdr->frame_status_b14 & VRHSTATB_LONGE)
  2366. adapter->if_events.Longe++;
  2367. if (hdr->frame_status_b14 & VRHSTATB_PREA)
  2368. adapter->if_events.Invp++;
  2369. if (hdr->frame_status_b14 & VRHSTATB_CRC) {
  2370. adapter->if_events.Crc++;
  2371. netdev->stats.rx_crc_errors++;
  2372. }
  2373. if (hdr->frame_status_b14 & VRHSTATB_DRBL)
  2374. adapter->if_events.Drbl++;
  2375. if (hdr->frame_status_b14 & VRHSTATB_CODE)
  2376. adapter->if_events.Code++;
  2377. if (hdr->frame_status_b14 & VRHSTATB_TPCSUM)
  2378. adapter->if_events.TpCsum++;
  2379. if (hdr->frame_status_b14 & VRHSTATB_TPHLEN)
  2380. adapter->if_events.TpHlen++;
  2381. if (hdr->frame_status_b14 & VRHSTATB_IPCSUM)
  2382. adapter->if_events.IpCsum++;
  2383. if (hdr->frame_status_b14 & VRHSTATB_IPLERR)
  2384. adapter->if_events.IpLen++;
  2385. if (hdr->frame_status_b14 & VRHSTATB_IPHERR)
  2386. adapter->if_events.IpHlen++;
  2387. } else {
  2388. if (hdr->frame_statusGB & VGBSTAT_XPERR) {
  2389. u32 xerr = hdr->frame_statusGB >> VGBSTAT_XERRSHFT;
  2390. if (xerr == VGBSTAT_XCSERR)
  2391. adapter->if_events.TpCsum++;
  2392. if (xerr == VGBSTAT_XUFLOW)
  2393. adapter->if_events.Tprtoflow++;
  2394. if (xerr == VGBSTAT_XHLEN)
  2395. adapter->if_events.TpHlen++;
  2396. }
  2397. if (hdr->frame_statusGB & VGBSTAT_NETERR) {
  2398. u32 nerr =
  2399. (hdr->frame_statusGB >> VGBSTAT_NERRSHFT) &
  2401. VGBSTAT_NERRMSK;
  2402. if (nerr == VGBSTAT_NCSERR)
  2403. adapter->if_events.IpCsum++;
  2404. if (nerr == VGBSTAT_NUFLOW)
  2405. adapter->if_events.IpLen++;
  2406. if (nerr == VGBSTAT_NHLEN)
  2407. adapter->if_events.IpHlen++;
  2408. }
  2409. if (hdr->frame_statusGB & VGBSTAT_LNKERR) {
  2410. u32 lerr = hdr->frame_statusGB & VGBSTAT_LERRMSK;
  2411. if (lerr == VGBSTAT_LDEARLY)
  2412. adapter->if_events.rcvearly++;
  2413. if (lerr == VGBSTAT_LBOFLO)
  2414. adapter->if_events.Bufov++;
  2415. if (lerr == VGBSTAT_LCODERR)
  2416. adapter->if_events.Code++;
  2417. if (lerr == VGBSTAT_LDBLNBL)
  2418. adapter->if_events.Drbl++;
  2419. if (lerr == VGBSTAT_LCRCERR)
  2420. adapter->if_events.Crc++;
  2421. if (lerr == VGBSTAT_LOFLO)
  2422. adapter->if_events.oflow802++;
  2423. if (lerr == VGBSTAT_LUFLO)
  2424. adapter->if_events.uflow802++;
  2425. }
  2426. }
  2427. return;
  2428. }
  2429. #define TCP_OFFLOAD_FRAME_PUSHFLAG 0x10000000
  2430. #define M_FAST_PATH 0x0040
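/*
 * slic_rcv_handler -
 *
 * Drain completed receive buffers from the receive queue, account for
 * any card-reported errors, apply the MAC filter, and push good frames
 * up the stack with netif_rx(). Processing yields after
 * SLIC_RCVQ_MAX_PROCESS_ISR frames to bound time spent in the ISR.
 */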
  2431. static void slic_rcv_handler(struct adapter *adapter)
  2432. {
  2433. struct net_device *netdev = adapter->netdev;
  2434. struct sk_buff *skb;
  2435. struct slic_rcvbuf *rcvbuf;
  2436. u32 frames = 0;
  2437. while ((skb = slic_rcvqueue_getnext(adapter))) {
  2438. u32 rx_bytes;
  2439. ASSERT(skb->head);
  2440. rcvbuf = (struct slic_rcvbuf *)skb->head;
  2441. adapter->card->events++;
  2442. if (rcvbuf->status & IRHDDR_ERR) {
  2443. adapter->rx_errors++;
  2444. slic_rcv_handle_error(adapter, rcvbuf);
  2445. slic_rcvqueue_reinsert(adapter, skb);
  2446. continue;
  2447. }
  2448. if (!slic_mac_filter(adapter, (struct ether_header *)
  2449. rcvbuf->data)) {
  2450. slic_rcvqueue_reinsert(adapter, skb);
  2451. continue;
  2452. }
  2453. skb_pull(skb, SLIC_RCVBUF_HEADSIZE);
  2454. rx_bytes = (rcvbuf->length & IRHDDR_FLEN_MSK);
  2455. skb_put(skb, rx_bytes);
  2456. netdev->stats.rx_packets++;
  2457. netdev->stats.rx_bytes += rx_bytes;
  2458. #if SLIC_OFFLOAD_IP_CHECKSUM
  2459. skb->ip_summed = CHECKSUM_UNNECESSARY;
  2460. #endif
  2461. skb->dev = adapter->netdev;
  2462. skb->protocol = eth_type_trans(skb, skb->dev);
  2463. netif_rx(skb);
  2464. ++frames;
  2465. #if SLIC_INTERRUPT_PROCESS_LIMIT
  2466. if (frames >= SLIC_RCVQ_MAX_PROCESS_ISR) {
  2467. adapter->rcv_interrupt_yields++;
  2468. break;
  2469. }
  2470. #endif
  2471. }
  2472. adapter->max_isr_rcvs = max(adapter->max_isr_rcvs, frames);
  2473. }
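/*
 * slic_xmit_complete -
 *
 * Walk the response queue, translate each response's host handle back
 * to its slic_hostcmd, free the transmitted skb, and return the command
 * to the done queue. Runs from interrupt context.
 */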
  2474. static void slic_xmit_complete(struct adapter *adapter)
  2475. {
  2476. struct slic_hostcmd *hcmd;
  2477. struct slic_rspbuf *rspbuf;
  2478. u32 frames = 0;
  2479. struct slic_handle_word slic_handle_word;
  2480. do {
  2481. rspbuf = slic_rspqueue_getnext(adapter);
  2482. if (!rspbuf)
  2483. break;
  2484. adapter->xmit_completes++;
  2485. adapter->card->events++;
  2486. /*
  2487. Get the completed host command buffer
  2488. */
  2489. slic_handle_word.handle_token = rspbuf->hosthandle;
  2490. ASSERT(slic_handle_word.handle_index);
  2491. ASSERT(slic_handle_word.handle_index <= SLIC_CMDQ_MAXCMDS);
  2492. hcmd =
  2493. (struct slic_hostcmd *)
  2494. adapter->slic_handles[slic_handle_word.handle_index].
  2495. address;
  2496. /* hcmd = (struct slic_hostcmd *) rspbuf->hosthandle; */
  2497. ASSERT(hcmd);
  2498. ASSERT(hcmd->pslic_handle ==
  2499. &adapter->slic_handles[slic_handle_word.handle_index]);
  2500. if (hcmd->type == SLIC_CMD_DUMB) {
  2501. if (hcmd->skb)
  2502. dev_kfree_skb_irq(hcmd->skb);
  2503. slic_cmdq_putdone_irq(adapter, hcmd);
  2504. }
  2505. rspbuf->status = 0;
  2506. rspbuf->hosthandle = 0;
  2507. frames++;
  2508. } while (1);
  2509. adapter->max_isr_xmits = max(adapter->max_isr_xmits, frames);
  2510. }
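/*
 * slic_interrupt -
 *
 * Interrupt service routine. Reads the ISR copy from shared memory,
 * masks further interrupts while processing, then dispatches to the
 * error, link-event, utility-processor, receive, and transmit-complete
 * handlers as indicated by the ISR bits, and finally acknowledges the
 * interrupt by writing zero to the ISR register.
 */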
  2511. static irqreturn_t slic_interrupt(int irq, void *dev_id)
  2512. {
  2513. struct net_device *dev = (struct net_device *)dev_id;
  2514. struct adapter *adapter = netdev_priv(dev);
  2515. u32 isr;
  2516. if ((adapter->pshmem) && (adapter->pshmem->isr)) {
  2517. slic_reg32_write(&adapter->slic_regs->slic_icr,
  2518. ICR_INT_MASK, FLUSH);
  2519. isr = adapter->isrcopy = adapter->pshmem->isr;
  2520. adapter->pshmem->isr = 0;
  2521. adapter->num_isrs++;
  2522. switch (adapter->card->state) {
  2523. case CARD_UP:
  2524. if (isr & ~ISR_IO) {
  2525. if (isr & ISR_ERR) {
  2526. adapter->error_interrupts++;
  2527. if (isr & ISR_RMISS) {
  2528. int count;
  2529. int pre_count;
  2530. int errors;
  2531. struct slic_rcvqueue *rcvq =
  2532. &adapter->rcvqueue;
  2533. adapter->error_rmiss_interrupts++;
  2535. if (!rcvq->errors)
  2536. rcv_count = rcvq->count;
  2537. pre_count = rcvq->count;
  2538. errors = rcvq->errors;
  2539. while (rcvq->count <
  2540. SLIC_RCVQ_FILLTHRESH) {
  2541. count =
  2542. slic_rcvqueue_fill(adapter);
  2544. if (!count)
  2545. break;
  2546. }
  2547. } else if (isr & ISR_XDROP) {
  2548. dev_err(&dev->dev,
  2549. "isr & ISR_ERR [%x] "
  2550. "ISR_XDROP \n", isr);
  2551. } else {
  2552. dev_err(&dev->dev,
  2553. "isr & ISR_ERR [%x]\n",
  2554. isr);
  2555. }
  2556. }
  2557. if (isr & ISR_LEVENT) {
  2558. adapter->linkevent_interrupts++;
  2559. slic_link_event_handler(adapter);
  2560. }
  2561. if ((isr & ISR_UPC) ||
  2562. (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
  2563. adapter->upr_interrupts++;
  2564. slic_upr_request_complete(adapter, isr);
  2565. }
  2566. }
  2567. if (isr & ISR_RCV) {
  2568. adapter->rcv_interrupts++;
  2569. slic_rcv_handler(adapter);
  2570. }
  2571. if (isr & ISR_CMD) {
  2572. adapter->xmit_interrupts++;
  2573. slic_xmit_complete(adapter);
  2574. }
  2575. break;
  2576. case CARD_DOWN:
  2577. if ((isr & ISR_UPC) ||
  2578. (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
  2579. adapter->upr_interrupts++;
  2580. slic_upr_request_complete(adapter, isr);
  2581. }
  2582. break;
  2583. default:
  2584. break;
  2585. }
  2586. adapter->isrcopy = 0;
  2587. adapter->all_reg_writes += 2;
  2588. adapter->isr_reg_writes++;
  2589. slic_reg32_write(&adapter->slic_regs->slic_isr, 0, FLUSH);
  2590. } else {
  2591. adapter->false_interrupts++;
  2592. }
  2593. return IRQ_HANDLED;
  2594. }
  2595. #define NORMAL_ETHFRAME 0
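/*
 * slic_xmit_start -
 *
 * Transmit entry point. Rejects frames while the link, adapter, or card
 * is down, grabs a free host command, builds an XMT request describing
 * the skb, and writes the command's bus address and size to the card's
 * CBAR register(s). Failures drop the skb via slic_xmit_fail().
 */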
  2596. static netdev_tx_t slic_xmit_start(struct sk_buff *skb, struct net_device *dev)
  2597. {
  2598. struct sliccard *card;
  2599. struct adapter *adapter = netdev_priv(dev);
  2600. struct slic_hostcmd *hcmd = NULL;
  2601. u32 status = 0;
  2602. u32 skbtype = NORMAL_ETHFRAME;
  2603. void *offloadcmd = NULL;
  2604. card = adapter->card;
  2605. ASSERT(card);
  2606. if ((adapter->linkstate != LINK_UP) ||
  2607. (adapter->state != ADAPT_UP) || (card->state != CARD_UP)) {
  2608. status = XMIT_FAIL_LINK_STATE;
  2609. goto xmit_fail;
  2610. } else if (skb->len == 0) {
  2611. status = XMIT_FAIL_ZERO_LENGTH;
  2612. goto xmit_fail;
  2613. }
  2614. if (skbtype == NORMAL_ETHFRAME) {
  2615. hcmd = slic_cmdq_getfree(adapter);
  2616. if (!hcmd) {
  2617. adapter->xmitq_full = 1;
  2618. status = XMIT_FAIL_HOSTCMD_FAIL;
  2619. goto xmit_fail;
  2620. }
  2621. ASSERT(hcmd->pslic_handle);
  2622. ASSERT(hcmd->cmd64.hosthandle ==
  2623. hcmd->pslic_handle->token.handle_token);
  2624. hcmd->skb = skb;
  2625. hcmd->busy = 1;
  2626. hcmd->type = SLIC_CMD_DUMB;
  2627. if (skbtype == NORMAL_ETHFRAME)
  2628. slic_xmit_build_request(adapter, hcmd, skb);
  2629. }
  2630. dev->stats.tx_packets++;
  2631. dev->stats.tx_bytes += skb->len;
  2632. #ifdef DEBUG_DUMP
  2633. if (adapter->kill_card) {
  2634. struct slic_host64_cmd *ihcmd;
  2635. ihcmd = &hcmd->cmd64;
  2636. ihcmd->flags |= 0x40;
  2637. adapter->kill_card = 0; /* only do this once */
  2638. }
  2639. #endif
  2640. if (hcmd->paddrh == 0) {
  2641. slic_reg32_write(&adapter->slic_regs->slic_cbar,
  2642. (hcmd->paddrl | hcmd->cmdsize), DONT_FLUSH);
  2643. } else {
  2644. slic_reg64_write(adapter, &adapter->slic_regs->slic_cbar64,
  2645. (hcmd->paddrl | hcmd->cmdsize),
  2646. &adapter->slic_regs->slic_addr_upper,
  2647. hcmd->paddrh, DONT_FLUSH);
  2648. }
  2649. xmit_done:
  2650. return NETDEV_TX_OK;
  2651. xmit_fail:
  2652. slic_xmit_fail(adapter, skb, offloadcmd, skbtype, status);
  2653. goto xmit_done;
  2654. }
  2655. static void slic_adapter_freeresources(struct adapter *adapter)
  2656. {
  2657. slic_init_cleanup(adapter);
  2658. adapter->error_interrupts = 0;
  2659. adapter->rcv_interrupts = 0;
  2660. adapter->xmit_interrupts = 0;
  2661. adapter->linkevent_interrupts = 0;
  2662. adapter->upr_interrupts = 0;
  2663. adapter->num_isrs = 0;
  2664. adapter->xmit_completes = 0;
  2665. adapter->rcv_broadcasts = 0;
  2666. adapter->rcv_multicasts = 0;
  2667. adapter->rcv_unicasts = 0;
  2668. }
  2669. static int slic_adapter_allocresources(struct adapter *adapter)
  2670. {
  2671. if (!adapter->intrregistered) {
  2672. int retval;
  2673. spin_unlock_irqrestore(&slic_global.driver_lock.lock,
  2674. slic_global.driver_lock.flags);
  2675. retval = request_irq(adapter->netdev->irq,
  2676. &slic_interrupt,
  2677. IRQF_SHARED,
  2678. adapter->netdev->name, adapter->netdev);
  2679. spin_lock_irqsave(&slic_global.driver_lock.lock,
  2680. slic_global.driver_lock.flags);
  2681. if (retval) {
  2682. dev_err(&adapter->netdev->dev,
  2683. "request_irq (%s) FAILED [%x]\n",
  2684. adapter->netdev->name, retval);
  2685. return retval;
  2686. }
  2687. adapter->intrregistered = 1;
  2688. }
  2689. return 0;
  2690. }
  2691. /*
  2692. * slic_if_init
  2693. *
  2694. * Perform initialization of our slic interface.
  2695. *
  2696. */
  2697. static int slic_if_init(struct adapter *adapter)
  2698. {
  2699. struct sliccard *card = adapter->card;
  2700. struct net_device *dev = adapter->netdev;
  2701. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  2702. struct slic_shmem *pshmem;
  2703. int rc;
  2704. ASSERT(card);
  2705. /* adapter should be down at this point */
  2706. if (adapter->state != ADAPT_DOWN) {
  2707. dev_err(&dev->dev, "%s: adapter->state != ADAPT_DOWN\n",
  2708. __func__);
  2709. rc = -EIO;
  2710. goto err;
  2711. }
  2712. ASSERT(adapter->linkstate == LINK_DOWN);
  2713. adapter->devflags_prev = dev->flags;
  2714. adapter->macopts = MAC_DIRECTED;
  2715. if (dev->flags) {
  2716. if (dev->flags & IFF_BROADCAST)
  2717. adapter->macopts |= MAC_BCAST;
  2718. if (dev->flags & IFF_PROMISC)
  2719. adapter->macopts |= MAC_PROMISC;
  2720. if (dev->flags & IFF_ALLMULTI)
  2721. adapter->macopts |= MAC_ALLMCAST;
  2722. if (dev->flags & IFF_MULTICAST)
  2723. adapter->macopts |= MAC_MCAST;
  2724. }
  2725. rc = slic_adapter_allocresources(adapter);
  2726. if (rc) {
  2727. dev_err(&dev->dev,
  2728. "%s: slic_adapter_allocresources FAILED %x\n",
  2729. __func__, rc);
  2730. slic_adapter_freeresources(adapter);
  2731. goto err;
  2732. }
  2733. if (!adapter->queues_initialized) {
  2734. if ((rc = slic_rspqueue_init(adapter)))
  2735. goto err;
  2736. if ((rc = slic_cmdq_init(adapter)))
  2737. goto err;
  2738. if ((rc = slic_rcvqueue_init(adapter)))
  2739. goto err;
  2740. adapter->queues_initialized = 1;
  2741. }
  2742. slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
  2743. mdelay(1);
  2744. if (!adapter->isp_initialized) {
  2745. pshmem = (struct slic_shmem *)adapter->phys_shmem;
  2746. spin_lock_irqsave(&adapter->bit64reglock.lock,
  2747. adapter->bit64reglock.flags);
  2748. #if BITS_PER_LONG == 64
  2749. slic_reg32_write(&slic_regs->slic_addr_upper,
  2750. SLIC_GET_ADDR_HIGH(&pshmem->isr), DONT_FLUSH);
  2751. slic_reg32_write(&slic_regs->slic_isp,
  2752. SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH);
  2753. #else
  2754. slic_reg32_write(&slic_regs->slic_addr_upper, 0, DONT_FLUSH);
  2755. slic_reg32_write(&slic_regs->slic_isp, (u32)&pshmem->isr, FLUSH);
  2756. #endif
  2757. spin_unlock_irqrestore(&adapter->bit64reglock.lock,
  2758. adapter->bit64reglock.flags);
  2759. adapter->isp_initialized = 1;
  2760. }
  2761. adapter->state = ADAPT_UP;
  2762. if (!card->loadtimerset) {
  2763. init_timer(&card->loadtimer);
  2764. card->loadtimer.expires =
  2765. jiffies + (SLIC_LOADTIMER_PERIOD * HZ);
  2766. card->loadtimer.data = (ulong) card;
  2767. card->loadtimer.function = &slic_timer_load_check;
  2768. add_timer(&card->loadtimer);
  2769. card->loadtimerset = 1;
  2770. }
  2771. if (!adapter->pingtimerset) {
  2772. init_timer(&adapter->pingtimer);
  2773. adapter->pingtimer.expires =
  2774. jiffies + (PING_TIMER_INTERVAL * HZ);
  2775. adapter->pingtimer.data = (ulong) dev;
  2776. adapter->pingtimer.function = &slic_timer_ping;
  2777. add_timer(&adapter->pingtimer);
  2778. adapter->pingtimerset = 1;
  2779. adapter->card->pingstatus = ISR_PINGMASK;
  2780. }
  2781. /*
  2782. * clear any pending events, then enable interrupts
  2783. */
  2784. adapter->isrcopy = 0;
  2785. adapter->pshmem->isr = 0;
  2786. slic_reg32_write(&slic_regs->slic_isr, 0, FLUSH);
  2787. slic_reg32_write(&slic_regs->slic_icr, ICR_INT_ON, FLUSH);
  2788. slic_link_config(adapter, LINK_AUTOSPEED, LINK_AUTOD);
  2789. slic_link_event_handler(adapter);
  2790. err:
  2791. return rc;
  2792. }
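/*
 * slic_entry_open -
 *
 * ndo_open handler. Activates the port, runs slic_if_init() under the
 * global driver lock (rolling the activation back on failure), and
 * records the first opened adapter as the card master.
 */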
  2793. static int slic_entry_open(struct net_device *dev)
  2794. {
  2795. struct adapter *adapter = netdev_priv(dev);
  2796. struct sliccard *card = adapter->card;
  2797. u32 locked = 0;
  2798. int status;
  2799. ASSERT(adapter);
  2800. ASSERT(card);
  2801. netif_stop_queue(adapter->netdev);
  2802. spin_lock_irqsave(&slic_global.driver_lock.lock,
  2803. slic_global.driver_lock.flags);
  2804. locked = 1;
  2805. if (!adapter->activated) {
  2806. card->adapters_activated++;
  2807. slic_global.num_slic_ports_active++;
  2808. adapter->activated = 1;
  2809. }
  2810. status = slic_if_init(adapter);
  2811. if (status != 0) {
  2812. if (adapter->activated) {
  2813. card->adapters_activated--;
  2814. slic_global.num_slic_ports_active--;
  2815. adapter->activated = 0;
  2816. }
  2817. if (locked) {
  2818. spin_unlock_irqrestore(&slic_global.driver_lock.lock,
  2819. slic_global.driver_lock.flags);
  2820. locked = 0;
  2821. }
  2822. return status;
  2823. }
  2824. if (!card->master)
  2825. card->master = adapter;
  2826. if (locked) {
  2827. spin_unlock_irqrestore(&slic_global.driver_lock.lock,
  2828. slic_global.driver_lock.flags);
  2829. locked = 0;
  2830. }
  2831. return 0;
  2832. }
  2833. static void slic_card_cleanup(struct sliccard *card)
  2834. {
  2835. if (card->loadtimerset) {
  2836. card->loadtimerset = 0;
  2837. del_timer(&card->loadtimer);
  2838. }
  2839. slic_debug_card_destroy(card);
  2840. kfree(card);
  2841. }
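/*
 * slic_entry_remove -
 *
 * PCI remove handler. Frees adapter resources, unmaps MMIO space,
 * unregisters the net device, releases the multicast list, and tears
 * down the card structure once its last adapter is gone.
 */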
  2842. static void __devexit slic_entry_remove(struct pci_dev *pcidev)
  2843. {
  2844. struct net_device *dev = pci_get_drvdata(pcidev);
  2845. u32 mmio_start = 0;
  2846. uint mmio_len = 0;
  2847. struct adapter *adapter = netdev_priv(dev);
  2848. struct sliccard *card;
  2849. struct mcast_address *mcaddr, *mlist;
  2850. ASSERT(adapter);
  2851. slic_adapter_freeresources(adapter);
  2852. slic_unmap_mmio_space(adapter);
  2853. unregister_netdev(dev);
  2854. mmio_start = pci_resource_start(pcidev, 0);
  2855. mmio_len = pci_resource_len(pcidev, 0);
  2856. release_mem_region(mmio_start, mmio_len);
  2857. iounmap((void __iomem *)dev->base_addr);
  2858. /* free multicast addresses */
  2859. mlist = adapter->mcastaddrs;
  2860. while (mlist) {
  2861. mcaddr = mlist;
  2862. mlist = mlist->next;
  2863. kfree(mcaddr);
  2864. }
  2865. ASSERT(adapter->card);
  2866. card = adapter->card;
  2867. ASSERT(card->adapters_allocated);
  2868. card->adapters_allocated--;
  2869. adapter->allocated = 0;
  2870. if (!card->adapters_allocated) {
  2871. struct sliccard *curr_card = slic_global.slic_card;
  2872. if (curr_card == card) {
  2873. slic_global.slic_card = card->next;
  2874. } else {
  2875. while (curr_card->next != card)
  2876. curr_card = curr_card->next;
  2877. ASSERT(curr_card);
  2878. curr_card->next = card->next;
  2879. }
  2880. ASSERT(slic_global.num_slic_cards);
  2881. slic_global.num_slic_cards--;
  2882. slic_card_cleanup(card);
  2883. }
  2884. free_netdev(dev);
  2885. pci_release_regions(pcidev);
  2886. }
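/*
 * slic_entry_halt -
 *
 * ndo_stop handler. Marks the adapter and link down, disables card
 * interrupts, clears the configuration, and resets the command queues.
 * When AUTOMATIC_RESET is enabled, the last port going down
 * re-initializes the card.
 */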
  2887. static int slic_entry_halt(struct net_device *dev)
  2888. {
  2889. struct adapter *adapter = netdev_priv(dev);
  2890. struct sliccard *card = adapter->card;
  2891. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  2892. spin_lock_irqsave(&slic_global.driver_lock.lock,
  2893. slic_global.driver_lock.flags);
  2894. ASSERT(card);
  2895. netif_stop_queue(adapter->netdev);
  2896. adapter->state = ADAPT_DOWN;
  2897. adapter->linkstate = LINK_DOWN;
  2898. adapter->upr_list = NULL;
  2899. adapter->upr_busy = 0;
  2900. adapter->devflags_prev = 0;
  2901. ASSERT(card->adapter[adapter->cardindex] == adapter);
  2902. slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
  2903. adapter->all_reg_writes++;
  2904. adapter->icr_reg_writes++;
  2905. slic_config_clear(adapter);
  2906. if (adapter->activated) {
  2907. card->adapters_activated--;
  2908. slic_global.num_slic_ports_active--;
  2909. adapter->activated = 0;
  2910. }
  2911. #ifdef AUTOMATIC_RESET
  2912. slic_reg32_write(&slic_regs->slic_reset_iface, 0, FLUSH);
  2913. #endif
  2914. /*
  2915. * Reset the adapter's cmd queues
  2916. */
  2917. slic_cmdq_reset(adapter);
  2918. #ifdef AUTOMATIC_RESET
  2919. if (!card->adapters_activated)
  2920. slic_card_init(card, adapter);
  2921. #endif
  2922. spin_unlock_irqrestore(&slic_global.driver_lock.lock,
  2923. slic_global.driver_lock.flags);
  2924. return 0;
  2925. }
  2926. static struct net_device_stats *slic_get_stats(struct net_device *dev)
  2927. {
  2928. struct adapter *adapter = netdev_priv(dev);
  2929. ASSERT(adapter);
  2930. dev->stats.collisions = adapter->slic_stats.iface.xmit_collisions;
  2931. dev->stats.rx_errors = adapter->slic_stats.iface.rcv_errors;
  2932. dev->stats.tx_errors = adapter->slic_stats.iface.xmt_errors;
  2933. dev->stats.rx_missed_errors = adapter->slic_stats.iface.rcv_discards;
  2934. dev->stats.tx_heartbeat_errors = 0;
  2935. dev->stats.tx_aborted_errors = 0;
  2936. dev->stats.tx_window_errors = 0;
  2937. dev->stats.tx_fifo_errors = 0;
  2938. dev->stats.rx_frame_errors = 0;
  2939. dev->stats.rx_length_errors = 0;
  2940. return &dev->stats;
  2941. }
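/*
 * slic_ioctl -
 *
 * Private ioctl handler: SIOCSLICSETINTAGG sets the interrupt
 * aggregation delay, and a minimal SIOCETHTOOL GSET/SSET pair reports
 * and forces link speed/duplex.
 */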
  2942. static int slic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  2943. {
  2944. struct adapter *adapter = netdev_priv(dev);
  2945. struct ethtool_cmd edata;
  2946. struct ethtool_cmd ecmd;
  2947. u32 data[7];
  2948. u32 intagg;
  2949. ASSERT(rq);
  2950. switch (cmd) {
  2951. case SIOCSLICSETINTAGG:
  2952. if (copy_from_user(data, rq->ifr_data, 28))
  2953. return -EFAULT;
  2954. intagg = data[0];
  2955. dev_err(&dev->dev, "%s: set interrupt aggregation to %d\n",
  2956. __func__, intagg);
  2957. slic_intagg_set(adapter, intagg);
  2958. return 0;
  2959. #ifdef SLIC_TRACE_DUMP_ENABLED
  2960. case SIOCSLICTRACEDUMP:
  2961. {
  2962. u32 value;
  2963. DBG_IOCTL("slic_ioctl SIOCSLIC_TRACE_DUMP\n");
  2964. if (copy_from_user(data, rq->ifr_data, 28)) {
  2965. PRINT_ERROR
  2966. ("slic: copy_from_user FAILED getting initial simba param\n");
  2967. return -EFAULT;
  2968. }
  2969. value = data[0];
  2970. if (tracemon_request == SLIC_DUMP_DONE) {
  2971. PRINT_ERROR
  2972. ("ATK Diagnostic Trace Dump Requested\n");
  2973. tracemon_request = SLIC_DUMP_REQUESTED;
  2974. tracemon_request_type = value;
  2975. tracemon_timestamp = jiffies;
  2976. } else if ((tracemon_request == SLIC_DUMP_REQUESTED) ||
  2977. (tracemon_request ==
  2978. SLIC_DUMP_IN_PROGRESS)) {
  2979. PRINT_ERROR
  2980. ("ATK Diagnostic Trace Dump Requested but already in progress... ignore\n");
  2981. } else {
  2982. PRINT_ERROR
  2983. ("ATK Diagnostic Trace Dump Requested\n");
  2984. tracemon_request = SLIC_DUMP_REQUESTED;
  2985. tracemon_request_type = value;
  2986. tracemon_timestamp = jiffies;
  2987. }
  2988. return 0;
  2989. }
  2990. #endif
  2991. case SIOCETHTOOL:
  2992. ASSERT(adapter);
  2993. if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd)))
  2994. return -EFAULT;
  2995. if (ecmd.cmd == ETHTOOL_GSET) {
  2996. edata.supported = (SUPPORTED_10baseT_Half |
  2997. SUPPORTED_10baseT_Full |
  2998. SUPPORTED_100baseT_Half |
  2999. SUPPORTED_100baseT_Full |
  3000. SUPPORTED_Autoneg | SUPPORTED_MII);
  3001. edata.port = PORT_MII;
  3002. edata.transceiver = XCVR_INTERNAL;
  3003. edata.phy_address = 0;
  3004. if (adapter->linkspeed == LINK_100MB)
  3005. edata.speed = SPEED_100;
  3006. else if (adapter->linkspeed == LINK_10MB)
  3007. edata.speed = SPEED_10;
  3008. else
  3009. edata.speed = 0;
  3010. if (adapter->linkduplex == LINK_FULLD)
  3011. edata.duplex = DUPLEX_FULL;
  3012. else
  3013. edata.duplex = DUPLEX_HALF;
  3014. edata.autoneg = AUTONEG_ENABLE;
  3015. edata.maxtxpkt = 1;
  3016. edata.maxrxpkt = 1;
  3017. if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
  3018. return -EFAULT;
  3019. } else if (ecmd.cmd == ETHTOOL_SSET) {
  3020. if (!capable(CAP_NET_ADMIN))
  3021. return -EPERM;
  3022. if (adapter->linkspeed == LINK_100MB)
  3023. edata.speed = SPEED_100;
  3024. else if (adapter->linkspeed == LINK_10MB)
  3025. edata.speed = SPEED_10;
  3026. else
  3027. edata.speed = 0;
  3028. if (adapter->linkduplex == LINK_FULLD)
  3029. edata.duplex = DUPLEX_FULL;
  3030. else
  3031. edata.duplex = DUPLEX_HALF;
  3032. edata.autoneg = AUTONEG_ENABLE;
  3033. edata.maxtxpkt = 1;
  3034. edata.maxrxpkt = 1;
  3035. if ((ecmd.speed != edata.speed) ||
  3036. (ecmd.duplex != edata.duplex)) {
  3037. u32 speed;
  3038. u32 duplex;
  3039. if (ecmd.speed == SPEED_10)
  3040. speed = 0;
  3041. else
  3042. speed = PCR_SPEED_100;
  3043. if (ecmd.duplex == DUPLEX_FULL)
  3044. duplex = PCR_DUPLEX_FULL;
  3045. else
  3046. duplex = 0;
  3047. slic_link_config(adapter, speed, duplex);
  3048. slic_link_event_handler(adapter);
  3049. }
  3050. }
  3051. return 0;
  3052. default:
  3053. return -EOPNOTSUPP;
  3054. }
  3055. }
  3056. static void slic_config_pci(struct pci_dev *pcidev)
  3057. {
  3058. u16 pci_command;
  3059. u16 new_command;
  3060. pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
  3061. new_command = pci_command | PCI_COMMAND_MASTER
  3062. | PCI_COMMAND_MEMORY
  3063. | PCI_COMMAND_INVALIDATE
  3064. | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
  3065. if (pci_command != new_command)
  3066. pci_write_config_word(pcidev, PCI_COMMAND, new_command);
  3067. }
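/*
 * slic_card_init -
 *
 * One-time card bring-up: soft-reset the hardware, download the
 * microcode, fetch and validate the EEPROM configuration (MAC
 * addresses, FRU data, checksum), download the gigabit receive
 * microcode, program interrupt aggregation, and mark the card up.
 */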
  3068. static int slic_card_init(struct sliccard *card, struct adapter *adapter)
  3069. {
  3070. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  3071. struct slic_eeprom *peeprom;
  3072. struct oslic_eeprom *pOeeprom;
  3073. dma_addr_t phys_config;
  3074. u32 phys_configh;
  3075. u32 phys_configl;
  3076. u32 i = 0;
  3077. struct slic_shmem *pshmem;
  3078. int status;
  3079. uint macaddrs = card->card_size;
  3080. ushort eecodesize;
  3081. ushort dramsize;
  3082. ushort ee_chksum;
  3083. ushort calc_chksum;
  3084. struct slic_config_mac *pmac;
  3085. unsigned char fruformat;
  3086. unsigned char oemfruformat;
  3087. struct atk_fru *patkfru;
  3088. union oemfru *poemfru;
  3089. /* Reset everything except PCI configuration space */
  3090. slic_soft_reset(adapter);
  3091. /* Download the microcode */
  3092. status = slic_card_download(adapter);
  3093. if (status != 0) {
  3094. dev_err(&adapter->pcidev->dev,
  3095. "download failed bus %d slot %d\n",
  3096. adapter->busnumber, adapter->slotnumber);
  3097. return status;
  3098. }
  3099. if (!card->config_set) {
  3100. peeprom = pci_alloc_consistent(adapter->pcidev,
  3101. sizeof(struct slic_eeprom),
  3102. &phys_config);
  3103. phys_configl = SLIC_GET_ADDR_LOW(phys_config);
  3104. phys_configh = SLIC_GET_ADDR_HIGH(phys_config);
  3105. if (!peeprom) {
  3106. dev_err(&adapter->pcidev->dev,
  3107. "eeprom read failed to get memory "
  3108. "bus %d slot %d\n", adapter->busnumber,
  3109. adapter->slotnumber);
  3110. return -ENOMEM;
  3111. } else {
  3112. memset(peeprom, 0, sizeof(struct slic_eeprom));
  3113. }
  3114. slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
  3115. mdelay(1);
  3116. pshmem = (struct slic_shmem *)adapter->phys_shmem;
  3117. spin_lock_irqsave(&adapter->bit64reglock.lock,
  3118. adapter->bit64reglock.flags);
  3119. slic_reg32_write(&slic_regs->slic_addr_upper, 0, DONT_FLUSH);
  3120. slic_reg32_write(&slic_regs->slic_isp,
  3121. SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH);
  3122. spin_unlock_irqrestore(&adapter->bit64reglock.lock,
  3123. adapter->bit64reglock.flags);
  3124. slic_config_get(adapter, phys_configl, phys_configh);
  3125. for (;;) {
  3126. if (adapter->pshmem->isr) {
  3127. if (adapter->pshmem->isr & ISR_UPC) {
  3128. adapter->pshmem->isr = 0;
  3129. slic_reg64_write(adapter,
  3130. &slic_regs->slic_isp, 0,
  3131. &slic_regs->slic_addr_upper,
  3132. 0, FLUSH);
  3133. slic_reg32_write(&slic_regs->slic_isr,
  3134. 0, FLUSH);
  3135. slic_upr_request_complete(adapter, 0);
  3136. break;
  3137. } else {
  3138. adapter->pshmem->isr = 0;
  3139. slic_reg32_write(&slic_regs->slic_isr,
  3140. 0, FLUSH);
  3141. }
  3142. } else {
  3143. mdelay(1);
  3144. i++;
  3145. if (i > 5000) {
  3146. dev_err(&adapter->pcidev->dev,
  3147. "%d config data fetch timed out!\n",
  3148. adapter->port);
  3149. slic_reg64_write(adapter,
  3150. &slic_regs->slic_isp, 0,
  3151. &slic_regs->slic_addr_upper,
  3152. 0, FLUSH);
  3153. return -EINVAL;
  3154. }
  3155. }
  3156. }
  3157. switch (adapter->devid) {
  3158. /* Oasis card */
  3159. case SLIC_2GB_DEVICE_ID:
  3160. /* extract EEPROM data and pointers to EEPROM data */
  3161. pOeeprom = (struct oslic_eeprom *) peeprom;
  3162. eecodesize = pOeeprom->EecodeSize;
  3163. dramsize = pOeeprom->DramSize;
  3164. pmac = pOeeprom->MacInfo;
  3165. fruformat = pOeeprom->FruFormat;
  3166. patkfru = &pOeeprom->AtkFru;
  3167. oemfruformat = pOeeprom->OemFruFormat;
  3168. poemfru = &pOeeprom->OemFru;
  3169. macaddrs = 2;
  3170. /* Minor kludge for Oasis card
  3171. get 2 MAC addresses from the
  3172. EEPROM to ensure that function 1
  3173. gets the Port 1 MAC address */
  3174. break;
  3175. default:
  3176. /* extract EEPROM data and pointers to EEPROM data */
  3177. eecodesize = peeprom->EecodeSize;
  3178. dramsize = peeprom->DramSize;
  3179. pmac = peeprom->u2.mac.MacInfo;
  3180. fruformat = peeprom->FruFormat;
  3181. patkfru = &peeprom->AtkFru;
  3182. oemfruformat = peeprom->OemFruFormat;
  3183. poemfru = &peeprom->OemFru;
  3184. break;
  3185. }
  3186. card->config.EepromValid = false;
  3187. /* see if the EEPROM is valid by checking its checksum */
  3188. if ((eecodesize <= MAX_EECODE_SIZE) &&
  3189. (eecodesize >= MIN_EECODE_SIZE)) {
  3190. ee_chksum =
  3191. *(u16 *) ((char *) peeprom + (eecodesize - 2));
  3192. /*
  3193. calculate the EEPROM checksum
  3194. */
  3195. calc_chksum =
  3196. ~slic_eeprom_cksum((char *) peeprom,
  3197. (eecodesize - 2));
  3198. /*
  3199. if the ucode chksum flag bit worked,
  3200. we wouldn't need this check
  3201. */
  3202. if (ee_chksum == calc_chksum)
  3203. card->config.EepromValid = true;
  3204. }
  3205. /* copy in the DRAM size */
  3206. card->config.DramSize = dramsize;
  3207. /* copy in the MAC address(es) */
  3208. for (i = 0; i < macaddrs; i++) {
  3209. memcpy(&card->config.MacInfo[i],
  3210. &pmac[i], sizeof(struct slic_config_mac));
  3211. }
  3212. /* copy the Alacritech FRU information */
  3213. card->config.FruFormat = fruformat;
  3214. memcpy(&card->config.AtkFru, patkfru,
  3215. sizeof(struct atk_fru));
  3216. pci_free_consistent(adapter->pcidev,
  3217. sizeof(struct slic_eeprom),
  3218. peeprom, phys_config);
  3219. if ((!card->config.EepromValid) &&
  3220. (adapter->reg_params.fail_on_bad_eeprom)) {
  3221. slic_reg64_write(adapter, &slic_regs->slic_isp, 0,
  3222. &slic_regs->slic_addr_upper,
  3223. 0, FLUSH);
  3224. dev_err(&adapter->pcidev->dev,
  3225. "unsupported CONFIGURATION EEPROM invalid\n");
  3226. return -EINVAL;
  3227. }
  3228. card->config_set = 1;
  3229. }
  3230. if (slic_card_download_gbrcv(adapter)) {
  3231. dev_err(&adapter->pcidev->dev,
  3232. "unable to download GB receive microcode\n");
  3233. return -EINVAL;
  3234. }
  3235. if (slic_global.dynamic_intagg)
  3236. slic_intagg_set(adapter, 0);
  3237. else
  3238. slic_intagg_set(adapter, intagg_delay);
  3239. /*
  3240. * Initialize ping status to "ok"
  3241. */
  3242. card->pingstatus = ISR_PINGMASK;
  3243. /*
  3244. * Lastly, mark our card state as up and return success
  3245. */
  3246. card->state = CARD_UP;
  3247. card->reset_in_progress = 0;
  3248. return 0;
  3249. }
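/*
 * One-time, driver-wide initialization: set up the global driver lock
 * and the driver's debug support (slic_debug_init). Guarded by
 * slic_first_init so it only runs for the first probed device.
 */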
  3250. static void slic_init_driver(void)
  3251. {
  3252. if (slic_first_init) {
  3253. slic_first_init = 0;
  3254. spin_lock_init(&slic_global.driver_lock.lock);
  3255. slic_debug_init();
  3256. }
  3257. }
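/*
 * Per-port initialization: record the PCI identity and BAR 0 mapping,
 * initialize the adapter spinlocks, build the free slic_handle list
 * (handle 0 is reserved as invalid), and allocate the DMA-coherent
 * shared-memory block used by the card.
 */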
  3258. static void slic_init_adapter(struct net_device *netdev,
  3259. struct pci_dev *pcidev,
  3260. const struct pci_device_id *pci_tbl_entry,
  3261. void __iomem *memaddr, int chip_idx)
  3262. {
  3263. ushort index;
  3264. struct slic_handle *pslic_handle;
  3265. struct adapter *adapter = netdev_priv(netdev);
  3266. /* adapter->pcidev = pcidev;*/
  3267. adapter->vendid = pci_tbl_entry->vendor;
  3268. adapter->devid = pci_tbl_entry->device;
  3269. adapter->subsysid = pci_tbl_entry->subdevice;
  3270. adapter->busnumber = pcidev->bus->number;
  3271. adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
  3272. adapter->functionnumber = (pcidev->devfn & 0x7);
  3273. adapter->memorylength = pci_resource_len(pcidev, 0);
  3274. adapter->slic_regs = (__iomem struct slic_regs *)memaddr;
  3275. adapter->irq = pcidev->irq;
  3276. /* adapter->netdev = netdev;*/
  3277. adapter->next_netdevice = head_netdevice;
  3278. head_netdevice = netdev;
  3279. adapter->chipid = chip_idx;
  3280. adapter->port = 0; /*adapter->functionnumber;*/
  3281. adapter->cardindex = adapter->port;
  3282. adapter->memorybase = memaddr;
  3283. spin_lock_init(&adapter->upr_lock.lock);
  3284. spin_lock_init(&adapter->bit64reglock.lock);
  3285. spin_lock_init(&adapter->adapter_lock.lock);
  3286. spin_lock_init(&adapter->reset_lock.lock);
  3287. spin_lock_init(&adapter->handle_lock.lock);
  3288. adapter->card_size = 1;
  3289. /*
  3290. Initialize slic_handle array
  3291. */
  3292. ASSERT(SLIC_CMDQ_MAXCMDS <= 0xFFFF);
  3293. /*
  3294. Start with 1. 0 is an invalid host handle.
  3295. */
  3296. for (index = 1, pslic_handle = &adapter->slic_handles[1];
  3297. index < SLIC_CMDQ_MAXCMDS; index++, pslic_handle++) {
  3298. pslic_handle->token.handle_index = index;
  3299. pslic_handle->type = SLIC_HANDLE_FREE;
  3300. pslic_handle->next = adapter->pfree_slic_handles;
  3301. adapter->pfree_slic_handles = pslic_handle;
  3302. }
  3303. adapter->pshmem = (struct slic_shmem *)
  3304. pci_alloc_consistent(adapter->pcidev,
  3305. sizeof(struct slic_shmem),
  3306. &adapter->
  3307. phys_shmem);
  3308. ASSERT(adapter->pshmem);
  3309. memset(adapter->pshmem, 0, sizeof(struct slic_shmem));
  3310. return;
  3311. }
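/* net_device callbacks; hooked up to the netdev in slic_entry_probe */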
  3312. static const struct net_device_ops slic_netdev_ops = {
  3313. .ndo_open = slic_entry_open,
  3314. .ndo_stop = slic_entry_halt,
  3315. .ndo_start_xmit = slic_xmit_start,
  3316. .ndo_do_ioctl = slic_ioctl,
  3317. .ndo_set_mac_address = slic_mac_set_address,
  3318. .ndo_get_stats = slic_get_stats,
  3319. .ndo_set_multicast_list = slic_mcast_set_list,
  3320. .ndo_validate_addr = eth_validate_addr,
  3321. .ndo_change_mtu = eth_change_mtu,
  3322. };
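/*
 * Associate this adapter with a logical sliccard (keyed by the hostid
 * read from card SRAM) and with a physcard (keyed by PCI slot number),
 * allocating new structures when none exist yet.
 */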
  3323. static u32 slic_card_locate(struct adapter *adapter)
  3324. {
  3325. struct sliccard *card = slic_global.slic_card;
  3326. struct physcard *physcard = slic_global.phys_card;
  3327. ushort card_hostid;
  3328. u16 __iomem *hostid_reg;
  3329. uint i;
  3330. uint rdhostid_offset = 0;
  3331. switch (adapter->devid) {
  3332. case SLIC_2GB_DEVICE_ID:
  3333. rdhostid_offset = SLIC_RDHOSTID_2GB;
  3334. break;
  3335. case SLIC_1GB_DEVICE_ID:
  3336. rdhostid_offset = SLIC_RDHOSTID_1GB;
  3337. break;
  3338. default:
  3339. ASSERT(0);
  3340. break;
  3341. }
  3342. hostid_reg =
  3343. (u16 __iomem *) (((u8 __iomem *) (adapter->slic_regs)) +
  3344. rdhostid_offset);
  3345. /* read the 16 bit hostid from SRAM */
  3346. card_hostid = (ushort) readw(hostid_reg);
  3347. /* Initialize a new card structure if need be */
  3348. if (card_hostid == SLIC_HOSTID_DEFAULT) {
  3349. card = kzalloc(sizeof(struct sliccard), GFP_KERNEL);
  3350. if (card == NULL)
  3351. return -ENOMEM;
  3352. card->next = slic_global.slic_card;
  3353. slic_global.slic_card = card;
  3354. card->busnumber = adapter->busnumber;
  3355. card->slotnumber = adapter->slotnumber;
  3356. /* Find an available cardnum */
  3357. for (i = 0; i < SLIC_MAX_CARDS; i++) {
  3358. if (slic_global.cardnuminuse[i] == 0) {
  3359. slic_global.cardnuminuse[i] = 1;
  3360. card->cardnum = i;
  3361. break;
  3362. }
  3363. }
  3364. slic_global.num_slic_cards++;
  3365. slic_debug_card_create(card);
  3366. } else {
  3367. /* Card exists, find the card this adapter belongs to */
  3368. while (card) {
  3369. if (card->cardnum == card_hostid)
  3370. break;
  3371. card = card->next;
  3372. }
  3373. }
  3374. ASSERT(card);
  3375. if (!card)
  3376. return -ENXIO;
  3377. /* Put the adapter in the card's adapter list */
  3378. ASSERT(card->adapter[adapter->port] == NULL);
  3379. if (!card->adapter[adapter->port]) {
  3380. card->adapter[adapter->port] = adapter;
  3381. adapter->card = card;
  3382. }
  3383. card->card_size = 1; /* one port per *logical* card */
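/*
 * Find the physical card this port belongs to by matching the PCI
 * slot number against the first populated port of each known physcard.
 */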
  3384. while (physcard) {
  3385. for (i = 0; i < SLIC_MAX_PORTS; i++) {
  3386. if (!physcard->adapter[i])
  3387. continue;
  3388. else
  3389. break;
  3390. }
  3391. ASSERT(i != SLIC_MAX_PORTS);
  3392. if (physcard->adapter[i]->slotnumber == adapter->slotnumber)
  3393. break;
  3394. physcard = physcard->next;
  3395. }
  3396. if (!physcard) {
  3397. /* no structure allocated for this physical card yet */
  3398. physcard = kzalloc(sizeof(struct physcard), GFP_ATOMIC);
  3399. ASSERT(physcard);
  3400. physcard->next = slic_global.phys_card;
  3401. slic_global.phys_card = physcard;
  3402. physcard->adapters_allocd = 1;
  3403. } else {
  3404. physcard->adapters_allocd++;
  3405. }
  3406. /* Note - this is ZERO relative */
  3407. adapter->physport = physcard->adapters_allocd - 1;
  3408. ASSERT(physcard->adapter[adapter->physport] == NULL);
  3409. physcard->adapter[adapter->physport] = adapter;
  3410. adapter->physcard = physcard;
  3411. return 0;
  3412. }
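/*
 * PCI probe entry point: enable the device, pick a 64- or 32-bit DMA
 * mask, reserve the BAR regions, allocate the net_device, map BAR 0,
 * initialize driver/adapter/card state, and register the netdev.
 */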
  3413. static int __devinit slic_entry_probe(struct pci_dev *pcidev,
  3414. const struct pci_device_id *pci_tbl_entry)
  3415. {
  3416. static int cards_found;
  3417. static int did_version;
  3418. int err = -ENODEV;
  3419. struct net_device *netdev;
  3420. struct adapter *adapter;
  3421. void __iomem *memmapped_ioaddr = NULL;
  3422. u32 status = 0;
  3423. ulong mmio_start = 0;
  3424. ulong mmio_len = 0;
  3425. struct sliccard *card = NULL;
  3426. int pci_using_dac = 0;
  3427. slic_global.dynamic_intagg = dynamic_intagg;
  3428. err = pci_enable_device(pcidev);
  3429. if (err)
  3430. return err;
  3431. if (slic_debug > 0 && did_version++ == 0) {
  3432. printk(KERN_DEBUG "%s\n", slic_banner);
  3433. printk(KERN_DEBUG "%s\n", slic_proc_version);
  3434. }
  3435. if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
  3436. pci_using_dac = 1;
  3437. if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
  3438. dev_err(&pcidev->dev, "unable to obtain 64-bit DMA for "
  3439. "consistent allocations\n");
  3440. goto err_out_disable_pci;
  3441. }
  3442. } else if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
  3443. pci_using_dac = 0;
  3444. pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
  3445. } else {
  3446. dev_err(&pcidev->dev, "no usable DMA configuration\n");
  3447. goto err_out_disable_pci;
  3448. }
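/* Reserve the BAR regions and enable bus mastering before mapping BAR 0 */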
  3449. err = pci_request_regions(pcidev, DRV_NAME);
  3450. if (err) {
  3451. dev_err(&pcidev->dev, "can't obtain PCI resources\n");
  3452. goto err_out_disable_pci;
  3453. }
  3454. pci_set_master(pcidev);
  3455. netdev = alloc_etherdev(sizeof(struct adapter));
  3456. if (!netdev) {
  3457. err = -ENOMEM;
  3458. goto err_out_exit_slic_probe;
  3459. }
  3460. SET_NETDEV_DEV(netdev, &pcidev->dev);
  3461. pci_set_drvdata(pcidev, netdev);
  3462. adapter = netdev_priv(netdev);
  3463. adapter->netdev = netdev;
  3464. adapter->pcidev = pcidev;
  3465. if (pci_using_dac)
  3466. netdev->features |= NETIF_F_HIGHDMA;
  3467. mmio_start = pci_resource_start(pcidev, 0);
  3468. mmio_len = pci_resource_len(pcidev, 0);
  3469. /* memmapped_ioaddr = (u32)ioremap_nocache(mmio_start, mmio_len);*/
  3470. memmapped_ioaddr = ioremap(mmio_start, mmio_len);
  3471. if (!memmapped_ioaddr) {
  3472. dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n",
  3473. mmio_len, mmio_start);
  3474. goto err_out_free_netdev;
  3475. }
  3476. slic_config_pci(pcidev);
  3477. slic_init_driver();
  3478. slic_init_adapter(netdev,
  3479. pcidev, pci_tbl_entry, memmapped_ioaddr, cards_found);
  3480. status = slic_card_locate(adapter);
  3481. if (status) {
  3482. dev_err(&pcidev->dev, "cannot locate card\n");
3483. goto err_out_unmap;
  3484. }
  3485. card = adapter->card;
  3486. if (!adapter->allocated) {
  3487. card->adapters_allocated++;
  3488. adapter->allocated = 1;
  3489. }
  3490. status = slic_card_init(card, adapter);
  3491. if (status != 0) {
  3492. card->state = CARD_FAIL;
  3493. adapter->state = ADAPT_FAIL;
  3494. adapter->linkstate = LINK_DOWN;
  3495. dev_err(&pcidev->dev, "FAILED status[%x]\n", status);
  3496. } else {
  3497. slic_adapter_set_hwaddr(adapter);
  3498. }
  3499. netdev->base_addr = (unsigned long)adapter->memorybase;
  3500. netdev->irq = adapter->irq;
  3501. netdev->netdev_ops = &slic_netdev_ops;
  3502. slic_debug_adapter_create(adapter);
  3503. strcpy(netdev->name, "eth%d");
  3504. err = register_netdev(netdev);
  3505. if (err) {
  3506. dev_err(&pcidev->dev, "Cannot register net device, aborting.\n");
  3507. goto err_out_unmap;
  3508. }
  3509. cards_found++;
  3510. return status;
  3511. err_out_unmap:
  3512. iounmap(memmapped_ioaddr);
  3515. err_out_free_netdev:
  3516. free_netdev(netdev);
  3517. err_out_exit_slic_probe:
  3518. pci_release_regions(pcidev);
  3519. err_out_disable_pci:
  3520. pci_disable_device(pcidev);
  3521. return err;
  3522. }
  3523. static struct pci_driver slic_driver = {
  3524. .name = DRV_NAME,
  3525. .id_table = slic_pci_tbl,
  3526. .probe = slic_entry_probe,
  3527. .remove = __devexit_p(slic_entry_remove),
  3528. };
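/*
 * Module init/exit: register and unregister the PCI driver; a
 * non-negative 'debug' value overrides the default slic_debug level.
 */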
  3529. static int __init slic_module_init(void)
  3530. {
  3531. slic_init_driver();
  3532. if (debug >= 0 && slic_debug != debug)
  3533. printk(KERN_DEBUG KBUILD_MODNAME ": debug level is %d.\n",
  3534. debug);
  3535. if (debug >= 0)
  3536. slic_debug = debug;
  3537. return pci_register_driver(&slic_driver);
  3538. }
  3539. static void __exit slic_module_cleanup(void)
  3540. {
  3541. pci_unregister_driver(&slic_driver);
  3542. slic_debug_cleanup();
  3543. }
  3544. module_init(slic_module_init);
  3545. module_exit(slic_module_cleanup);