/drivers/atm/idt77252.c

https://bitbucket.org/cresqo/cm7-p500-kernel · C · 3848 lines · 3016 code · 692 blank · 140 comment · 456 complexity · e0b7d4123871e1c07ce850d5ba2a4253 MD5 · raw file

Large files are truncated; click here to view the full file.

  1. /*******************************************************************
  2. *
  3. * Copyright (c) 2000 ATecoM GmbH
  4. *
  5. * The author may be reached at ecd@atecom.com.
  6. *
  7. * This program is free software; you can redistribute it and/or modify it
  8. * under the terms of the GNU General Public License as published by the
  9. * Free Software Foundation; either version 2 of the License, or (at your
  10. * option) any later version.
  11. *
  12. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  13. * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  14. * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
  15. * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  16. * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  17. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  18. * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  19. * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  20. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  21. * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  22. *
  23. * You should have received a copy of the GNU General Public License along
  24. * with this program; if not, write to the Free Software Foundation, Inc.,
  25. * 675 Mass Ave, Cambridge, MA 02139, USA.
  26. *
  27. *******************************************************************/
  28. #include <linux/module.h>
  29. #include <linux/pci.h>
  30. #include <linux/poison.h>
  31. #include <linux/skbuff.h>
  32. #include <linux/kernel.h>
  33. #include <linux/vmalloc.h>
  34. #include <linux/netdevice.h>
  35. #include <linux/atmdev.h>
  36. #include <linux/atm.h>
  37. #include <linux/delay.h>
  38. #include <linux/init.h>
  39. #include <linux/bitops.h>
  40. #include <linux/wait.h>
  41. #include <linux/jiffies.h>
  42. #include <linux/mutex.h>
  43. #include <linux/slab.h>
  44. #include <asm/io.h>
  45. #include <asm/uaccess.h>
  46. #include <asm/atomic.h>
  47. #include <asm/byteorder.h>
  48. #ifdef CONFIG_ATM_IDT77252_USE_SUNI
  49. #include "suni.h"
  50. #endif /* CONFIG_ATM_IDT77252_USE_SUNI */
  51. #include "idt77252.h"
  52. #include "idt77252_tables.h"
  53. static unsigned int vpibits = 1;
  54. #define ATM_IDT77252_SEND_IDLE 1
  55. /*
  56. * Debug HACKs.
  57. */
  58. #define DEBUG_MODULE 1
  59. #undef HAVE_EEPROM /* does not work, yet. */
  60. #ifdef CONFIG_ATM_IDT77252_DEBUG
  61. static unsigned long debug = DBG_GENERAL;
  62. #endif
  63. #define SAR_RX_DELAY (SAR_CFG_RXINT_NODELAY)
  64. /*
  65. * SCQ Handling.
  66. */
  67. static struct scq_info *alloc_scq(struct idt77252_dev *, int);
  68. static void free_scq(struct idt77252_dev *, struct scq_info *);
  69. static int queue_skb(struct idt77252_dev *, struct vc_map *,
  70. struct sk_buff *, int oam);
  71. static void drain_scq(struct idt77252_dev *, struct vc_map *);
  72. static unsigned long get_free_scd(struct idt77252_dev *, struct vc_map *);
  73. static void fill_scd(struct idt77252_dev *, struct scq_info *, int);
  74. /*
  75. * FBQ Handling.
  76. */
  77. static int push_rx_skb(struct idt77252_dev *,
  78. struct sk_buff *, int queue);
  79. static void recycle_rx_skb(struct idt77252_dev *, struct sk_buff *);
  80. static void flush_rx_pool(struct idt77252_dev *, struct rx_pool *);
  81. static void recycle_rx_pool_skb(struct idt77252_dev *,
  82. struct rx_pool *);
  83. static void add_rx_skb(struct idt77252_dev *, int queue,
  84. unsigned int size, unsigned int count);
  85. /*
  86. * RSQ Handling.
  87. */
  88. static int init_rsq(struct idt77252_dev *);
  89. static void deinit_rsq(struct idt77252_dev *);
  90. static void idt77252_rx(struct idt77252_dev *);
  91. /*
  92. * TSQ handling.
  93. */
  94. static int init_tsq(struct idt77252_dev *);
  95. static void deinit_tsq(struct idt77252_dev *);
  96. static void idt77252_tx(struct idt77252_dev *);
  97. /*
  98. * ATM Interface.
  99. */
  100. static void idt77252_dev_close(struct atm_dev *dev);
  101. static int idt77252_open(struct atm_vcc *vcc);
  102. static void idt77252_close(struct atm_vcc *vcc);
  103. static int idt77252_send(struct atm_vcc *vcc, struct sk_buff *skb);
  104. static int idt77252_send_oam(struct atm_vcc *vcc, void *cell,
  105. int flags);
  106. static void idt77252_phy_put(struct atm_dev *dev, unsigned char value,
  107. unsigned long addr);
  108. static unsigned char idt77252_phy_get(struct atm_dev *dev, unsigned long addr);
  109. static int idt77252_change_qos(struct atm_vcc *vcc, struct atm_qos *qos,
  110. int flags);
  111. static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos,
  112. char *page);
  113. static void idt77252_softint(struct work_struct *work);
/* Operations exported to the ATM layer for devices driven by this module. */
static struct atmdev_ops idt77252_ops =
{
	.dev_close	= idt77252_dev_close,
	.open		= idt77252_open,
	.close		= idt77252_close,
	.send		= idt77252_send,
	.send_oam	= idt77252_send_oam,
	.phy_put	= idt77252_phy_put,
	.phy_get	= idt77252_phy_get,
	.change_qos	= idt77252_change_qos,
	.proc_read	= idt77252_proc_read,
	.owner		= THIS_MODULE
};
  127. static struct idt77252_dev *idt77252_chain = NULL;
  128. static unsigned int idt77252_sram_write_errors = 0;
  129. /*****************************************************************************/
  130. /* */
  131. /* I/O and Utility Bus */
  132. /* */
  133. /*****************************************************************************/
  134. static void
  135. waitfor_idle(struct idt77252_dev *card)
  136. {
  137. u32 stat;
  138. stat = readl(SAR_REG_STAT);
  139. while (stat & SAR_STAT_CMDBZ)
  140. stat = readl(SAR_REG_STAT);
  141. }
  142. static u32
  143. read_sram(struct idt77252_dev *card, unsigned long addr)
  144. {
  145. unsigned long flags;
  146. u32 value;
  147. spin_lock_irqsave(&card->cmd_lock, flags);
  148. writel(SAR_CMD_READ_SRAM | (addr << 2), SAR_REG_CMD);
  149. waitfor_idle(card);
  150. value = readl(SAR_REG_DR0);
  151. spin_unlock_irqrestore(&card->cmd_lock, flags);
  152. return value;
  153. }
/*
 * Write one 32-bit word to the SAR's internal SRAM.
 *
 * As a debugging aid, a warning is printed when the target address
 * falls into the jump entry at the end of either transmit schedule
 * table (TST).  NOTE(review): idt77252_sram_write_errors is tested
 * here but never incremented anywhere in this chunk, so the warning
 * appears to be effectively unconditional -- confirm against the
 * full source.
 *
 * The data write, command issue and completion wait are serialized
 * with card->cmd_lock.
 */
static void
write_sram(struct idt77252_dev *card, unsigned long addr, u32 value)
{
	unsigned long flags;

	/* Warn on writes into the TST JMP section (last word of either
	 * TST region) while no write errors have been recorded. */
	if ((idt77252_sram_write_errors == 0) &&
	    (((addr > card->tst[0] + card->tst_size - 2) &&
	      (addr < card->tst[0] + card->tst_size)) ||
	     ((addr > card->tst[1] + card->tst_size - 2) &&
	      (addr < card->tst[1] + card->tst_size)))) {
		printk("%s: ERROR: TST JMP section at %08lx written: %08x\n",
		       card->name, addr, value);
	}

	spin_lock_irqsave(&card->cmd_lock, flags);
	writel(value, SAR_REG_DR0);	/* data word first ... */
	writel(SAR_CMD_WRITE_SRAM | (addr << 2), SAR_REG_CMD); /* ... then command */
	waitfor_idle(card);
	spin_unlock_irqrestore(&card->cmd_lock, flags);
}
/*
 * Read one byte from a device on the SAR's utility bus (e.g. the PHY).
 *
 * @dev:       opaque device pointer, actually a struct idt77252_dev *
 * @ubus_addr: utility-bus address to read from
 *
 * Returns the byte read.  NOTE(review): on a NULL device this
 * returns -1, which truncates to 0xff in the u8 return type and is
 * indistinguishable from valid data; callers cannot detect the error.
 */
static u8
read_utility(void *dev, unsigned long ubus_addr)
{
	struct idt77252_dev *card = dev;
	unsigned long flags;
	u8 value;

	if (!card) {
		printk("Error: No such device.\n");
		return -1;
	}

	/* Command issue, completion wait and result fetch are
	 * serialized with card->cmd_lock. */
	spin_lock_irqsave(&card->cmd_lock, flags);
	writel(SAR_CMD_READ_UTILITY + ubus_addr, SAR_REG_CMD);
	waitfor_idle(card);
	value = readl(SAR_REG_DR0);
	spin_unlock_irqrestore(&card->cmd_lock, flags);

	return value;
}
  189. static void
  190. write_utility(void *dev, unsigned long ubus_addr, u8 value)
  191. {
  192. struct idt77252_dev *card = dev;
  193. unsigned long flags;
  194. if (!card) {
  195. printk("Error: No such device.\n");
  196. return;
  197. }
  198. spin_lock_irqsave(&card->cmd_lock, flags);
  199. writel((u32) value, SAR_REG_DR0);
  200. writel(SAR_CMD_WRITE_UTILITY + ubus_addr, SAR_REG_CMD);
  201. waitfor_idle(card);
  202. spin_unlock_irqrestore(&card->cmd_lock, flags);
  203. }
  204. #ifdef HAVE_EEPROM
/*
 * Bit-bang waveform tables for the serial EEPROM attached to the
 * SAR's general purpose (GP) register.  Each element is a set of GP
 * line states (clock EESCLK, chip select EECS, data-out EEDO); the
 * eeprom helpers below play a table back one element at a time with
 * a 5us hold per step.  Two consecutive entries form one bit cell
 * (clock low, then clock high, with the same data value).
 */

/* "Read status register" command sequence. */
static u32 rdsrtab[] =
{
	SAR_GP_EECS | SAR_GP_EESCLK,
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	SAR_GP_EEDO,
	SAR_GP_EESCLK | SAR_GP_EEDO,	/* 1 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	SAR_GP_EEDO,
	SAR_GP_EESCLK | SAR_GP_EEDO	/* 1 */
};

/* "Write enable" command sequence. */
static u32 wrentab[] =
{
	SAR_GP_EECS | SAR_GP_EESCLK,
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	SAR_GP_EEDO,
	SAR_GP_EESCLK | SAR_GP_EEDO,	/* 1 */
	SAR_GP_EEDO,
	SAR_GP_EESCLK | SAR_GP_EEDO,	/* 1 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK			/* 0 */
};

/* "Read data" command sequence (address bits are shifted out
 * separately via clktab). */
static u32 rdtab[] =
{
	SAR_GP_EECS | SAR_GP_EESCLK,
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	SAR_GP_EEDO,
	SAR_GP_EESCLK | SAR_GP_EEDO,	/* 1 */
	SAR_GP_EEDO,
	SAR_GP_EESCLK | SAR_GP_EEDO	/* 1 */
};

/* "Write data" command sequence. */
static u32 wrtab[] =
{
	SAR_GP_EECS | SAR_GP_EESCLK,
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	0,
	SAR_GP_EESCLK,			/* 0 */
	SAR_GP_EEDO,
	SAR_GP_EESCLK | SAR_GP_EEDO,	/* 1 */
	0,
	SAR_GP_EESCLK			/* 0 */
};

/* Plain clock pulses, used while shifting address and data bits. */
static u32 clktab[] =
{
	0,
	SAR_GP_EESCLK,
	0,
	SAR_GP_EESCLK,
	0,
	SAR_GP_EESCLK,
	0,
	SAR_GP_EESCLK,
	0,
	SAR_GP_EESCLK,
	0,
	SAR_GP_EESCLK,
	0,
	SAR_GP_EESCLK,
	0,
	SAR_GP_EESCLK,
	0
};
/*
 * Read the SAR's general purpose register, which carries the
 * bit-banged EEPROM lines (SAR_GP_EEDI is the data-in bit).
 */
static u32
idt77252_read_gp(struct idt77252_dev *card)
{
	u32 gp;

	gp = readl(SAR_REG_GP);
#if 0
	printk("RD: %s\n", gp & SAR_GP_EEDI ? "1" : "0");
#endif
	return gp;
}
/*
 * Write the SAR's general purpose register (drives the bit-banged
 * EEPROM lines).  Waits under card->cmd_lock for the command unit to
 * go idle before writing, so the write cannot race a pending command.
 */
static void
idt77252_write_gp(struct idt77252_dev *card, u32 value)
{
	unsigned long flags;

#if 0
	printk("WR: %s %s %s\n", value & SAR_GP_EECS ? "   " : "/CS",
	       value & SAR_GP_EESCLK ? "HIGH" : "LOW ",
	       value & SAR_GP_EEDO   ? "1" : "0");
#endif

	spin_lock_irqsave(&card->cmd_lock, flags);
	waitfor_idle(card);
	writel(value, SAR_REG_GP);
	spin_unlock_irqrestore(&card->cmd_lock, flags);
}
/*
 * Bit-bang the serial EEPROM's "read status register" command and
 * clock in the 8-bit reply, most significant bit first.  Every line
 * transition is held for 5us.
 */
static u8
idt77252_eeprom_read_status(struct idt77252_dev *card)
{
	u8 byte;
	u32 gp;
	int i, j;

	/* Preserve the non-EEPROM GP bits; this function drives only
	 * clock, chip select and data-out. */
	gp = idt77252_read_gp(card) & ~(SAR_GP_EESCLK|SAR_GP_EECS|SAR_GP_EEDO);

	/* Play back the command waveform. */
	for (i = 0; i < ARRAY_SIZE(rdsrtab); i++) {
		idt77252_write_gp(card, gp | rdsrtab[i]);
		udelay(5);
	}

	/* Drive chip select only between command and reply phases. */
	idt77252_write_gp(card, gp | SAR_GP_EECS);
	udelay(5);

	/* Clock in eight reply bits, MSB first: sample EEDI on each
	 * pair of clock edges from clktab. */
	byte = 0;
	for (i = 0, j = 0; i < 8; i++) {
		byte <<= 1;

		idt77252_write_gp(card, gp | clktab[j++]);
		udelay(5);

		byte |= idt77252_read_gp(card) & SAR_GP_EEDI ? 1 : 0;

		idt77252_write_gp(card, gp | clktab[j++]);
		udelay(5);
	}
	idt77252_write_gp(card, gp | SAR_GP_EECS);
	udelay(5);

	return byte;
}
/*
 * Read one data byte from the serial EEPROM: play back the READ
 * command waveform, shift out the 8-bit @offset least significant
 * bit first, then clock in the addressed byte MSB first.
 */
static u8
idt77252_eeprom_read_byte(struct idt77252_dev *card, u8 offset)
{
	u8 byte;
	u32 gp;
	int i, j;

	/* Preserve non-EEPROM GP bits. */
	gp = idt77252_read_gp(card) & ~(SAR_GP_EESCLK|SAR_GP_EECS|SAR_GP_EEDO);

	/* READ command waveform. */
	for (i = 0; i < ARRAY_SIZE(rdtab); i++) {
		idt77252_write_gp(card, gp | rdtab[i]);
		udelay(5);
	}
	idt77252_write_gp(card, gp | SAR_GP_EECS);
	udelay(5);

	/* Shift out the address, LSB first: each bit is held for two
	 * clock edges taken from clktab. */
	for (i = 0, j = 0; i < 8; i++) {
		idt77252_write_gp(card, gp | clktab[j++] |
				  (offset & 1 ? SAR_GP_EEDO : 0));
		udelay(5);

		idt77252_write_gp(card, gp | clktab[j++] |
				  (offset & 1 ? SAR_GP_EEDO : 0));
		udelay(5);

		offset >>= 1;
	}
	idt77252_write_gp(card, gp | SAR_GP_EECS);
	udelay(5);

	/* Clock in the data byte, MSB first, sampling EEDI. */
	byte = 0;
	for (i = 0, j = 0; i < 8; i++) {
		byte <<= 1;

		idt77252_write_gp(card, gp | clktab[j++]);
		udelay(5);

		byte |= idt77252_read_gp(card) & SAR_GP_EEDI ? 1 : 0;

		idt77252_write_gp(card, gp | clktab[j++]);
		udelay(5);
	}
	idt77252_write_gp(card, gp | SAR_GP_EECS);
	udelay(5);

	return byte;
}
/*
 * Write one data byte to the serial EEPROM: issue the write-enable
 * waveform, then the WRITE command waveform, then shift out the
 * 8-bit @offset and @data, each least significant bit first.
 */
static void
idt77252_eeprom_write_byte(struct idt77252_dev *card, u8 offset, u8 data)
{
	u32 gp;
	int i, j;

	/* Preserve non-EEPROM GP bits. */
	gp = idt77252_read_gp(card) & ~(SAR_GP_EESCLK|SAR_GP_EECS|SAR_GP_EEDO);

	/* Write-enable command waveform. */
	for (i = 0; i < ARRAY_SIZE(wrentab); i++) {
		idt77252_write_gp(card, gp | wrentab[i]);
		udelay(5);
	}
	idt77252_write_gp(card, gp | SAR_GP_EECS);
	udelay(5);

	/* WRITE command waveform. */
	for (i = 0; i < ARRAY_SIZE(wrtab); i++) {
		idt77252_write_gp(card, gp | wrtab[i]);
		udelay(5);
	}
	idt77252_write_gp(card, gp | SAR_GP_EECS);
	udelay(5);

	/* Shift out the address, LSB first, two clock edges per bit. */
	for (i = 0, j = 0; i < 8; i++) {
		idt77252_write_gp(card, gp | clktab[j++] |
				  (offset & 1 ? SAR_GP_EEDO : 0));
		udelay(5);

		idt77252_write_gp(card, gp | clktab[j++] |
				  (offset & 1 ? SAR_GP_EEDO : 0));
		udelay(5);

		offset >>= 1;
	}
	idt77252_write_gp(card, gp | SAR_GP_EECS);
	udelay(5);

	/* Shift out the data byte, LSB first. */
	for (i = 0, j = 0; i < 8; i++) {
		idt77252_write_gp(card, gp | clktab[j++] |
				  (data & 1 ? SAR_GP_EEDO : 0));
		udelay(5);

		idt77252_write_gp(card, gp | clktab[j++] |
				  (data & 1 ? SAR_GP_EEDO : 0));
		udelay(5);

		data >>= 1;
	}
	idt77252_write_gp(card, gp | SAR_GP_EECS);
	udelay(5);
}
/*
 * Pulse the EEPROM clock twice with chip select driven, apparently
 * to bring the bit-banged serial interface into a known state before
 * first use -- confirm against the EEPROM part's datasheet.
 */
static void
idt77252_eeprom_init(struct idt77252_dev *card)
{
	u32 gp;

	/* Preserve non-EEPROM GP bits. */
	gp = idt77252_read_gp(card) & ~(SAR_GP_EESCLK|SAR_GP_EECS|SAR_GP_EEDO);

	idt77252_write_gp(card, gp | SAR_GP_EECS | SAR_GP_EESCLK);
	udelay(5);
	idt77252_write_gp(card, gp | SAR_GP_EECS);
	udelay(5);
	idt77252_write_gp(card, gp | SAR_GP_EECS | SAR_GP_EESCLK);
	udelay(5);
	idt77252_write_gp(card, gp | SAR_GP_EECS);
	udelay(5);
}
  447. #endif /* HAVE_EEPROM */
  448. #ifdef CONFIG_ATM_IDT77252_DEBUG
  449. static void
  450. dump_tct(struct idt77252_dev *card, int index)
  451. {
  452. unsigned long tct;
  453. int i;
  454. tct = (unsigned long) (card->tct_base + index * SAR_SRAM_TCT_SIZE);
  455. printk("%s: TCT %x:", card->name, index);
  456. for (i = 0; i < 8; i++) {
  457. printk(" %08x", read_sram(card, tct + i));
  458. }
  459. printk("\n");
  460. }
  461. static void
  462. idt77252_tx_dump(struct idt77252_dev *card)
  463. {
  464. struct atm_vcc *vcc;
  465. struct vc_map *vc;
  466. int i;
  467. printk("%s\n", __func__);
  468. for (i = 0; i < card->tct_size; i++) {
  469. vc = card->vcs[i];
  470. if (!vc)
  471. continue;
  472. vcc = NULL;
  473. if (vc->rx_vcc)
  474. vcc = vc->rx_vcc;
  475. else if (vc->tx_vcc)
  476. vcc = vc->tx_vcc;
  477. if (!vcc)
  478. continue;
  479. printk("%s: Connection %d:\n", card->name, vc->index);
  480. dump_tct(card, vc->index);
  481. }
  482. }
  483. #endif
  484. /*****************************************************************************/
  485. /* */
  486. /* SCQ Handling */
  487. /* */
  488. /*****************************************************************************/
  489. static int
  490. sb_pool_add(struct idt77252_dev *card, struct sk_buff *skb, int queue)
  491. {
  492. struct sb_pool *pool = &card->sbpool[queue];
  493. int index;
  494. index = pool->index;
  495. while (pool->skb[index]) {
  496. index = (index + 1) & FBQ_MASK;
  497. if (index == pool->index)
  498. return -ENOBUFS;
  499. }
  500. pool->skb[index] = skb;
  501. IDT77252_PRV_POOL(skb) = POOL_HANDLE(queue, index);
  502. pool->index = (index + 1) & FBQ_MASK;
  503. return 0;
  504. }
  505. static void
  506. sb_pool_remove(struct idt77252_dev *card, struct sk_buff *skb)
  507. {
  508. unsigned int queue, index;
  509. u32 handle;
  510. handle = IDT77252_PRV_POOL(skb);
  511. queue = POOL_QUEUE(handle);
  512. if (queue > 3)
  513. return;
  514. index = POOL_INDEX(handle);
  515. if (index > FBQ_SIZE - 1)
  516. return;
  517. card->sbpool[queue].skb[index] = NULL;
  518. }
  519. static struct sk_buff *
  520. sb_pool_skb(struct idt77252_dev *card, u32 handle)
  521. {
  522. unsigned int queue, index;
  523. queue = POOL_QUEUE(handle);
  524. if (queue > 3)
  525. return NULL;
  526. index = POOL_INDEX(handle);
  527. if (index > FBQ_SIZE - 1)
  528. return NULL;
  529. return card->sbpool[queue].skb[index];
  530. }
/*
 * Allocate and initialize a segmentation channel queue (SCQ): the
 * host-side control structure plus a DMA-coherent ring of transmit
 * descriptors (SCQ_SIZE bytes, SCQ_ENTRIES slots).
 *
 * Returns the new SCQ, or NULL on allocation failure.  @class is
 * accepted for symmetry with fill_scd()/clear_scd() but unused here.
 */
static struct scq_info *
alloc_scq(struct idt77252_dev *card, int class)
{
	struct scq_info *scq;

	scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
	if (!scq)
		return NULL;
	scq->base = pci_alloc_consistent(card->pcidev, SCQ_SIZE,
					 &scq->paddr);
	if (scq->base == NULL) {
		kfree(scq);
		return NULL;
	}
	memset(scq->base, 0, SCQ_SIZE);

	/* Ring starts empty: write pointer at base, last valid slot
	 * at the end of the ring. */
	scq->next = scq->base;
	scq->last = scq->base + (SCQ_ENTRIES - 1);
	atomic_set(&scq->used, 0);

	spin_lock_init(&scq->lock);
	spin_lock_init(&scq->skblock);

	skb_queue_head_init(&scq->transmit);
	skb_queue_head_init(&scq->pending);

	TXPRINTK("idt77252: SCQ: base 0x%p, next 0x%p, last 0x%p, paddr %08llx\n",
		 scq->base, scq->next, scq->last, (unsigned long long)scq->paddr);

	return scq;
}
  556. static void
  557. free_scq(struct idt77252_dev *card, struct scq_info *scq)
  558. {
  559. struct sk_buff *skb;
  560. struct atm_vcc *vcc;
  561. pci_free_consistent(card->pcidev, SCQ_SIZE,
  562. scq->base, scq->paddr);
  563. while ((skb = skb_dequeue(&scq->transmit))) {
  564. pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
  565. skb->len, PCI_DMA_TODEVICE);
  566. vcc = ATM_SKB(skb)->vcc;
  567. if (vcc->pop)
  568. vcc->pop(vcc, skb);
  569. else
  570. dev_kfree_skb(skb);
  571. }
  572. while ((skb = skb_dequeue(&scq->pending))) {
  573. pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
  574. skb->len, PCI_DMA_TODEVICE);
  575. vcc = ATM_SKB(skb)->vcc;
  576. if (vcc->pop)
  577. vcc->pop(vcc, skb);
  578. else
  579. dev_kfree_skb(skb);
  580. }
  581. kfree(scq);
  582. }
/*
 * Append one prepared transmit buffer descriptor (TBD) to @vc's SCQ
 * ring and, if the VC had gone idle, restart it via the SAR's
 * transmit command queue.
 *
 * Returns 0 on success, or -ENOBUFS when the ring is full; in that
 * case a stuck-queue warning is printed at most once per second.
 */
static int
push_on_scq(struct idt77252_dev *card, struct vc_map *vc, struct sk_buff *skb)
{
	struct scq_info *scq = vc->scq;
	unsigned long flags;
	struct scqe *tbd;
	int entries;

	TXPRINTK("%s: SCQ: next 0x%p\n", card->name, scq->next);

	/* Reserve a ring slot; back out if that would overflow the
	 * ring (inc-then-check keeps the counter consistent against
	 * concurrent pushers). */
	atomic_inc(&scq->used);
	entries = atomic_read(&scq->used);
	if (entries > (SCQ_ENTRIES - 1)) {
		atomic_dec(&scq->used);
		goto out;
	}

	skb_queue_tail(&scq->transmit, skb);

	/* Rate estimation: account this PDU's cells and, when the
	 * socket's send buffer is more than half full, raise the last
	 * ACR back to the initial rate. */
	spin_lock_irqsave(&vc->lock, flags);
	if (vc->estimator) {
		struct atm_vcc *vcc = vc->tx_vcc;
		struct sock *sk = sk_atm(vcc);

		vc->estimator->cells += (skb->len + 47) / 48;
		if (atomic_read(&sk->sk_wmem_alloc) >
		    (sk->sk_sndbuf >> 1)) {
			u32 cps = vc->estimator->maxcps;

			vc->estimator->cps = cps;
			vc->estimator->avcps = cps << 5;
			if (vc->lacr < vc->init_er) {
				vc->lacr = vc->init_er;
				writel(TCMDQ_LACR | (vc->lacr << 16) |
				       vc->index, SAR_REG_TCMDQ);
			}
		}
	}
	spin_unlock_irqrestore(&vc->lock, flags);

	/* Copy the TBD into the ring slot (converting to the SAR's
	 * little-endian layout) and advance the ring pointer, wrapping
	 * at the last slot. */
	tbd = &IDT77252_PRV_TBD(skb);

	spin_lock_irqsave(&scq->lock, flags);
	scq->next->word_1 = cpu_to_le32(tbd->word_1 |
					SAR_TBD_TSIF | SAR_TBD_GTSI);
	scq->next->word_2 = cpu_to_le32(tbd->word_2);
	scq->next->word_3 = cpu_to_le32(tbd->word_3);
	scq->next->word_4 = cpu_to_le32(tbd->word_4);

	if (scq->next == scq->last)
		scq->next = scq->base;
	else
		scq->next++;

	/* Publish the new tail: write the bus address of the next free
	 * slot into the segmentation channel descriptor in SRAM. */
	write_sram(card, scq->scd,
		   scq->paddr +
		   (u32)((unsigned long)scq->next - (unsigned long)scq->base));
	spin_unlock_irqrestore(&scq->lock, flags);

	scq->trans_start = jiffies;

	/* Kick transmission on a VC that had gone idle. */
	if (test_and_clear_bit(VCF_IDLE, &vc->flags)) {
		writel(TCMDQ_START_LACR | (vc->lacr << 16) | vc->index,
		       SAR_REG_TCMDQ);
	}

	TXPRINTK("%d entries in SCQ used (push).\n", atomic_read(&scq->used));
	XPRINTK("%s: SCQ (after push %2d) head = 0x%x, next = 0x%p.\n",
		card->name, atomic_read(&scq->used),
		read_sram(card, scq->scd + 1), scq->next);

	return 0;

out:
	/* Ring full: complain (rate-limited to once per second) if the
	 * queue looks stuck. */
	if (time_after(jiffies, scq->trans_start + HZ)) {
		printk("%s: Error pushing TBD for %d.%d\n",
		       card->name, vc->tx_vcc->vpi, vc->tx_vcc->vci);
#ifdef CONFIG_ATM_IDT77252_DEBUG
		idt77252_tx_dump(card);
#endif
		scq->trans_start = jiffies;
	}

	return -ENOBUFS;
}
/*
 * Complete one transmitted skb on @vc's SCQ: unmap it, hand it back
 * through the VCC pop callback (or free it), bump the TX counter and
 * release its ring slot.  Then refill the ring from the pending
 * queue until the ring is full again or the queue is empty.
 */
static void
drain_scq(struct idt77252_dev *card, struct vc_map *vc)
{
	struct scq_info *scq = vc->scq;
	struct sk_buff *skb;
	struct atm_vcc *vcc;

	TXPRINTK("%s: SCQ (before drain %2d) next = 0x%p.\n",
		 card->name, atomic_read(&scq->used), scq->next);

	skb = skb_dequeue(&scq->transmit);
	if (skb) {
		TXPRINTK("%s: freeing skb at %p.\n", card->name, skb);
		pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
				 skb->len, PCI_DMA_TODEVICE);

		vcc = ATM_SKB(skb)->vcc;

		if (vcc->pop)
			vcc->pop(vcc, skb);
		else
			dev_kfree_skb(skb);

		atomic_inc(&vcc->stats->tx);
	}
	/* One ring entry was consumed even if no skb was found. */
	atomic_dec(&scq->used);

	/* Move pending skbs onto the ring; stop at the first one that
	 * no longer fits and put it back at the head. */
	spin_lock(&scq->skblock);
	while ((skb = skb_dequeue(&scq->pending))) {
		if (push_on_scq(card, vc, skb)) {
			skb_queue_head(&vc->scq->pending, skb);
			break;
		}
	}
	spin_unlock(&scq->skblock);
}
/*
 * Build a transmit buffer descriptor for @skb and queue it on the
 * connection's SCQ.
 *
 * OAM cells and AAL0/AAL3-4 traffic must be a single raw cell (52
 * bytes: 4-byte header + 48-byte payload); the cell header bytes are
 * packed into TBD word 4 and the payload DMA address skips past the
 * header.  AAL5 PDUs are described by one whole-PDU descriptor.
 *
 * Returns 0 on success; -EINVAL for a bad length or an attempt to
 * transmit on a reserved VC, -EPROTONOSUPPORT for unsupported AAL
 * types.  On error the DMA mapping is undone; freeing the skb is
 * left to the caller.
 */
static int
queue_skb(struct idt77252_dev *card, struct vc_map *vc,
	  struct sk_buff *skb, int oam)
{
	struct atm_vcc *vcc;
	struct scqe *tbd;
	unsigned long flags;
	int error;
	int aal;

	if (skb->len == 0) {
		printk("%s: invalid skb->len (%d)\n", card->name, skb->len);
		return -EINVAL;
	}

	TXPRINTK("%s: Sending %d bytes of data.\n",
		 card->name, skb->len);

	tbd = &IDT77252_PRV_TBD(skb);
	vcc = ATM_SKB(skb)->vcc;

	IDT77252_PRV_PADDR(skb) = pci_map_single(card->pcidev, skb->data,
						 skb->len, PCI_DMA_TODEVICE);

	error = -EINVAL;

	if (oam) {
		if (skb->len != 52)
			goto errout;

		tbd->word_1 = SAR_TBD_OAM | ATM_CELL_PAYLOAD | SAR_TBD_EPDU;
		tbd->word_2 = IDT77252_PRV_PADDR(skb) + 4; /* skip cell header */
		tbd->word_3 = 0x00000000;
		/* Cell header goes into TBD word 4, byte 0 is the MSB. */
		tbd->word_4 = (skb->data[0] << 24) | (skb->data[1] << 16) |
			      (skb->data[2] << 8) | (skb->data[3] << 0);

		/* OAM on a reserved VC is redirected to connection 0. */
		if (test_bit(VCF_RSV, &vc->flags))
			vc = card->vcs[0];

		goto done;
	}

	if (test_bit(VCF_RSV, &vc->flags)) {
		printk("%s: Trying to transmit on reserved VC\n", card->name);
		goto errout;
	}

	aal = vcc->qos.aal;

	switch (aal) {
	case ATM_AAL0:
	case ATM_AAL34:
		/* Raw-cell service: exactly one cell per skb. */
		if (skb->len > 52)
			goto errout;

		if (aal == ATM_AAL0)
			tbd->word_1 = SAR_TBD_EPDU | SAR_TBD_AAL0 |
				      ATM_CELL_PAYLOAD;
		else
			tbd->word_1 = SAR_TBD_EPDU | SAR_TBD_AAL34 |
				      ATM_CELL_PAYLOAD;

		tbd->word_2 = IDT77252_PRV_PADDR(skb) + 4; /* skip cell header */
		tbd->word_3 = 0x00000000;
		tbd->word_4 = (skb->data[0] << 24) | (skb->data[1] << 16) |
			      (skb->data[2] << 8) | (skb->data[3] << 0);
		break;

	case ATM_AAL5:
		tbd->word_1 = SAR_TBD_EPDU | SAR_TBD_AAL5 | skb->len;
		tbd->word_2 = IDT77252_PRV_PADDR(skb);
		tbd->word_3 = skb->len;
		tbd->word_4 = (vcc->vpi << SAR_TBD_VPI_SHIFT) |
			      (vcc->vci << SAR_TBD_VCI_SHIFT);
		break;

	case ATM_AAL1:
	case ATM_AAL2:
	default:
		printk("%s: Traffic type not supported.\n", card->name);
		error = -EPROTONOSUPPORT;
		goto errout;
	}

done:
	/* Queue on the pending list, then move pending entries onto
	 * the SCQ ring until it fills; whatever does not fit stays
	 * pending and is pushed later from drain_scq(). */
	spin_lock_irqsave(&vc->scq->skblock, flags);
	skb_queue_tail(&vc->scq->pending, skb);

	while ((skb = skb_dequeue(&vc->scq->pending))) {
		if (push_on_scq(card, vc, skb)) {
			skb_queue_head(&vc->scq->pending, skb);
			break;
		}
	}
	spin_unlock_irqrestore(&vc->scq->skblock, flags);

	return 0;

errout:
	pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
			 skb->len, PCI_DMA_TODEVICE);
	return error;
}
  765. static unsigned long
  766. get_free_scd(struct idt77252_dev *card, struct vc_map *vc)
  767. {
  768. int i;
  769. for (i = 0; i < card->scd_size; i++) {
  770. if (!card->scd2vc[i]) {
  771. card->scd2vc[i] = vc;
  772. vc->scd_index = i;
  773. return card->scd_base + i * SAR_SRAM_SCD_SIZE;
  774. }
  775. }
  776. return 0;
  777. }
  778. static void
  779. fill_scd(struct idt77252_dev *card, struct scq_info *scq, int class)
  780. {
  781. write_sram(card, scq->scd, scq->paddr);
  782. write_sram(card, scq->scd + 1, 0x00000000);
  783. write_sram(card, scq->scd + 2, 0xffffffff);
  784. write_sram(card, scq->scd + 3, 0x00000000);
  785. }
  786. static void
  787. clear_scd(struct idt77252_dev *card, struct scq_info *scq, int class)
  788. {
  789. return;
  790. }
  791. /*****************************************************************************/
  792. /* */
  793. /* RSQ Handling */
  794. /* */
  795. /*****************************************************************************/
/*
 * Allocate and program the receive status queue (RSQ): a
 * DMA-coherent ring of RSQ_NUM_ENTRIES entries.  The host read
 * pointer starts at the last entry so that the first entry the
 * hardware writes lands at the ring base.
 *
 * Returns 0 on success, -1 if the DMA allocation fails.
 */
static int
init_rsq(struct idt77252_dev *card)
{
	struct rsq_entry *rsqe;

	card->rsq.base = pci_alloc_consistent(card->pcidev, RSQSIZE,
					      &card->rsq.paddr);
	if (card->rsq.base == NULL) {
		printk("%s: can't allocate RSQ.\n", card->name);
		return -1;
	}
	memset(card->rsq.base, 0, RSQSIZE);

	card->rsq.last = card->rsq.base + RSQ_NUM_ENTRIES - 1;
	card->rsq.next = card->rsq.last;
	/* Clear every status word so no entry appears valid yet. */
	for (rsqe = card->rsq.base; rsqe <= card->rsq.last; rsqe++)
		rsqe->word_4 = 0;

	/* Program the head register (read pointer, as a byte offset
	 * into the ring) and the ring's DMA base address. */
	writel((unsigned long) card->rsq.last - (unsigned long) card->rsq.base,
	       SAR_REG_RSQH);
	writel(card->rsq.paddr, SAR_REG_RSQB);

	IPRINTK("%s: RSQ base at 0x%lx (0x%x).\n", card->name,
		(unsigned long) card->rsq.base,
		readl(SAR_REG_RSQB));
	IPRINTK("%s: RSQ head = 0x%x, base = 0x%x, tail = 0x%x.\n",
		card->name,
		readl(SAR_REG_RSQH),
		readl(SAR_REG_RSQB),
		readl(SAR_REG_RSQT));

	return 0;
}
/* Free the DMA-coherent receive status queue allocated by init_rsq(). */
static void
deinit_rsq(struct idt77252_dev *card)
{
	pci_free_consistent(card->pcidev, RSQSIZE,
			    card->rsq.base, card->rsq.paddr);
}
/*
 * Process one receive status queue entry.
 *
 * Looks up the receive buffer (via its free-buffer-pool handle in
 * word 2) and the target connection (VPI/VCI from word 1), then:
 *  - AAL0/AAL3-4: delivers each raw cell as its own small skb,
 *    prefixed with a reconstructed 4-byte ATM header word;
 *  - AAL5: collects buffers in the connection's rx_pool until the
 *    end-of-PDU flag arrives, validates the AAL5 trailer length and
 *    CRC status, and hands the reassembled PDU to the VCC.
 * Buffers that cannot be delivered are recycled back to the free
 * buffer queues.
 */
static void
dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
{
	struct atm_vcc *vcc;
	struct sk_buff *skb;
	struct rx_pool *rpp;
	struct vc_map *vc;
	u32 header, vpi, vci;
	u32 stat;
	int i;

	stat = le32_to_cpu(rsqe->word_4);

	/* Status-only entry for an inactive connection: nothing to do. */
	if (stat & SAR_RSQE_IDLE) {
		RXPRINTK("%s: message about inactive connection.\n",
			 card->name);
		return;
	}

	/* word_2 carries the pool handle of the receive buffer. */
	skb = sb_pool_skb(card, le32_to_cpu(rsqe->word_2));
	if (skb == NULL) {
		printk("%s: NULL skb in %s, rsqe: %08x %08x %08x %08x\n",
		       card->name, __func__,
		       le32_to_cpu(rsqe->word_1), le32_to_cpu(rsqe->word_2),
		       le32_to_cpu(rsqe->word_3), le32_to_cpu(rsqe->word_4));
		return;
	}

	/* word_1 holds the received cell header; extract VPI and VCI. */
	header = le32_to_cpu(rsqe->word_1);
	vpi = (header >> 16) & 0x00ff;
	vci = (header >>  0) & 0xffff;

	RXPRINTK("%s: SDU for %d.%d received in buffer 0x%p (data 0x%p).\n",
		 card->name, vpi, vci, skb, skb->data);

	/* Drop traffic outside the configured VPI/VCI address range. */
	if ((vpi >= (1 << card->vpibits)) || (vci != (vci & card->vcimask))) {
		printk("%s: SDU received for out-of-range vc %u.%u\n",
		       card->name, vpi, vci);
		recycle_rx_skb(card, skb);
		return;
	}

	/* Map VPI.VCI to the connection; it must be open for RX. */
	vc = card->vcs[VPCI2VC(card, vpi, vci)];
	if (!vc || !test_bit(VCF_RX, &vc->flags)) {
		printk("%s: SDU received on non RX vc %u.%u\n",
		       card->name, vpi, vci);
		recycle_rx_skb(card, skb);
		return;
	}

	vcc = vc->rx_vcc;

	/* Make the DMA'd data visible to the CPU before touching it. */
	pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(skb),
				    skb_end_pointer(skb) - skb->data,
				    PCI_DMA_FROMDEVICE);

	if ((vcc->qos.aal == ATM_AAL0) ||
	    (vcc->qos.aal == ATM_AAL34)) {
		struct sk_buff *sb;
		unsigned char *cell;
		u32 aal0;

		cell = skb->data;
		/* Deliver each of the CELLCNT raw cells as its own
		 * small skb: a 4-byte reconstructed header word
		 * followed by the 48-byte payload. */
		for (i = (stat & SAR_RSQE_CELLCNT); i; i--) {
			if ((sb = dev_alloc_skb(64)) == NULL) {
				printk("%s: Can't allocate buffers for aal0.\n",
				       card->name);
				atomic_add(i, &vcc->stats->rx_drop);
				break;
			}
			if (!atm_charge(vcc, sb->truesize)) {
				RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
					 card->name);
				atomic_add(i - 1, &vcc->stats->rx_drop);
				dev_kfree_skb(sb);
				break;
			}
			/* Rebuild the header word from VPI/VCI plus the
			 * end-of-PDU and CLP indications. */
			aal0 = (vpi << ATM_HDR_VPI_SHIFT) |
			       (vci << ATM_HDR_VCI_SHIFT);
			aal0 |= (stat & SAR_RSQE_EPDU) ? 0x00000002 : 0;
			aal0 |= (stat & SAR_RSQE_CLP)  ? 0x00000001 : 0;

			*((u32 *) sb->data) = aal0;
			skb_put(sb, sizeof(u32));
			memcpy(skb_put(sb, ATM_CELL_PAYLOAD),
			       cell, ATM_CELL_PAYLOAD);

			ATM_SKB(sb)->vcc = vcc;
			__net_timestamp(sb);
			vcc->push(vcc, sb);
			atomic_inc(&vcc->stats->rx);

			cell += ATM_CELL_PAYLOAD;
		}

		recycle_rx_skb(card, skb);
		return;
	}

	if (vcc->qos.aal != ATM_AAL5) {
		printk("%s: Unexpected AAL type in dequeue_rx(): %d.\n",
		       card->name, vcc->qos.aal);
		recycle_rx_skb(card, skb);
		return;
	}

	/* AAL5: this buffer carries CELLCNT cells of payload; append
	 * it to the connection's reassembly pool. */
	skb->len = (stat & SAR_RSQE_CELLCNT) * ATM_CELL_PAYLOAD;

	rpp = &vc->rcv.rx_pool;

	__skb_queue_tail(&rpp->queue, skb);
	rpp->len += skb->len;

	if (stat & SAR_RSQE_EPDU) {
		unsigned char *l1l2;
		unsigned int len;

		/* End of PDU: the AAL5 trailer's 16-bit length field
		 * sits 6 bytes from the end of the last cell. */
		l1l2 = (unsigned char *) ((unsigned long) skb->data + skb->len - 6);

		len = (l1l2[0] << 8) | l1l2[1];
		len = len ? len : 0x10000;	/* length 0 encodes 64KiB */

		RXPRINTK("%s: PDU has %d bytes.\n", card->name, len);

		/* Sanity check: collected data must cover payload plus
		 * 8-byte trailer, with at most 47 bytes of padding. */
		if ((len + 8 > rpp->len) || (len + (47 + 8) < rpp->len)) {
			RXPRINTK("%s: AAL5 PDU size mismatch: %d != %d. "
				 "(CDC: %08x)\n",
				 card->name, len, rpp->len, readl(SAR_REG_CDC));
			recycle_rx_pool_skb(card, rpp);
			atomic_inc(&vcc->stats->rx_err);
			return;
		}
		if (stat & SAR_RSQE_CRC) {
			RXPRINTK("%s: AAL5 CRC error.\n", card->name);
			recycle_rx_pool_skb(card, rpp);
			atomic_inc(&vcc->stats->rx_err);
			return;
		}

		/* PDU spans several buffers: allocate one flat skb and
		 * copy the fragments into it, then recycle them. */
		if (skb_queue_len(&rpp->queue) > 1) {
			struct sk_buff *sb;

			skb = dev_alloc_skb(rpp->len);
			if (!skb) {
				RXPRINTK("%s: Can't alloc RX skb.\n",
					 card->name);
				recycle_rx_pool_skb(card, rpp);
				atomic_inc(&vcc->stats->rx_err);
				return;
			}
			if (!atm_charge(vcc, skb->truesize)) {
				recycle_rx_pool_skb(card, rpp);
				dev_kfree_skb(skb);
				return;
			}
			skb_queue_walk(&rpp->queue, sb)
				memcpy(skb_put(skb, sb->len),
				       sb->data, sb->len);
			recycle_rx_pool_skb(card, rpp);

			skb_trim(skb, len);
			ATM_SKB(skb)->vcc = vcc;
			__net_timestamp(skb);

			vcc->push(vcc, skb);
			atomic_inc(&vcc->stats->rx);

			return;
		}

		/* Single-buffer PDU: hand the receive skb itself to
		 * the stack and replenish the free-buffer queue with a
		 * fresh skb of the matching size class. */
		flush_rx_pool(card, rpp);

		if (!atm_charge(vcc, skb->truesize)) {
			recycle_rx_skb(card, skb);
			return;
		}

		pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
				 skb_end_pointer(skb) - skb->data,
				 PCI_DMA_FROMDEVICE);
		sb_pool_remove(card, skb);

		skb_trim(skb, len);
		ATM_SKB(skb)->vcc = vcc;
		__net_timestamp(skb);

		vcc->push(vcc, skb);
		atomic_inc(&vcc->stats->rx);

		if (skb->truesize > SAR_FB_SIZE_3)
			add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
		else if (skb->truesize > SAR_FB_SIZE_2)
			add_rx_skb(card, 2, SAR_FB_SIZE_2, 1);
		else if (skb->truesize > SAR_FB_SIZE_1)
			add_rx_skb(card, 1, SAR_FB_SIZE_1, 1);
		else
			add_rx_skb(card, 0, SAR_FB_SIZE_0, 1);
		return;
	}
}
/*
 * Service the Receive Status Queue (RSQ).
 *
 * The RSQ is a ring of rsq_entry descriptors written by the SAR chip.
 * Starting at the entry after ->next (wrapping from ->last back to
 * ->base), consume every entry whose word_4 has SAR_RSQE_VALID set,
 * hand it to dequeue_rx(), and finally report how far we got by
 * writing the byte offset of the last processed entry to SAR_REG_RSQH.
 */
static void
idt77252_rx(struct idt77252_dev *card)
{
	struct rsq_entry *rsqe;

	/* Advance to the entry following the last one we processed. */
	if (card->rsq.next == card->rsq.last)
		rsqe = card->rsq.base;
	else
		rsqe = card->rsq.next + 1;

	if (!(le32_to_cpu(rsqe->word_4) & SAR_RSQE_VALID)) {
		RXPRINTK("%s: no entry in RSQ.\n", card->name);
		return;
	}

	do {
		dequeue_rx(card, rsqe);

		/* Clear VALID so the SAR can reuse this entry. */
		rsqe->word_4 = 0;

		card->rsq.next = rsqe;
		if (card->rsq.next == card->rsq.last)
			rsqe = card->rsq.base;
		else
			rsqe = card->rsq.next + 1;
	} while (le32_to_cpu(rsqe->word_4) & SAR_RSQE_VALID);

	/* Tell the chip our read position as a byte offset into the ring. */
	writel((unsigned long) card->rsq.next - (unsigned long) card->rsq.base,
	       SAR_REG_RSQH);
}
/*
 * Drain raw (AAL0) cells from the raw cell queue.
 *
 * Raw cells arrive in large pool buffers chained by the hardware; each
 * cell occupies 64 bytes (16 bytes of status/header, 52 bytes payload
 * area read at offset 16).  Walk cells between our software head and
 * the chip's tail pointer (SAR_REG_RAWCT), push each valid AAL0 cell
 * to its VCC, and follow the link words at the end of a buffer to the
 * next pool buffer when fewer than 128 bytes remain.
 */
static void
idt77252_rx_raw(struct idt77252_dev *card)
{
	struct sk_buff *queue;
	u32 head, tail;
	struct atm_vcc *vcc;
	struct vc_map *vc;
	struct sk_buff *sb;

	/* Lazily resolve the current raw-cell buffer from its pool handle. */
	if (card->raw_cell_head == NULL) {
		u32 handle = le32_to_cpu(*(card->raw_cell_hnd + 1));
		card->raw_cell_head = sb_pool_skb(card, handle);
	}

	queue = card->raw_cell_head;
	if (!queue)
		return;

	/* Bus address of the current read position (16-byte status skipped). */
	head = IDT77252_PRV_PADDR(queue) + (queue->data - queue->head - 16);
	tail = readl(SAR_REG_RAWCT);

	pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
				    skb_end_pointer(queue) - queue->head - 16,
				    PCI_DMA_FROMDEVICE);

	while (head != tail) {
		unsigned int vpi, vci, pti;
		u32 header;

		/* First word of the cell is the ATM cell header. */
		header = le32_to_cpu(*(u32 *) &queue->data[0]);

		vpi = (header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
		vci = (header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
		pti = (header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;

#ifdef CONFIG_ATM_IDT77252_DEBUG
		if (debug & DBG_RAW_CELL) {
			int i;

			printk("%s: raw cell %x.%02x.%04x.%x.%x\n",
			       card->name, (header >> 28) & 0x000f,
			       (header >> 20) & 0x00ff,
			       (header >> 4) & 0xffff,
			       (header >> 1) & 0x0007,
			       (header >> 0) & 0x0001);
			for (i = 16; i < 64; i++)
				printk(" %02x", queue->data[i]);
			printk("\n");
		}
#endif
		if (vpi >= (1<<card->vpibits) || vci >= (1<<card->vcibits)) {
			RPRINTK("%s: SDU received for out-of-range vc %u.%u\n",
				card->name, vpi, vci);
			goto drop;
		}

		vc = card->vcs[VPCI2VC(card, vpi, vci)];
		if (!vc || !test_bit(VCF_RX, &vc->flags)) {
			RPRINTK("%s: SDU received on non RX vc %u.%u\n",
				card->name, vpi, vci);
			goto drop;
		}

		vcc = vc->rx_vcc;

		if (vcc->qos.aal != ATM_AAL0) {
			RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
				card->name, vpi, vci);
			atomic_inc(&vcc->stats->rx_drop);
			goto drop;
		}

		if ((sb = dev_alloc_skb(64)) == NULL) {
			printk("%s: Can't allocate buffers for AAL0.\n",
			       card->name);
			atomic_inc(&vcc->stats->rx_err);
			goto drop;
		}

		if (!atm_charge(vcc, sb->truesize)) {
			RXPRINTK("%s: atm_charge() dropped AAL0 packets.\n",
				 card->name);
			dev_kfree_skb(sb);
			goto drop;
		}

		/* AAL0 skb = 4-byte cell header + 48-byte payload. */
		*((u32 *) sb->data) = header;
		skb_put(sb, sizeof(u32));
		memcpy(skb_put(sb, ATM_CELL_PAYLOAD), &(queue->data[16]),
		       ATM_CELL_PAYLOAD);

		ATM_SKB(sb)->vcc = vcc;
		__net_timestamp(sb);
		vcc->push(vcc, sb);
		atomic_inc(&vcc->stats->rx);

drop:
		/* Advance one 64-byte cell within the current buffer. */
		skb_pull(queue, 64);

		head = IDT77252_PRV_PADDR(queue)
					+ (queue->data - queue->head - 16);

		if (queue->len < 128) {
			struct sk_buff *next;
			u32 handle;

			/* Buffer exhausted: first two words link to the
			 * next buffer (new head address and pool handle). */
			head = le32_to_cpu(*(u32 *) &queue->data[0]);
			handle = le32_to_cpu(*(u32 *) &queue->data[4]);

			next = sb_pool_skb(card, handle);
			recycle_rx_skb(card, queue);

			if (next) {
				card->raw_cell_head = next;
				queue = card->raw_cell_head;
				pci_dma_sync_single_for_cpu(card->pcidev,
						IDT77252_PRV_PADDR(queue),
						(skb_end_pointer(queue) -
						 queue->data),
						PCI_DMA_FROMDEVICE);
			} else {
				card->raw_cell_head = NULL;
				printk("%s: raw cell queue overrun\n",
				       card->name);
				break;
			}
		}
	}
}
  1126. /*****************************************************************************/
  1127. /* */
  1128. /* TSQ Handling */
  1129. /* */
  1130. /*****************************************************************************/
  1131. static int
  1132. init_tsq(struct idt77252_dev *card)
  1133. {
  1134. struct tsq_entry *tsqe;
  1135. card->tsq.base = pci_alloc_consistent(card->pcidev, RSQSIZE,
  1136. &card->tsq.paddr);
  1137. if (card->tsq.base == NULL) {
  1138. printk("%s: can't allocate TSQ.\n", card->name);
  1139. return -1;
  1140. }
  1141. memset(card->tsq.base, 0, TSQSIZE);
  1142. card->tsq.last = card->tsq.base + TSQ_NUM_ENTRIES - 1;
  1143. card->tsq.next = card->tsq.last;
  1144. for (tsqe = card->tsq.base; tsqe <= card->tsq.last; tsqe++)
  1145. tsqe->word_2 = cpu_to_le32(SAR_TSQE_INVALID);
  1146. writel(card->tsq.paddr, SAR_REG_TSQB);
  1147. writel((unsigned long) card->tsq.next - (unsigned long) card->tsq.base,
  1148. SAR_REG_TSQH);
  1149. return 0;
  1150. }
/* Free the DMA-coherent TSQ ring allocated by init_tsq(). */
static void
deinit_tsq(struct idt77252_dev *card)
{
	pci_free_consistent(card->pcidev, TSQSIZE,
			    card->tsq.base, card->tsq.paddr);
}
/*
 * Service the Transmit Status Queue (TSQ).
 *
 * Walks the TSQ ring from the entry after ->next, dispatching each
 * valid entry by type (timer rollover, idle connection, TSR or TBD
 * completion), marks it invalid for reuse, and finally reports the new
 * read position to the chip via SAR_REG_TSQH.
 */
static void
idt77252_tx(struct idt77252_dev *card)
{
	struct tsq_entry *tsqe;
	unsigned int vpi, vci;
	struct vc_map *vc;
	u32 conn, stat;

	/* Advance to the entry after the last one processed (ring wrap). */
	if (card->tsq.next == card->tsq.last)
		tsqe = card->tsq.base;
	else
		tsqe = card->tsq.next + 1;

	TXPRINTK("idt77252_tx: tsq %p: base %p, next %p, last %p\n", tsqe,
		 card->tsq.base, card->tsq.next, card->tsq.last);
	TXPRINTK("idt77252_tx: tsqb %08x, tsqt %08x, tsqh %08x, \n",
		 readl(SAR_REG_TSQB),
		 readl(SAR_REG_TSQT),
		 readl(SAR_REG_TSQH));

	stat = le32_to_cpu(tsqe->word_2);
	if (stat & SAR_TSQE_INVALID)
		return;

	do {
		TXPRINTK("tsqe: 0x%p [0x%08x 0x%08x]\n", tsqe,
			 le32_to_cpu(tsqe->word_1),
			 le32_to_cpu(tsqe->word_2));

		switch (stat & SAR_TSQE_TYPE) {
		case SAR_TSQE_TYPE_TIMER:
			/* Periodic timer wrap report; nothing to do. */
			TXPRINTK("%s: Timer RollOver detected.\n", card->name);
			break;

		case SAR_TSQE_TYPE_IDLE:
			conn = le32_to_cpu(tsqe->word_1);

			/* Tag 0x10 marks a halted connection; ignore it. */
			if (SAR_TSQE_TAG(stat) == 0x10) {
#ifdef NOTDEF
				printk("%s: Connection %d halted.\n",
				       card->name,
				       le32_to_cpu(tsqe->word_1) & 0x1fff);
#endif
				break;
			}

			/* Low 13 bits of word_1 index the connection. */
			vc = card->vcs[conn & 0x1fff];
			if (!vc) {
				printk("%s: could not find VC from conn %d\n",
				       card->name, conn & 0x1fff);
				break;
			}

			printk("%s: Connection %d IDLE.\n",
			       card->name, vc->index);

			set_bit(VCF_IDLE, &vc->flags);
			break;

		case SAR_TSQE_TYPE_TSR:
			conn = le32_to_cpu(tsqe->word_1);

			vc = card->vcs[conn & 0x1fff];
			if (!vc) {
				printk("%s: no VC at index %d\n",
				       card->name,
				       le32_to_cpu(tsqe->word_1) & 0x1fff);
				break;
			}

			/* Completion: release transmitted SCQ buffers. */
			drain_scq(card, vc);
			break;

		case SAR_TSQE_TYPE_TBD_COMP:
			conn = le32_to_cpu(tsqe->word_1);

			/* TBD completions carry VPI.VCI, not a VC index. */
			vpi = (conn >> SAR_TBD_VPI_SHIFT) & 0x00ff;
			vci = (conn >> SAR_TBD_VCI_SHIFT) & 0xffff;

			if (vpi >= (1 << card->vpibits) ||
			    vci >= (1 << card->vcibits)) {
				printk("%s: TBD complete: "
				       "out of range VPI.VCI %u.%u\n",
				       card->name, vpi, vci);
				break;
			}

			vc = card->vcs[VPCI2VC(card, vpi, vci)];
			if (!vc) {
				printk("%s: TBD complete: "
				       "no VC at VPI.VCI %u.%u\n",
				       card->name, vpi, vci);
				break;
			}

			drain_scq(card, vc);
			break;
		}

		/* Hand the entry back to the chip and advance. */
		tsqe->word_2 = cpu_to_le32(SAR_TSQE_INVALID);

		card->tsq.next = tsqe;
		if (card->tsq.next == card->tsq.last)
			tsqe = card->tsq.base;
		else
			tsqe = card->tsq.next + 1;

		TXPRINTK("tsqe: %p: base %p, next %p, last %p\n", tsqe,
			 card->tsq.base, card->tsq.next, card->tsq.last);

		stat = le32_to_cpu(tsqe->word_2);
	} while (!(stat & SAR_TSQE_INVALID));

	writel((unsigned long)card->tsq.next - (unsigned long)card->tsq.base,
	       SAR_REG_TSQH);

	XPRINTK("idt77252_tx-after writel%d: TSQ head = 0x%x, tail = 0x%x, next = 0x%p.\n",
		card->index, readl(SAR_REG_TSQH),
		readl(SAR_REG_TSQT), card->tsq.next);
}
/*
 * Timer callback driving the two-phase Transmit Schedule Table (TST)
 * switch.  The card keeps two TST copies in SRAM: the one indexed by
 * tst_index is active ('base'), the other is the shadow being edited
 * ('idle').  TST_SWITCH_PENDING pushes staged soft-TST entries into
 * the shadow and arms a switch; TST_SWITCH_WAIT waits until the SAR's
 * schedule pointer has moved over before finalizing the swap.
 */
static void
tst_timer(unsigned long data)
{
	struct idt77252_dev *card = (struct idt77252_dev *)data;
	unsigned long base, idle, jump;
	unsigned long flags;
	u32 pc;
	int e;

	spin_lock_irqsave(&card->tst_lock, flags);

	base = card->tst[card->tst_index];
	idle = card->tst[card->tst_index ^ 1];

	if (test_bit(TST_SWITCH_WAIT, &card->tst_state)) {
		jump = base + card->tst_size - 2;

		/* Retry later while the schedule pointer (SAR_REG_NOW)
		 * is still outside the other table's address range. */
		pc = readl(SAR_REG_NOW) >> 2;
		if ((pc ^ idle) & ~(card->tst_size - 1)) {
			mod_timer(&card->tst_timer, jiffies + 1);
			goto out;
		}

		clear_bit(TST_SWITCH_WAIT, &card->tst_state);

		/* Finalize the swap and re-link the end of the now-idle
		 * table as a jump back to its own start. */
		card->tst_index ^= 1;
		write_sram(card, jump, TSTE_OPC_JMP | (base << 2));

		base = card->tst[card->tst_index];
		idle = card->tst[card->tst_index ^ 1];

		/* Mirror entries queued for the (new) shadow table. */
		for (e = 0; e < card->tst_size - 2; e++) {
			if (card->soft_tst[e].tste & TSTE_PUSH_IDLE) {
				write_sram(card, idle + e,
					   card->soft_tst[e].tste & TSTE_MASK);
				card->soft_tst[e].tste &= ~(TSTE_PUSH_IDLE);
			}
		}
	}

	if (test_and_clear_bit(TST_SWITCH_PENDING, &card->tst_state)) {
		/* Push pending edits into the shadow table ... */
		for (e = 0; e < card->tst_size - 2; e++) {
			if (card->soft_tst[e].tste & TSTE_PUSH_ACTIVE) {
				write_sram(card, idle + e,
					   card->soft_tst[e].tste & TSTE_MASK);
				card->soft_tst[e].tste &= ~(TSTE_PUSH_ACTIVE);
				card->soft_tst[e].tste |= TSTE_PUSH_IDLE;
			}
		}

		/* ... terminate the active table so the SAR runs off its
		 * end, then wait for it in the TST_SWITCH_WAIT phase. */
		jump = base + card->tst_size - 2;

		write_sram(card, jump, TSTE_OPC_NULL);

		set_bit(TST_SWITCH_WAIT, &card->tst_state);

		mod_timer(&card->tst_timer, jiffies + 1);
	}

out:
	spin_unlock_irqrestore(&card->tst_lock, flags);
}
/*
 * Insert 'n' TST entries for connection 'vc' (opcode 'opc'), spaced
 * evenly across the usable table (tst_size - 2 slots) by a running
 * accumulator 'cl'.  Entries are staged in card->soft_tst[] and either
 * written to the idle (shadow) table immediately, or only flagged
 * TSTE_PUSH_ACTIVE for tst_timer() to push when a switch is already
 * in flight.  Caller must hold card->tst_lock.
 * Returns 0 on success, -1 if no free TST slot exists.
 */
static int
__fill_tst(struct idt77252_dev *card, struct vc_map *vc,
	   int n, unsigned int opc)
{
	unsigned long cl, avail;
	unsigned long idle;
	int e, r;
	u32 data;

	/* Find the first free slot to start scattering from. */
	avail = card->tst_size - 2;
	for (e = 0; e < avail; e++) {
		if (card->soft_tst[e].vc == NULL)
			break;
	}
	if (e >= avail) {
		printk("%s: No free TST entries found\n", card->name);
		return -1;
	}

	NPRINTK("%s: conn %d: first TST entry at %d.\n",
		card->name, vc ? vc->index : -1, e);

	r = n;
	cl = avail;
	data = opc & TSTE_OPC_MASK;
	if (vc && (opc != TSTE_OPC_NULL))
		data = opc | vc->index;

	idle = card->tst[card->tst_index ^ 1];

	/*
	 * Fill Soft TST.
	 */
	while (r > 0) {
		/* Place an entry roughly every avail/n slots: 'cl' gains
		 * n per slot and loses tst_size per placed entry. */
		if ((cl >= avail) && (card->soft_tst[e].vc == NULL)) {
			/* (void *)-1 marks a slot used without an owner VC. */
			if (vc)
				card->soft_tst[e].vc = vc;
			else
				card->soft_tst[e].vc = (void *)-1;

			card->soft_tst[e].tste = data;
			if (timer_pending(&card->tst_timer))
				card->soft_tst[e].tste |= TSTE_PUSH_ACTIVE;
			else {
				write_sram(card, idle + e, data);
				card->soft_tst[e].tste |= TSTE_PUSH_IDLE;
			}

			cl -= card->tst_size;
			r--;
		}

		if (++e == avail)
			e = 0;
		cl += n;
	}

	return 0;
}
  1351. static int
  1352. fill_tst(struct idt77252_dev *card, struct vc_map *vc, int n, unsigned int opc)
  1353. {
  1354. unsigned long flags;
  1355. int res;
  1356. spin_lock_irqsave(&card->tst_lock, flags);
  1357. res = __fill_tst(card, vc, n, opc);
  1358. set_bit(TST_SWITCH_PENDING, &card->tst_state);
  1359. if (!timer_pending(&card->tst_timer))
  1360. mod_timer(&card->tst_timer, jiffies + 1);
  1361. spin_unlock_irqrestore(&card->tst_lock, flags);
  1362. return res;
  1363. }
  1364. static int
  1365. __clear_tst(struct idt77252_dev *card, struct vc_map *vc)
  1366. {
  1367. unsigned long idle;
  1368. int e;
  1369. idle = card->tst[card->tst_index ^ 1];
  1370. for (e = 0; e < card->tst_size - 2; e++) {
  1371. if (card->soft_tst[e].vc == vc) {
  1372. card->soft_tst[e].vc = NULL;
  1373. card->soft_tst[e].tste = TSTE_OPC_VAR;
  1374. if (timer_pending(&card->tst_timer))
  1375. card->soft_tst[e].tste |= TSTE_PUSH_ACTIVE;
  1376. else {
  1377. write_sram(card, idle + e, TSTE_OPC_VAR);
  1378. card->soft_tst[e].tste |= TSTE_PUSH_IDLE;
  1379. }
  1380. }
  1381. }
  1382. return 0;
  1383. }
  1384. static int
  1385. clear_tst(struct idt77252_dev *card, struct vc_map *vc)
  1386. {
  1387. unsigned long flags;
  1388. int res;
  1389. spin_lock_irqsave(&card->tst_lock, flags);
  1390. res = __clear_tst(card, vc);
  1391. set_bit(TST_SWITCH_PENDING, &card->tst_state);
  1392. if (!timer_pending(&card->tst_timer))
  1393. mod_timer(&card->tst_timer, jiffies + 1);
  1394. spin_unlock_irqrestore(&card->tst_lock, flags);
  1395. return res;
  1396. }
  1397. static int
  1398. change_tst(struct idt77252_dev *card, struct vc_map *vc,
  1399. int n, unsigned int opc)
  1400. {
  1401. unsigned long flags;
  1402. int res;
  1403. spin_lock_irqsave(&card->tst_lock, flags);
  1404. __clear_tst(card, vc);
  1405. res = __fill_tst(card, vc, n, opc);
  1406. set_bit(TST_SWITCH_PENDING, &card->tst_state);
  1407. if (!timer_pending(&card->tst_timer))
  1408. mod_timer(&card->tst_timer, jiffies + 1);
  1409. spin_unlock_irqrestore(&card->tst_lock, flags);
  1410. return res;
  1411. }
/*
 * Program the 8-word Transmit Connection Table (TCT) entry in SRAM
 * for connection 'vc' according to its scheduling class, linking it
 * to the connection's segmentation channel descriptor (SCD).
 * Only CBR and UBR are implemented; VBR/ABR return -ENOSYS.
 */
static int
set_tct(struct idt77252_dev *card, struct vc_map *vc)
{
	unsigned long tct;

	/* SRAM word address of this connection's TCT entry. */
	tct = (unsigned long) (card->tct_base + vc->index * SAR_SRAM_TCT_SIZE);

	switch (vc->class) {
	case SCHED_CBR:
		OPRINTK("%s: writing TCT at 0x%lx, SCD 0x%lx.\n",
			card->name, tct, vc->scq->scd);

		write_sram(card, tct + 0, TCT_CBR | vc->scq->scd);
		write_sram(card, tct + 1, 0);
		write_sram(card, tct + 2, 0);
		write_sram(card, tct + 3, 0);
		write_sram(card, tct + 4, 0);
		write_sram(card, tct + 5, 0);
		write_sram(card, tct + 6, 0);
		write_sram(card, tct + 7, 0);
		break;

	case SCHED_UBR:
		OPRINTK("%s: writing TCT at 0x%lx, SCD 0x%lx.\n",
			card->name, tct, vc->scq->scd);

		/* UBR starts halted/idle with its initial explicit rate. */
		write_sram(card, tct + 0, TCT_UBR | vc->scq->scd);
		write_sram(card, tct + 1, 0);
		write_sram(card, tct + 2, TCT_TSIF);
		write_sram(card, tct + 3, TCT_HALT | TCT_IDLE);
		write_sram(card, tct + 4, 0);
		write_sram(card, tct + 5, vc->init_er);
		write_sram(card, tct + 6, 0);
		write_sram(card, tct + 7, TCT_FLAG_UBR);
		break;

	case SCHED_VBR:
	case SCHED_ABR:
	default:
		return -ENOSYS;
	}

	return 0;
}
  1449. /*****************************************************************************/
  1450. /* */
  1451. /* FBQ Handling */
  1452. /* */
  1453. /*****************************************************************************/
  1454. static __inline__ int
  1455. idt77252_fbq_level(struct idt77252_dev *card, int queue)
  1456. {
  1457. return (readl(SAR_REG_STAT) >> (16 + (queue << 2))) & 0x0f;
  1458. }
  1459. static __inline__ int
  1460. idt77252_fbq_full(struct idt77252_dev *card, int queue)
  1461. {
  1462. return (readl(SAR_REG_STAT) >> (16 + (queue << 2))) == 0x0f;
  1463. }
/*
 * Reset 'skb' to an empty receive buffer of the size expected by free
 * buffer queue 'queue' and hand it to the SAR by writing its pool
 * handle and bus address to the queue's FIFO register.
 * Returns 0 on success, -1 for a bad queue number or a full FBQ.
 */
static int
push_rx_skb(struct idt77252_dev *card, struct sk_buff *skb, int queue)
{
	unsigned long flags;
	u32 handle;
	u32 addr;

	/* Rewind the skb to a pristine, empty state ... */
	skb->data = skb->head;
	skb_reset_tail_pointer(skb);
	skb->len = 0;

	/* ... keeping 16 bytes of headroom in front of the data area. */
	skb_reserve(skb, 16);

	switch (queue) {
	case 0:
		skb_put(skb, SAR_FB_SIZE_0);
		break;
	case 1:
		skb_put(skb, SAR_FB_SIZE_1);
		break;
	case 2:
		skb_put(skb, SAR_FB_SIZE_2);
		break;
	case 3:
		skb_put(skb, SAR_FB_SIZE_3);
		break;
	default:
		return -1;
	}

	if (idt77252_fbq_full(card, queue))
		return -1;

	/* Zero the two u32s at the buffer's last 64-byte boundary
	 * (NOTE(review): presumably the link words the raw-cell path
	 * reads when chaining buffers — see idt77252_rx_raw()). */
	memset(&skb->data[(skb->len & ~(0x3f)) - 64], 0, 2 * sizeof(u32));

	handle = IDT77252_PRV_POOL(skb);
	addr = IDT77252_PRV_PADDR(skb);

	/* Handle and address must be written back-to-back, atomically. */
	spin_lock_irqsave(&card->cmd_lock, flags);
	writel(handle, card->fbq[queue]);
	writel(addr, card->fbq[queue]);
	spin_unlock_irqrestore(&card->cmd_lock, flags);

	return 0;
}
/*
 * Allocate 'count' skbs of 'size' bytes for free buffer queue 'queue':
 * register each in the card's buffer pool, map it for DMA from the
 * device and push it to the hardware.  Stops silently when allocation
 * fails; pool or FBQ failures unwind the partially set-up skb through
 * the goto chain below.
 */
static void
add_rx_skb(struct idt77252_dev *card, int queue,
	   unsigned int size, unsigned int count)
{
	struct sk_buff *skb;
	dma_addr_t paddr;
	u32 handle;

	while (count--) {
		skb = dev_alloc_skb(size);
		if (!skb)
			return;

		if (sb_pool_add(card, skb, queue)) {
			printk("%s: SB POOL full\n", __func__);
			goto outfree;
		}

		paddr = pci_map_single(card->pcidev, skb->data,
				       skb_end_pointer(skb) - skb->data,
				       PCI_DMA_FROMDEVICE);
		IDT77252_PRV_PADDR(skb) = paddr;

		if (push_rx_skb(card, skb, queue)) {
			printk("%s: FB QUEUE full\n", __func__);
			goto outunmap;
		}
	}
	return;

outunmap:
	pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
			 skb_end_pointer(skb) - skb->data, PCI_DMA_FROMDEVICE);

	/* Release the pool slot claimed by sb_pool_add(). */
	handle = IDT77252_PRV_POOL(skb);
	card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;

outfree:
	dev_kfree_skb(skb);
}
/*
 * Give a receive buffer back to the hardware: re-sync it for device
 * DMA and push it onto its original free buffer queue.  If the push
 * fails (queue full), unmap the buffer, drop it from the pool and
 * free it instead.
 */
static void
recycle_rx_skb(struct idt77252_dev *card, struct sk_buff *skb)
{
	u32 handle = IDT77252_PRV_POOL(skb);
	int err;

	pci_dma_sync_single_for_device(card->pcidev, IDT77252_PRV_PADDR(skb),
				       skb_end_pointer(skb) - skb->data,
				       PCI_DMA_FROMDEVICE);

	err = push_rx_skb(card, skb, POOL_QUEUE(handle));
	if (err) {
		pci_unmap_single(card->pcidev, IDT77252_PRV_PADDR(skb),
				 skb_end_pointer(skb) - skb->data,
				 PCI_DMA_FROMDEVICE);
		sb_pool_remove(card, skb);
		dev_kfree_skb(skb);
	}
}
  1551. static void
  1552. flush_rx_pool(struct idt77252_dev *card, struct rx_pool *rpp)
  1553. {
  1554. skb_queue_head_init(&rpp->queue);
  1555. rpp->len = 0;
  1556. }
/*
 * Return every skb collected in an AAL5 reassembly pool to the
 * hardware free buffer queues, then reset the pool to empty.
 * (The _safe walk is required because recycle_rx_skb() may unlink
 * and free the skb being visited.)
 */
static void
recycle_rx_pool_skb(struct idt77252_dev *card, struct rx_pool *rpp)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&rpp->queue, skb, tmp)
		recycle_rx_skb(card, skb);

	flush_rx_pool(card, rpp);
}
  1565. /*****************************************************************************/
  1566. /* */
  1567. /* ATM Interface */
  1568. /* */
  1569. /*****************************************************************************/
  1570. static void
  1571. idt77252_phy_put(struct atm_dev *dev, unsigned char value, unsigned long addr)
  1572. {
  1573. write_utility(dev->dev_data, 0x100 + (addr & 0x1ff), value);
  1574. }
  1575. static unsigned char
  1576. idt77252_phy_get(struct atm_dev *dev, unsigned long addr)
  1577. {
  1578. return read_utility(dev->dev_data, 0x100 + (addr & 0x1ff));
  1579. }
  1580. static inline int
  1581. idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
  1582. {
  1583. struct atm_dev *dev = vcc->dev;
  1584. struct idt77252_dev *card = dev->dev_data;
  1585. struct vc_map *vc = vcc->dev_data;
  1586. int err;
  1587. if (vc == NULL) {
  1588. printk("%s: NULL connection in send().\n", card->name);
  1589. atomic_inc(&vcc->stats->tx_err);
  1590. dev_kfree_skb(skb);
  1591. return -EINVAL;
  1592. }
  1593. if (!test_bit(VCF_TX, &vc->flags)) {
  1594. printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
  1595. atomic_inc(&vcc->stats->tx_err);
  1596. dev_kfree_skb(skb);
  1597. return -EINVAL;
  1598. }
  1599. switch (vcc->qos.aal) {
  1600. case ATM_AAL0:
  1601. case ATM_AAL1:
  1602. case ATM_AAL5:
  1603. break;
  1604. default:
  1605. printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
  1606. atomic_inc(&vcc->stats->tx_err);
  1607. dev_kfree_skb(skb);
  1608. return -EINVAL;
  1609. }
  1610. if (skb_shinfo(skb)->nr_frags != 0) {
  1611. printk("%s: No scatter-gather yet.\n", card->name);
  1612. atomic_inc(&vcc->stats->tx_err);
  1613. dev_kfree_skb(skb);
  1614. return -EINVAL;
  1615. }
  1616. ATM_SKB(skb)->vcc = vcc;
  1617. err = queue_skb(card, vc, skb, oam);
  1618. if (err) {
  1619. atomic_inc(&vcc->stats->tx_err);
  1620. dev_kfree_skb(skb);
  1621. return err;
  1622. }
  1623. return 0;
  1624. }
/* atmdev_ops->send hook: transmit a data (non-OAM) skb. */
static int idt77252_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	return idt77252_send_skb(vcc, skb, 0);
}
/*
 * atmdev_ops->send_oam hook: wrap a raw 52-byte OAM cell into a fresh
 * skb, charge it to the socket's write-memory accounting, and send it
 * with the OAM flag set.  Returns 0 or a negative errno.
 */
static int
idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
{
	struct atm_dev *dev = vcc->dev;
	struct idt77252_dev *card = dev->dev_data;
	struct sk_buff *skb;

	skb = dev_alloc_skb(64);
	if (!skb) {
		printk("%s: Out of memory in send_oam().\n", card->name);
		atomic_inc(&vcc->stats->tx_err);
		return -ENOMEM;
	}
	/* Account the buffer against the ATM socket's send budget
	 * (released again when the skb is freed after transmit). */
	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);

	memcpy(skb_put(skb, 52), cell, 52);

	return idt77252_send_skb(vcc, skb, 1);
}
  1645. static __inline__ unsigned int
  1646. idt77252_fls(unsigned int x)
  1647. {
  1648. int r = 1;
  1649. if (x == 0)
  1650. return 0;
  1651. if (x & 0xffff0000) {
  1652. x >>= 16;
  1653. r += 16;
  1654. }
  1655. if (x & 0xff00) {
  1656. x >>= 8;
  1657. r += 8;
  1658. }
  1659. if (x & 0xf0) {
  1660. x >>= 4;
  1661. r += 4;
  1662. }
  1663. if (x & 0xc) {
  1664. x >>= 2;
  1665. r += 2;
  1666. }
  1667. if (x & 0x2)
  1668. r += 1;
  1669. return r;
  1670. }
  1671. static u16
  1672. idt77252_int_to_atmfp(unsigned int rate)
  1673. {
  1674. u16 m, e;
  1675. if (rate == 0)
  1676. return 0;
  1677. e = idt77252_fls(rate) - 1;
  1678. if (e < 9)
  1679. m = (rate - (1 << e)) << (9 - e);
  1680. else if (e == 9)
  1681. m = (rate - (1 << e));
  1682. else /* e > 9 */
  1683. m = (rate - (1 << e)) >> (e - 9);
  1684. return 0x4000 | (e << 9) | m;
  1685. }
  1686. static u8
  1687. idt77252_rate_logindex(struct idt77252_dev *card, int pcr)
  1688. {
  1689. u16 afp;
  1690. afp = idt77252_int_to_atmfp(pcr < 0 ? -pcr : pcr);
  1691. if (pcr < 0)
  1692. return rate_to_log[(afp >> 5) & 0x1ff];
  1693. return rate_to_log[((afp >> 5) + 1) & 0x1ff];
  1694. }
/*
 * Per-VC rate estimator timer.
 *
 * Maintains an exponentially weighted moving average of the VC's cell
 * rate from the running cell counter, clamps the result to at least
 * 1/16 of the peak rate, and — if the corresponding logarithmic rate
 * index changed — programs the SAR with the new allowed cell rate via
 * a TCMDQ_LACR command.  Re-arms itself every (HZ/4) << interval.
 */
static void
idt77252_est_timer(unsigned long data)
{
	struct vc_map *vc = (struct vc_map *)data;
	struct idt77252_dev *card = vc->card;
	struct rate_estimator *est;
	unsigned long flags;
	u32 rate, cps;
	u64 ncells;
	u8 lacr;

	spin_lock_irqsave(&vc->lock, flags);
	est = vc->estimator;
	if (!est)
		goto out;	/* estimator torn down concurrently */

	/* Cells seen since last tick, scaled to cells/sec (x32). */
	ncells = est->cells;

	rate = ((u32)(ncells - est->last_cells)) << (7 - est->interval);
	est->last_cells = ncells;

	/* EWMA update; avcps is kept in units of 1/32 cell/sec. */
	est->avcps += ((long)rate - (long)est->avcps) >> est->ewma_log;
	est->cps = (est->avcps + 0x1f) >> 5;

	cps = est->cps;
	if (cps < (est->maxcps >> 4))
		cps = est->maxcps >> 4;

	lacr = idt77252_rate_logindex(card, cps);
	if (lacr > vc->max_er)
		lacr = vc->max_er;

	if (lacr != vc->lacr) {
		vc->lacr = lacr;
		writel(TCMDQ_LACR|(vc->lacr << 16)|vc->index, SAR_REG_TCMDQ);
	}

	est->timer.expires = jiffies + ((HZ / 4) << est->interval);
	add_timer(&est->timer);

out:
	spin_unlock_irqrestore(&vc->lock, flags);
}
/*
 * Allocate and start a rate estimator for 'vc', seeded with the
 * requested peak cell rate 'pcr' (its absolute value).  The timer
 * first fires after (HZ/4) << interval jiffies and then re-arms
 * itself from idt77252_est_timer().  Returns NULL on allocation
 * failure; the caller owns the returned estimator.
 */
static struct rate_estimator *
idt77252_init_est(struct vc_map *vc, int pcr)
{
	struct rate_estimator *est;

	est = kzalloc(sizeof(struct rate_estimator), GFP_KERNEL);
	if (!est)
		return NULL;

	est->maxcps = pcr < 0 ? -pcr : pcr;
	est->cps = est->maxcps;
	/* avcps is held in units of 1/32 cell/sec (see est_timer). */
	est->avcps = est->cps << 5;

	est->interval = 2;		/* XXX: make this configurable */
	est->ewma_log = 2;		/* XXX: make this configurable */
	init_timer(&est->timer);
	est->timer.data = (unsigned long)vc;
	est->timer.function = idt77252_est_timer;

	est->timer.expires = jiffies + ((HZ / 4) << est->interval);
	add_timer(&est->timer);

	return est;
}
  1748. static int
  1749. idt77252_init_cbr(struct idt77252_dev *card, struct vc_map *vc,
  1750. struct atm_vcc *vcc, struct atm_qos *qos)
  1751. {
  1752. int tst_free, tst_used, tst_entries;
  1753. unsig