/drivers/net/cnic.c
/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

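/*
 * Reference-count helpers.  cnic_from_netdev() below takes a device
 * reference with cnic_hold() before returning, so its callers are expected
 * to balance it with cnic_put().  csk_hold()/csk_put() follow the same
 * pattern for individual offloaded sockets.
 */
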
static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

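/*
 * Deliver an event to the user-space iSCSI agent through the registered
 * iSCSI ULP's netlink hook.  PATH_REQ messages are retried up to three
 * times, 100 ms apart, since the agent may not be listening yet;
 * everything else (e.g. IF_DOWN) is sent once, best effort.
 */
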
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return 0;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

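/*
 * ULP registration.  cnic_register_driver() publishes the ops for a ULP
 * type (iSCSI, FCoE, L4) in the global cnic_ulp_tbl; cnic_register_device()
 * further down binds a registered ULP to one cnic device.  Both publish
 * with rcu_assign_pointer() so readers that only hold rcu_read_lock() see
 * fully initialized ops.
 */
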
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();

	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}

	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);

	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

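/*
 * Bitmap-based id allocator.  Ids are offset by id_tbl->start and handed
 * out round-robin from id_tbl->next, wrapping to the front of the bitmap
 * when the tail is full.  Worked example (illustrative only): with
 * size = 8, start_id = 16 and next = 5, cnic_alloc_new_id() scans bits
 * 5..7 and then 0..4; finding bit 6 free, it returns 16 + 6 = 22 and
 * advances next to (6 + 1) & 7 = 7.  Note that the wrap mask
 * (id + 1) & (id_tbl->max - 1) assumes max is a power of 2.
 */
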
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

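/*
 * Little-endian variant of the page table setup above.  Both variants
 * split each 64-bit DMA page address into two 32-bit words; they differ
 * only in whether the high or the low word is stored first, to match the
 * consuming firmware.  cnic_alloc_dma() calls whichever variant
 * cp->setup_pgtbl points to.
 */
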
static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

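/*
 * Allocate a kernel completion queue (KCQ).  On bnx2x the pages are
 * linked into a ring: the bnx2x_bd_chain_next element after the last KCQE
 * of each page holds the DMA address of the following page, and the last
 * page points back to page 0.  bnx2 devices instead describe the pages
 * with a page table (the is_bnx2 flag passed to cnic_alloc_dma()).
 */
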
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	int err, i, is_bnx2 = 0;
	struct kcqe **kcq;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags))
		is_bnx2 = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, is_bnx2);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	if (is_bnx2)
		return 0;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		goto err_udev;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		goto err_dma;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

err_dma:
	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
			  udev->l2_ring, udev->l2_ring_map);
err_udev:
	kfree(udev);
	return -ENOMEM;
}

static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

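/*
 * bnx2 per-device resource setup: KWQ and KCQ rings, context memory, and
 * the UIO-exported L2 ring and buffer used by the user-space data path.
 * Any failure unwinds all of it through cnic_free_resc().
 */
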
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

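/*
 * bnx2x per-device resource setup.  The connection-id table is laid out
 * as MAX_ISCSI_TBL_SZ iSCSI entries followed by the FCoE entries, shifted
 * up by 'delta' when the ethernet driver's starting_cid is below
 * BNX2X_ISCSI_START_CID.  Each context entry is also given a
 * CNIC_KWQ16_DATA_SIZE slot carved out of the kwq_16_data_info pages for
 * ramrod data.
 */
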
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->fcoe_start_cid += delta;
		cp->max_cid_space += delta;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
				cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1);
	if (ret)
		goto error;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
				u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

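/*
 * ISCSI_KWQE_OPCODE_INIT (part 1) handler: derive the task-array, R2T
 * queue and host-queue sizes from the ULP's parameters, then program the
 * per-function iSCSI settings into the internal RAM of each of the four
 * storm processors (Tstorm/Ustorm/Xstorm/Cstorm).
 */
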
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid),
		req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid),
		req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

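/*
 * Per-connection resources.  An iSCSI connection owns three DMA areas
 * (task array, R2T queue, host queue) plus a CID from cid_tbl; an FCoE
 * connection only needs a CID from fcoe_cid_tbl.  Alloc and free are kept
 * symmetric so the alloc error path can simply call the free.
 */
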
static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

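/*
 * Fill in the hardware context for an iSCSI connection offload.  The
 * request arrives as a chain of WQEs: offload1, offload2, then
 * req2->num_additional_wqes offload3 entries carrying the remaining CQ
 * page-table entries, three per offload3 WQE (hence the j == 3 rollover
 * in the CQ loop below).
 */
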
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}
	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}

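/*
 * OFFLOAD_CONN handler.  *work reports how many WQEs were consumed
 * (2 fixed + the additional offload3 WQEs) even on failure, so the
 * dispatcher can skip past a bad request.  Most failures are signalled to
 * the ULP as a completion with a non-SUCCESS status rather than by the
 * return value.
 */
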
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

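/*
 * Issue a CFC delete ramrod for the connection and sleep until the
 * completion handler sets ctx->wait_cond.  The destroy handler below
 * defers this through the delete_task delayed work if less than 2*HZ has
 * passed since ctx->timestamp.
 */
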
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

	return ret;
}

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);
	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}

static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp+…