/* drivers/scsi/lpfc/lpfc_mem.c */

/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
  44. #define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
  45. #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
  46. #define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */
  47. int
  48. lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
  49. size_t bytes;
  50. int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
  51. if (max_xri <= 0)
  52. return -ENOMEM;
  53. bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
  54. sizeof(unsigned long);
  55. phba->cfg_rrq_xri_bitmap_sz = bytes;
  56. phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
  57. bytes);
  58. if (!phba->active_rrq_pool)
  59. return -ENOMEM;
  60. else
  61. return 0;
  62. }
  63. /**
  64. * lpfc_mem_alloc - create and allocate all PCI and memory pools
  65. * @phba: HBA to allocate pools for
  66. *
  67. * Description: Creates and allocates PCI pools lpfc_mbuf_pool,
  68. * lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
  69. * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
  70. *
  71. * Notes: Not interrupt-safe. Must be called with no locks held. If any
  72. * allocation fails, frees all successfully allocated memory before returning.
  73. *
  74. * Returns:
  75. * 0 on success
  76. * -ENOMEM on failure (if any memory allocations fail)
  77. **/
  78. int
  79. lpfc_mem_alloc(struct lpfc_hba *phba, int align)
  80. {
  81. struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
  82. int i;
  83. phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev,
  84. LPFC_BPL_SIZE,
  85. align, 0);
  86. if (!phba->lpfc_mbuf_pool)
  87. goto fail;
  88. pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE,
  89. sizeof(struct lpfc_dmabuf),
  90. GFP_KERNEL);
  91. if (!pool->elements)
  92. goto fail_free_lpfc_mbuf_pool;
  93. pool->max_count = 0;
  94. pool->current_count = 0;
  95. for ( i = 0; i < LPFC_MBUF_POOL_SIZE; i++) {
  96. pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
  97. GFP_KERNEL, &pool->elements[i].phys);
  98. if (!pool->elements[i].virt)
  99. goto fail_free_mbuf_pool;
  100. pool->max_count++;
  101. pool->current_count++;
  102. }
  103. phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
  104. sizeof(LPFC_MBOXQ_t));
  105. if (!phba->mbox_mem_pool)
  106. goto fail_free_mbuf_pool;
  107. phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
  108. sizeof(struct lpfc_nodelist));
  109. if (!phba->nlp_mem_pool)
  110. goto fail_free_mbox_pool;
  111. if (phba->sli_rev == LPFC_SLI_REV4) {
  112. phba->rrq_pool =
  113. mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
  114. sizeof(struct lpfc_node_rrq));
  115. if (!phba->rrq_pool)
  116. goto fail_free_nlp_mem_pool;
  117. phba->lpfc_hrb_pool = dma_pool_create("lpfc_hrb_pool",
  118. &phba->pcidev->dev,
  119. LPFC_HDR_BUF_SIZE, align, 0);
  120. if (!phba->lpfc_hrb_pool)
  121. goto fail_free_rrq_mem_pool;
  122. phba->lpfc_drb_pool = dma_pool_create("lpfc_drb_pool",
  123. &phba->pcidev->dev,
  124. LPFC_DATA_BUF_SIZE, align, 0);
  125. if (!phba->lpfc_drb_pool)
  126. goto fail_free_hrb_pool;
  127. phba->lpfc_hbq_pool = NULL;
  128. } else {
  129. phba->lpfc_hbq_pool = dma_pool_create("lpfc_hbq_pool",
  130. &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0);
  131. if (!phba->lpfc_hbq_pool)
  132. goto fail_free_nlp_mem_pool;
  133. phba->lpfc_hrb_pool = NULL;
  134. phba->lpfc_drb_pool = NULL;
  135. }
  136. if (phba->cfg_EnableXLane) {
  137. phba->device_data_mem_pool = mempool_create_kmalloc_pool(
  138. LPFC_DEVICE_DATA_POOL_SIZE,
  139. sizeof(struct lpfc_device_data));
  140. if (!phba->device_data_mem_pool)
  141. goto fail_free_drb_pool;
  142. } else {
  143. phba->device_data_mem_pool = NULL;
  144. }
  145. return 0;
  146. fail_free_drb_pool:
  147. dma_pool_destroy(phba->lpfc_drb_pool);
  148. phba->lpfc_drb_pool = NULL;
  149. fail_free_hrb_pool:
  150. dma_pool_destroy(phba->lpfc_hrb_pool);
  151. phba->lpfc_hrb_pool = NULL;
  152. fail_free_rrq_mem_pool:
  153. mempool_destroy(phba->rrq_pool);
  154. phba->rrq_pool = NULL;
  155. fail_free_nlp_mem_pool:
  156. mempool_destroy(phba->nlp_mem_pool);
  157. phba->nlp_mem_pool = NULL;
  158. fail_free_mbox_pool:
  159. mempool_destroy(phba->mbox_mem_pool);
  160. phba->mbox_mem_pool = NULL;
  161. fail_free_mbuf_pool:
  162. while (i--)
  163. dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
  164. pool->elements[i].phys);
  165. kfree(pool->elements);
  166. fail_free_lpfc_mbuf_pool:
  167. dma_pool_destroy(phba->lpfc_mbuf_pool);
  168. phba->lpfc_mbuf_pool = NULL;
  169. fail:
  170. return -ENOMEM;
  171. }
  172. int
  173. lpfc_nvmet_mem_alloc(struct lpfc_hba *phba)
  174. {
  175. phba->lpfc_nvmet_drb_pool =
  176. dma_pool_create("lpfc_nvmet_drb_pool",
  177. &phba->pcidev->dev, LPFC_NVMET_DATA_BUF_SIZE,
  178. SGL_ALIGN_SZ, 0);
  179. if (!phba->lpfc_nvmet_drb_pool) {
  180. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  181. "6024 Can't enable NVME Target - no memory\n");
  182. return -ENOMEM;
  183. }
  184. return 0;
  185. }
  186. /**
  187. * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
  188. * @phba: HBA to free memory for
  189. *
  190. * Description: Free the memory allocated by lpfc_mem_alloc routine. This
  191. * routine is a the counterpart of lpfc_mem_alloc.
  192. *
  193. * Returns: None
  194. **/
  195. void
  196. lpfc_mem_free(struct lpfc_hba *phba)
  197. {
  198. int i;
  199. struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
  200. struct lpfc_device_data *device_data;
  201. /* Free HBQ pools */
  202. lpfc_sli_hbqbuf_free_all(phba);
  203. dma_pool_destroy(phba->lpfc_nvmet_drb_pool);
  204. phba->lpfc_nvmet_drb_pool = NULL;
  205. dma_pool_destroy(phba->lpfc_drb_pool);
  206. phba->lpfc_drb_pool = NULL;
  207. dma_pool_destroy(phba->lpfc_hrb_pool);
  208. phba->lpfc_hrb_pool = NULL;
  209. dma_pool_destroy(phba->lpfc_hbq_pool);
  210. phba->lpfc_hbq_pool = NULL;
  211. mempool_destroy(phba->rrq_pool);
  212. phba->rrq_pool = NULL;
  213. /* Free NLP memory pool */
  214. mempool_destroy(phba->nlp_mem_pool);
  215. phba->nlp_mem_pool = NULL;
  216. if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) {
  217. mempool_destroy(phba->active_rrq_pool);
  218. phba->active_rrq_pool = NULL;
  219. }
  220. /* Free mbox memory pool */
  221. mempool_destroy(phba->mbox_mem_pool);
  222. phba->mbox_mem_pool = NULL;
  223. /* Free MBUF memory pool */
  224. for (i = 0; i < pool->current_count; i++)
  225. dma_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
  226. pool->elements[i].phys);
  227. kfree(pool->elements);
  228. dma_pool_destroy(phba->lpfc_mbuf_pool);
  229. phba->lpfc_mbuf_pool = NULL;
  230. /* Free Device Data memory pool */
  231. if (phba->device_data_mem_pool) {
  232. /* Ensure all objects have been returned to the pool */
  233. while (!list_empty(&phba->luns)) {
  234. device_data = list_first_entry(&phba->luns,
  235. struct lpfc_device_data,
  236. listentry);
  237. list_del(&device_data->listentry);
  238. mempool_free(device_data, phba->device_data_mem_pool);
  239. }
  240. mempool_destroy(phba->device_data_mem_pool);
  241. }
  242. phba->device_data_mem_pool = NULL;
  243. return;
  244. }
  245. /**
  246. * lpfc_mem_free_all - Frees all PCI and driver memory
  247. * @phba: HBA to free memory for
  248. *
  249. * Description: Free memory from PCI and driver memory pools and also those
  250. * used : lpfc_sg_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees
  251. * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
  252. * the VPI bitmask.
  253. *
  254. * Returns: None
  255. **/
  256. void
  257. lpfc_mem_free_all(struct lpfc_hba *phba)
  258. {
  259. struct lpfc_sli *psli = &phba->sli;
  260. LPFC_MBOXQ_t *mbox, *next_mbox;
  261. struct lpfc_dmabuf *mp;
  262. /* Free memory used in mailbox queue back to mailbox memory pool */
  263. list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
  264. mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
  265. if (mp) {
  266. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  267. kfree(mp);
  268. }
  269. list_del(&mbox->list);
  270. mempool_free(mbox, phba->mbox_mem_pool);
  271. }
  272. /* Free memory used in mailbox cmpl list back to mailbox memory pool */
  273. list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
  274. mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
  275. if (mp) {
  276. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  277. kfree(mp);
  278. }
  279. list_del(&mbox->list);
  280. mempool_free(mbox, phba->mbox_mem_pool);
  281. }
  282. /* Free the active mailbox command back to the mailbox memory pool */
  283. spin_lock_irq(&phba->hbalock);
  284. psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
  285. spin_unlock_irq(&phba->hbalock);
  286. if (psli->mbox_active) {
  287. mbox = psli->mbox_active;
  288. mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
  289. if (mp) {
  290. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  291. kfree(mp);
  292. }
  293. mempool_free(mbox, phba->mbox_mem_pool);
  294. psli->mbox_active = NULL;
  295. }
  296. /* Free and destroy all the allocated memory pools */
  297. lpfc_mem_free(phba);
  298. /* Free DMA buffer memory pool */
  299. dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
  300. phba->lpfc_sg_dma_buf_pool = NULL;
  301. dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
  302. phba->lpfc_cmd_rsp_buf_pool = NULL;
  303. /* Free the iocb lookup array */
  304. kfree(psli->iocbq_lookup);
  305. psli->iocbq_lookup = NULL;
  306. return;
  307. }
  308. /**
  309. * lpfc_mbuf_alloc - Allocate an mbuf from the lpfc_mbuf_pool PCI pool
  310. * @phba: HBA which owns the pool to allocate from
  311. * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
  312. * @handle: used to return the DMA-mapped address of the mbuf
  313. *
  314. * Description: Allocates a DMA-mapped buffer from the lpfc_mbuf_pool PCI pool.
  315. * Allocates from generic dma_pool_alloc function first and if that fails and
  316. * mem_flags has MEM_PRI set (the only defined flag), returns an mbuf from the
  317. * HBA's pool.
  318. *
  319. * Notes: Not interrupt-safe. Must be called with no locks held. Takes
  320. * phba->hbalock.
  321. *
  322. * Returns:
  323. * pointer to the allocated mbuf on success
  324. * NULL on failure
  325. **/
  326. void *
  327. lpfc_mbuf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
  328. {
  329. struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
  330. unsigned long iflags;
  331. void *ret;
  332. ret = dma_pool_alloc(phba->lpfc_mbuf_pool, GFP_KERNEL, handle);
  333. spin_lock_irqsave(&phba->hbalock, iflags);
  334. if (!ret && (mem_flags & MEM_PRI) && pool->current_count) {
  335. pool->current_count--;
  336. ret = pool->elements[pool->current_count].virt;
  337. *handle = pool->elements[pool->current_count].phys;
  338. }
  339. spin_unlock_irqrestore(&phba->hbalock, iflags);
  340. return ret;
  341. }
  342. /**
  343. * __lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (locked)
  344. * @phba: HBA which owns the pool to return to
  345. * @virt: mbuf to free
  346. * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
  347. *
  348. * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if
  349. * it is below its max_count, frees the mbuf otherwise.
  350. *
  351. * Notes: Must be called with phba->hbalock held to synchronize access to
  352. * lpfc_mbuf_safety_pool.
  353. *
  354. * Returns: None
  355. **/
  356. void
  357. __lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
  358. {
  359. struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
  360. if (pool->current_count < pool->max_count) {
  361. pool->elements[pool->current_count].virt = virt;
  362. pool->elements[pool->current_count].phys = dma;
  363. pool->current_count++;
  364. } else {
  365. dma_pool_free(phba->lpfc_mbuf_pool, virt, dma);
  366. }
  367. return;
  368. }
  369. /**
  370. * lpfc_mbuf_free - Free an mbuf from the lpfc_mbuf_pool PCI pool (unlocked)
  371. * @phba: HBA which owns the pool to return to
  372. * @virt: mbuf to free
  373. * @dma: the DMA-mapped address of the lpfc_mbuf_pool to be freed
  374. *
  375. * Description: Returns an mbuf lpfc_mbuf_pool to the lpfc_mbuf_safety_pool if
  376. * it is below its max_count, frees the mbuf otherwise.
  377. *
  378. * Notes: Takes phba->hbalock. Can be called with or without other locks held.
  379. *
  380. * Returns: None
  381. **/
  382. void
  383. lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
  384. {
  385. unsigned long iflags;
  386. spin_lock_irqsave(&phba->hbalock, iflags);
  387. __lpfc_mbuf_free(phba, virt, dma);
  388. spin_unlock_irqrestore(&phba->hbalock, iflags);
  389. return;
  390. }
  391. /**
  392. * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
  393. * lpfc_sg_dma_buf_pool PCI pool
  394. * @phba: HBA which owns the pool to allocate from
  395. * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
  396. * @handle: used to return the DMA-mapped address of the nvmet_buf
  397. *
  398. * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
  399. * PCI pool. Allocates from generic dma_pool_alloc function.
  400. *
  401. * Returns:
  402. * pointer to the allocated nvmet_buf on success
  403. * NULL on failure
  404. **/
  405. void *
  406. lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
  407. {
  408. void *ret;
  409. ret = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
  410. return ret;
  411. }
  412. /**
  413. * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool
  414. * PCI pool
  415. * @phba: HBA which owns the pool to return to
  416. * @virt: nvmet_buf to free
  417. * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
  418. *
  419. * Returns: None
  420. **/
  421. void
  422. lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
  423. {
  424. dma_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
  425. }
  426. /**
  427. * lpfc_els_hbq_alloc - Allocate an HBQ buffer
  428. * @phba: HBA to allocate HBQ buffer for
  429. *
  430. * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
  431. * pool along a non-DMA-mapped container for it.
  432. *
  433. * Notes: Not interrupt-safe. Must be called with no locks held.
  434. *
  435. * Returns:
  436. * pointer to HBQ on success
  437. * NULL on failure
  438. **/
  439. struct hbq_dmabuf *
  440. lpfc_els_hbq_alloc(struct lpfc_hba *phba)
  441. {
  442. struct hbq_dmabuf *hbqbp;
  443. hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
  444. if (!hbqbp)
  445. return NULL;
  446. hbqbp->dbuf.virt = dma_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
  447. &hbqbp->dbuf.phys);
  448. if (!hbqbp->dbuf.virt) {
  449. kfree(hbqbp);
  450. return NULL;
  451. }
  452. hbqbp->total_size = LPFC_BPL_SIZE;
  453. return hbqbp;
  454. }
  455. /**
  456. * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
  457. * @phba: HBA buffer was allocated for
  458. * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
  459. *
  460. * Description: Frees both the container and the DMA-mapped buffer returned by
  461. * lpfc_els_hbq_alloc.
  462. *
  463. * Notes: Can be called with or without locks held.
  464. *
  465. * Returns: None
  466. **/
  467. void
  468. lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
  469. {
  470. dma_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
  471. kfree(hbqbp);
  472. return;
  473. }
  474. /**
  475. * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
  476. * @phba: HBA to allocate a receive buffer for
  477. *
  478. * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
  479. * pool along a non-DMA-mapped container for it.
  480. *
  481. * Notes: Not interrupt-safe. Must be called with no locks held.
  482. *
  483. * Returns:
  484. * pointer to HBQ on success
  485. * NULL on failure
  486. **/
  487. struct hbq_dmabuf *
  488. lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
  489. {
  490. struct hbq_dmabuf *dma_buf;
  491. dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
  492. if (!dma_buf)
  493. return NULL;
  494. dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
  495. &dma_buf->hbuf.phys);
  496. if (!dma_buf->hbuf.virt) {
  497. kfree(dma_buf);
  498. return NULL;
  499. }
  500. dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
  501. &dma_buf->dbuf.phys);
  502. if (!dma_buf->dbuf.virt) {
  503. dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
  504. dma_buf->hbuf.phys);
  505. kfree(dma_buf);
  506. return NULL;
  507. }
  508. dma_buf->total_size = LPFC_DATA_BUF_SIZE;
  509. return dma_buf;
  510. }
  511. /**
  512. * lpfc_sli4_rb_free - Frees a receive buffer
  513. * @phba: HBA buffer was allocated for
  514. * @dmab: DMA Buffer container returned by lpfc_sli4_hbq_alloc
  515. *
  516. * Description: Frees both the container and the DMA-mapped buffers returned by
  517. * lpfc_sli4_rb_alloc.
  518. *
  519. * Notes: Can be called with or without locks held.
  520. *
  521. * Returns: None
  522. **/
  523. void
  524. lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
  525. {
  526. dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
  527. dma_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
  528. kfree(dmab);
  529. }
  530. /**
  531. * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer
  532. * @phba: HBA to allocate a receive buffer for
  533. *
  534. * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
  535. * pool along a non-DMA-mapped container for it.
  536. *
  537. * Notes: Not interrupt-safe. Must be called with no locks held.
  538. *
  539. * Returns:
  540. * pointer to HBQ on success
  541. * NULL on failure
  542. **/
  543. struct rqb_dmabuf *
  544. lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
  545. {
  546. struct rqb_dmabuf *dma_buf;
  547. dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
  548. if (!dma_buf)
  549. return NULL;
  550. dma_buf->hbuf.virt = dma_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
  551. &dma_buf->hbuf.phys);
  552. if (!dma_buf->hbuf.virt) {
  553. kfree(dma_buf);
  554. return NULL;
  555. }
  556. dma_buf->dbuf.virt = dma_pool_alloc(phba->lpfc_nvmet_drb_pool,
  557. GFP_KERNEL, &dma_buf->dbuf.phys);
  558. if (!dma_buf->dbuf.virt) {
  559. dma_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
  560. dma_buf->hbuf.phys);
  561. kfree(dma_buf);
  562. return NULL;
  563. }
  564. dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
  565. return dma_buf;
  566. }
  567. /**
  568. * lpfc_sli4_nvmet_free - Frees a receive buffer
  569. * @phba: HBA buffer was allocated for
  570. * @dmab: DMA Buffer container returned by lpfc_sli4_rbq_alloc
  571. *
  572. * Description: Frees both the container and the DMA-mapped buffers returned by
  573. * lpfc_sli4_nvmet_alloc.
  574. *
  575. * Notes: Can be called with or without locks held.
  576. *
  577. * Returns: None
  578. **/
  579. void
  580. lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
  581. {
  582. dma_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
  583. dma_pool_free(phba->lpfc_nvmet_drb_pool,
  584. dmab->dbuf.virt, dmab->dbuf.phys);
  585. kfree(dmab);
  586. }
  587. /**
  588. * lpfc_in_buf_free - Free a DMA buffer
  589. * @phba: HBA buffer is associated with
  590. * @mp: Buffer to free
  591. *
  592. * Description: Frees the given DMA buffer in the appropriate way given if the
  593. * HBA is running in SLI3 mode with HBQs enabled.
  594. *
  595. * Notes: Takes phba->hbalock. Can be called with or without other locks held.
  596. *
  597. * Returns: None
  598. **/
  599. void
  600. lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
  601. {
  602. struct hbq_dmabuf *hbq_entry;
  603. unsigned long flags;
  604. if (!mp)
  605. return;
  606. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
  607. hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
  608. /* Check whether HBQ is still in use */
  609. spin_lock_irqsave(&phba->hbalock, flags);
  610. if (!phba->hbq_in_use) {
  611. spin_unlock_irqrestore(&phba->hbalock, flags);
  612. return;
  613. }
  614. list_del(&hbq_entry->dbuf.list);
  615. if (hbq_entry->tag == -1) {
  616. (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
  617. (phba, hbq_entry);
  618. } else {
  619. lpfc_sli_free_hbq(phba, hbq_entry);
  620. }
  621. spin_unlock_irqrestore(&phba->hbalock, flags);
  622. } else {
  623. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  624. kfree(mp);
  625. }
  626. return;
  627. }
  628. /**
  629. * lpfc_rq_buf_free - Free a RQ DMA buffer
  630. * @phba: HBA buffer is associated with
  631. * @mp: Buffer to free
  632. *
  633. * Description: Frees the given DMA buffer in the appropriate way given by
  634. * reposting it to its associated RQ so it can be reused.
  635. *
  636. * Notes: Takes phba->hbalock. Can be called with or without other locks held.
  637. *
  638. * Returns: None
  639. **/
  640. void
  641. lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
  642. {
  643. struct lpfc_rqb *rqbp;
  644. struct lpfc_rqe hrqe;
  645. struct lpfc_rqe drqe;
  646. struct rqb_dmabuf *rqb_entry;
  647. unsigned long flags;
  648. int rc;
  649. if (!mp)
  650. return;
  651. rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
  652. rqbp = rqb_entry->hrq->rqbp;
  653. spin_lock_irqsave(&phba->hbalock, flags);
  654. list_del(&rqb_entry->hbuf.list);
  655. hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
  656. hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
  657. drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
  658. drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
  659. rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
  660. if (rc < 0) {
  661. (rqbp->rqb_free_buffer)(phba, rqb_entry);
  662. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  663. "6409 Cannot post to HRQ %d: %x %x %x "
  664. "DRQ %x %x\n",
  665. rqb_entry->hrq->queue_id,
  666. rqb_entry->hrq->host_index,
  667. rqb_entry->hrq->hba_index,
  668. rqb_entry->hrq->entry_count,
  669. rqb_entry->drq->host_index,
  670. rqb_entry->drq->hba_index);
  671. } else {
  672. list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
  673. rqbp->buffer_count++;
  674. }
  675. spin_unlock_irqrestore(&phba->hbalock, flags);
  676. }