
/kernel-source/drivers/net/ethernet/mellanox/mlx5/core/main.c

https://gitlab.com/karrei/imx6-kernel
/*
 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include "mlx5_core.h"

#define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "2.2-1"
#define DRIVER_RELDATE "Feb 2014"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

struct workqueue_struct *mlx5_core_wq;
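
/*
 * Configure DMA addressing for the device: prefer 64-bit streaming and
 * coherent DMA masks, fall back to 32-bit masks if the platform cannot
 * honour them, and cap the maximum DMA segment size at 2 GB.
 */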
static int set_dma_caps(struct pci_dev *pdev)
{
        int err;

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
                        return err;
                }
        }

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev,
                         "Warning: couldn't set 64-bit consistent PCI DMA mask.\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "Can't set consistent PCI DMA mask, aborting.\n");
                        return err;
                }
        }

        dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
        return err;
}
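
/*
 * BAR 0 must be a memory BAR; it holds the initialization segment and
 * device registers mapped later in mlx5_dev_init().  Claim all of the
 * device's regions under the driver name.
 */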
static int request_bar(struct pci_dev *pdev)
{
        int err = 0;

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Missing registers BAR, aborting.\n");
                return -ENODEV;
        }

        err = pci_request_regions(pdev, DRIVER_NAME);
        if (err)
                dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

        return err;
}

static void release_bar(struct pci_dev *pdev)
{
        pci_release_regions(pdev);
}
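
/*
 * Request one MSI-X vector per online CPU per port for completion EQs,
 * plus MLX5_EQ_VEC_COMP_BASE control vectors, bounded by the number of
 * EQs the device supports.  If the PCI core can only grant fewer
 * vectors, retry with the number it reported.
 */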
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int num_eqs = 1 << dev->caps.log_max_eq;
        int nvec;
        int err;
        int i;

        nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
        nvec = min_t(int, nvec, num_eqs);
        if (nvec <= MLX5_EQ_VEC_COMP_BASE)
                return -ENOMEM;

        table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL);
        if (!table->msix_arr)
                return -ENOMEM;

        for (i = 0; i < nvec; i++)
                table->msix_arr[i].entry = i;

retry:
        table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
        err = pci_enable_msix(dev->pdev, table->msix_arr, nvec);
        if (err <= 0) {
                return err;
        } else if (err > 2) {
                nvec = err;
                goto retry;
        }

        mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec);

        return 0;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;

        pci_disable_msix(dev->pdev);
        kfree(table->msix_arr);
}
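
/*
 * Layout of the HOST_ENDIANNESS access register, followed by the mask
 * of capability flag bits (CMDIF checksum and DCT) that the driver is
 * allowed to write back in SET_HCA_CAP.
 */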
struct mlx5_reg_host_endianess {
        u8      he;
        u8      rsvd[15];
};

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
        MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
                                CAP_MASK(MLX5_CAP_OFF_DCT, 1),
};

/* selectively copy writable fields clearing any reserved area
 */
static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_hca_cap *from)
{
        u64 v64;

        to->log_max_qp = from->log_max_qp & 0x1f;
        to->log_max_ra_req_dc = from->log_max_ra_req_dc & 0x3f;
        to->log_max_ra_res_dc = from->log_max_ra_res_dc & 0x3f;
        to->log_max_ra_req_qp = from->log_max_ra_req_qp & 0x3f;
        to->log_max_ra_res_qp = from->log_max_ra_res_qp & 0x3f;
        to->log_max_atomic_size_qp = from->log_max_atomic_size_qp;
        to->log_max_atomic_size_dc = from->log_max_atomic_size_dc;
        v64 = be64_to_cpu(from->flags) & MLX5_CAP_BITS_RW_MASK;
        to->flags = cpu_to_be64(v64);
}

enum {
        HCA_CAP_OPMOD_GET_MAX = 0,
        HCA_CAP_OPMOD_GET_CUR = 1,
};
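
/*
 * Query the current HCA capabilities, copy the writable fields into a
 * SET_HCA_CAP mailbox, apply the profile's log_max_qp if requested,
 * clear the command-interface checksum flag and report the kernel's
 * UAR page size, then write the adjusted capabilities back to firmware.
 */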
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL;
        struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
        struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
        struct mlx5_cmd_set_hca_cap_mbox_out set_out;
        u64 flags;
        int err;

        memset(&query_ctx, 0, sizeof(query_ctx));
        query_out = kzalloc(sizeof(*query_out), GFP_KERNEL);
        if (!query_out)
                return -ENOMEM;

        set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL);
        if (!set_ctx) {
                err = -ENOMEM;
                goto query_ex;
        }

        query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
        query_ctx.hdr.opmod  = cpu_to_be16(HCA_CAP_OPMOD_GET_CUR);
        err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx),
                            query_out, sizeof(*query_out));
        if (err)
                goto query_ex;

        err = mlx5_cmd_status_to_err(&query_out->hdr);
        if (err) {
                mlx5_core_warn(dev, "query hca cap failed, %d\n", err);
                goto query_ex;
        }

        copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap);

        if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
                set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;

        flags = be64_to_cpu(query_out->hca_cap.flags);
        /* disable checksum */
        flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;

        set_ctx->hca_cap.flags = cpu_to_be64(flags);
        memset(&set_out, 0, sizeof(set_out));
        set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
        set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
        err = mlx5_cmd_exec(dev, set_ctx, sizeof(*set_ctx),
                            &set_out, sizeof(set_out));
        if (err) {
                mlx5_core_warn(dev, "set hca cap failed, %d\n", err);
                goto query_ex;
        }

        err = mlx5_cmd_status_to_err(&set_out.hdr);
        if (err)
                goto query_ex;

query_ex:
        kfree(query_out);
        kfree(set_ctx);
        return err;
}
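
/*
 * set_hca_ctrl() reports the host's endianness to the device through
 * the HOST_ENDIANNESS access register.  mlx5_core_enable_hca() and
 * mlx5_core_disable_hca() issue the corresponding firmware commands
 * and translate any mailbox status into an errno.
 */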
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
        struct mlx5_reg_host_endianess he_in;
        struct mlx5_reg_host_endianess he_out;
        int err;

        memset(&he_in, 0, sizeof(he_in));
        he_in.he = MLX5_SET_HOST_ENDIANNESS;
        err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
                                   &he_out, sizeof(he_out),
                                   MLX5_REG_HOST_ENDIANNESS, 0, 1);
        return err;
}

static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
{
        int err;
        struct mlx5_enable_hca_mbox_in in;
        struct mlx5_enable_hca_mbox_out out;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        return 0;
}

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
        int err;
        struct mlx5_disable_hca_mbox_in in;
        struct mlx5_disable_hca_mbox_out out;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        return 0;
}
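
/*
 * Bring the device up: enable and map PCI resources, initialize the
 * command interface, enable the HCA, hand out boot and init pages,
 * negotiate capabilities, run INIT_HCA, start health polling, set up
 * MSI-X and event queues, allocate UARs and initialize the CQ/QP/SRQ
 * tables.  Each failure unwinds all previously completed steps.
 */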
int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
        struct mlx5_priv *priv = &dev->priv;
        int err;

        dev->pdev = pdev;
        pci_set_drvdata(dev->pdev, dev);
        strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
        priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

        mutex_init(&priv->pgdir_mutex);
        INIT_LIST_HEAD(&priv->pgdir_list);
        spin_lock_init(&priv->mkey_lock);

        priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
        if (!priv->dbg_root)
                return -ENOMEM;

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
                goto err_dbg;
        }

        err = request_bar(pdev);
        if (err) {
                dev_err(&pdev->dev, "error requesting BARs, aborting.\n");
                goto err_disable;
        }

        pci_set_master(pdev);

        err = set_dma_caps(pdev);
        if (err) {
                dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
                goto err_clr_master;
        }

        dev->iseg_base = pci_resource_start(dev->pdev, 0);
        dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
        if (!dev->iseg) {
                err = -ENOMEM;
                dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
                goto err_clr_master;
        }
        dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
                 fw_rev_min(dev), fw_rev_sub(dev));

        err = mlx5_cmd_init(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
                goto err_unmap;
        }

        mlx5_pagealloc_init(dev);

        err = mlx5_core_enable_hca(dev);
        if (err) {
                dev_err(&pdev->dev, "enable hca failed\n");
                goto err_pagealloc_cleanup;
        }

        err = mlx5_satisfy_startup_pages(dev, 1);
        if (err) {
                dev_err(&pdev->dev, "failed to allocate boot pages\n");
                goto err_disable_hca;
        }

        err = set_hca_ctrl(dev);
        if (err) {
                dev_err(&pdev->dev, "set_hca_ctrl failed\n");
                goto reclaim_boot_pages;
        }

        err = handle_hca_cap(dev);
        if (err) {
                dev_err(&pdev->dev, "handle_hca_cap failed\n");
                goto reclaim_boot_pages;
        }

        err = mlx5_satisfy_startup_pages(dev, 0);
        if (err) {
                dev_err(&pdev->dev, "failed to allocate init pages\n");
                goto reclaim_boot_pages;
        }

        err = mlx5_pagealloc_start(dev);
        if (err) {
                dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
                goto reclaim_boot_pages;
        }

        err = mlx5_cmd_init_hca(dev);
        if (err) {
                dev_err(&pdev->dev, "init hca failed\n");
                goto err_pagealloc_stop;
        }

        mlx5_start_health_poll(dev);

        err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
        if (err) {
                dev_err(&pdev->dev, "query hca failed\n");
                goto err_stop_poll;
        }

        err = mlx5_cmd_query_adapter(dev);
        if (err) {
                dev_err(&pdev->dev, "query adapter failed\n");
                goto err_stop_poll;
        }

        err = mlx5_enable_msix(dev);
        if (err) {
                dev_err(&pdev->dev, "enable msix failed\n");
                goto err_stop_poll;
        }

        err = mlx5_eq_init(dev);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize eq\n");
                goto disable_msix;
        }

        err = mlx5_alloc_uuars(dev, &priv->uuari);
        if (err) {
                dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
                goto err_eq_cleanup;
        }

        err = mlx5_start_eqs(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
                goto err_free_uar;
        }

        MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

        mlx5_init_cq_table(dev);
        mlx5_init_qp_table(dev);
        mlx5_init_srq_table(dev);

        return 0;

err_free_uar:
        mlx5_free_uuars(dev, &priv->uuari);

err_eq_cleanup:
        mlx5_eq_cleanup(dev);

disable_msix:
        mlx5_disable_msix(dev);

err_stop_poll:
        mlx5_stop_health_poll(dev);
        if (mlx5_cmd_teardown_hca(dev)) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
                return err;
        }

err_pagealloc_stop:
        mlx5_pagealloc_stop(dev);

reclaim_boot_pages:
        mlx5_reclaim_startup_pages(dev);

err_disable_hca:
        mlx5_core_disable_hca(dev);

err_pagealloc_cleanup:
        mlx5_pagealloc_cleanup(dev);
        mlx5_cmd_cleanup(dev);

err_unmap:
        iounmap(dev->iseg);

err_clr_master:
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);

err_disable:
        pci_disable_device(dev->pdev);

err_dbg:
        debugfs_remove(priv->dbg_root);
        return err;
}
EXPORT_SYMBOL(mlx5_dev_init);
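
/*
 * Tear the device down in the reverse order of mlx5_dev_init().  If the
 * TEARDOWN_HCA command fails, the remaining cleanup is skipped.
 */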
void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
        mlx5_stop_eqs(dev);
        mlx5_free_uuars(dev, &priv->uuari);
        mlx5_eq_cleanup(dev);
        mlx5_disable_msix(dev);
        mlx5_stop_health_poll(dev);
        if (mlx5_cmd_teardown_hca(dev)) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
                return;
        }
        mlx5_pagealloc_stop(dev);
        mlx5_reclaim_startup_pages(dev);
        mlx5_core_disable_hca(dev);
        mlx5_pagealloc_cleanup(dev);
        mlx5_cmd_cleanup(dev);
        iounmap(dev->iseg);
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
        pci_disable_device(dev->pdev);
        debugfs_remove(priv->dbg_root);
}
EXPORT_SYMBOL(mlx5_dev_cleanup);
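
/*
 * Module init: register the debugfs root, create the single-threaded
 * work queue used by the core, and initialize the health checker.  The
 * exit path undoes these steps in reverse order.
 */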
static int __init init(void)
{
        int err;

        mlx5_register_debugfs();
        mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
        if (!mlx5_core_wq) {
                err = -ENOMEM;
                goto err_debug;
        }
        mlx5_health_init();

        return 0;

        mlx5_health_cleanup();

err_debug:
        mlx5_unregister_debugfs();
        return err;
}

static void __exit cleanup(void)
{
        mlx5_health_cleanup();
        destroy_workqueue(mlx5_core_wq);
        mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);