PageRenderTime 97ms CodeModel.GetById 19ms RepoModel.GetById 1ms app.codeStats 0ms

/drivers/staging/octeon/cvmx-cmd-queue.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2
C | 306 lines | 185 code | 23 blank | 98 comment | 45 complexity | 3f48682c9d26e3ddd8768bfc18e4cd82 MD5 | raw file
Possible License(s): GPL-2.0, LGPL-2.0, AGPL-1.0
  1. /***********************license start***************
  2. * Author: Cavium Networks
  3. *
  4. * Contact: support@caviumnetworks.com
  5. * This file is part of the OCTEON SDK
  6. *
  7. * Copyright (c) 2003-2008 Cavium Networks
  8. *
  9. * This file is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License, Version 2, as
  11. * published by the Free Software Foundation.
  12. *
  13. * This file is distributed in the hope that it will be useful, but
  14. * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16. * NONINFRINGEMENT. See the GNU General Public License for more
  17. * details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this file; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  22. * or visit http://www.gnu.org/licenses/.
  23. *
  24. * This file may also be available under a different license from Cavium.
  25. * Contact Cavium Networks for more information
  26. ***********************license end**************************************/
  27. /*
  28. * Support functions for managing command queues used for
  29. * various hardware blocks.
  30. */
  31. #include <linux/kernel.h>
  32. #include <asm/octeon/octeon.h>
  33. #include "cvmx-config.h"
  34. #include "cvmx-fpa.h"
  35. #include "cvmx-cmd-queue.h"
  36. #include <asm/octeon/cvmx-npei-defs.h>
  37. #include <asm/octeon/cvmx-pexp-defs.h>
  38. #include "cvmx-pko-defs.h"
/**
 * Pointer to the global command-queue state shared by all users.
 * It points to a bootmem named block ("cvmx_cmd_queues"); it is
 * populated lazily by __cvmx_cmd_queue_init_state_ptr().
 */
__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
/**
 * Initialize the global queue state pointer.
 *
 * The state lives in a bootmem named block ("cvmx_cmd_queues") so
 * every user of the command queues maps the same physical memory.
 * If allocation fails because another user already created the
 * block, we attach to the existing block instead.
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
	char *alloc_name = "cvmx_cmd_queues";
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
	extern uint64_t octeon_reserve32_memory;
#endif
	/* Already initialized - nothing to do. */
	if (likely(__cvmx_cmd_queue_state_ptr))
		return CVMX_CMD_QUEUE_SUCCESS;
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
	/*
	 * Prefer allocating from the CONFIG_CAVIUM_RESERVE32 region when
	 * one exists — presumably so consumers of that reserved region
	 * can reach the state; confirm against SDK users of this block.
	 */
	if (octeon_reserve32_memory)
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
						   octeon_reserve32_memory,
						   octeon_reserve32_memory +
						   (CONFIG_CAVIUM_RESERVE32 <<
						    20) - 1, 128, alloc_name);
	else
#endif
		__cvmx_cmd_queue_state_ptr =
		    cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
					     128,
					     alloc_name);
	if (__cvmx_cmd_queue_state_ptr)
		memset(__cvmx_cmd_queue_state_ptr, 0,
		       sizeof(*__cvmx_cmd_queue_state_ptr));
	else {
		/*
		 * Allocation failed: another user may have created the
		 * named block first, so try attaching to the existing one.
		 */
		struct cvmx_bootmem_named_block_desc *block_desc =
		    cvmx_bootmem_find_named_block(alloc_name);
		if (block_desc)
			__cvmx_cmd_queue_state_ptr =
			    cvmx_phys_to_ptr(block_desc->base_addr);
		else {
			cvmx_dprintf
			    ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
			     alloc_name);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
	}
	return CVMX_CMD_QUEUE_SUCCESS;
}
/**
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the hardware unit is configured to point to the
 * new command queue.
 *
 * @queue_id:  Hardware command queue to initialize.
 * @max_depth: Maximum outstanding commands that can be queued
 *             (must be 0 when depth checking is compiled out).
 * @fpa_pool:  FPA pool (0-7) the command buffers should come from.
 * @pool_size: Size of each buffer in the FPA pool (bytes, 128-65536).
 *
 * Calling this again for an already-initialized queue returns
 * CVMX_CMD_QUEUE_ALREADY_SETUP if all parameters match, or
 * CVMX_CMD_QUEUE_INVALID_PARAM if they differ.
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
						  int max_depth, int fpa_pool,
						  int pool_size)
{
	__cvmx_cmd_queue_state_t *qstate;
	cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
	if (result != CVMX_CMD_QUEUE_SUCCESS)
		return result;
	qstate = __cvmx_cmd_queue_get_state(queue_id);
	if (qstate == NULL)
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	/*
	 * We artificially limit max_depth to 1<<20 words. It is an
	 * arbitrary limit.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
		if ((max_depth < 0) || (max_depth > 1 << 20))
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	} else if (max_depth != 0)
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	/* Valid FPA pool numbers are 0-7. */
	if ((fpa_pool < 0) || (fpa_pool > 7))
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	/* Buffer size must fit the pool_size_m1 encoding ((size>>3)-1). */
	if ((pool_size < 128) || (pool_size > 65536))
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	/* See if someone else has already initialized the queue */
	if (qstate->base_ptr_div128) {
		/* Re-init is only a no-op when every parameter matches. */
		if (max_depth != (int)qstate->max_depth) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "max_depth (%d).\n",
				     (int)qstate->max_depth);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if (fpa_pool != qstate->fpa_pool) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "FPA pool (%u).\n",
				     qstate->fpa_pool);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Queue already initialized with different "
				     "FPA pool size (%u).\n",
				     (qstate->pool_size_m1 + 1) << 3);
			return CVMX_CMD_QUEUE_INVALID_PARAM;
		}
		/* Make our reads/writes visible before returning. */
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_ALREADY_SETUP;
	} else {
		union cvmx_fpa_ctl_status status;
		void *buffer;
		/* The FPA must be running before we can pull buffers. */
		status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
		if (!status.s.enb) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "FPA is not enabled.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		buffer = cvmx_fpa_alloc(fpa_pool);
		if (buffer == NULL) {
			cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
				     "Unable to allocate initial buffer.\n");
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		memset(qstate, 0, sizeof(*qstate));
		qstate->max_depth = max_depth;
		qstate->fpa_pool = fpa_pool;
		qstate->pool_size_m1 = (pool_size >> 3) - 1;
		/* Physical address compressed into 128-byte units. */
		qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
		/*
		 * We zeroed the now serving field so we need to also
		 * zero the ticket.
		 */
		__cvmx_cmd_queue_state_ptr->
		    ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
		/*
		 * Order the stores above before other cores can observe
		 * the queue as initialized (base_ptr_div128 != 0).
		 */
		CVMX_SYNCWS;
		return CVMX_CMD_QUEUE_SUCCESS;
	}
}
/**
 * Shutdown a queue and free its command buffers to the FPA. The
 * hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @queue_id: Queue to shutdown
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
	if (qptr == NULL) {
		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
			     "get queue information.\n");
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}
	/* Refuse to tear down a queue that still has pending commands. */
	if (cvmx_cmd_queue_length(queue_id) > 0) {
		cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
			     "has data in it.\n");
		return CVMX_CMD_QUEUE_FULL;
	}
	/*
	 * NOTE(review): the length check above runs before the queue lock
	 * is taken; this looks safe only because the contract requires
	 * the hardware feeding the queue to be stopped first - confirm.
	 */
	__cvmx_cmd_queue_lock(queue_id, qptr);
	if (qptr->base_ptr_div128) {
		/* base_ptr_div128 holds phys addr >> 7; zero means unused. */
		cvmx_fpa_free(cvmx_phys_to_ptr
			      ((uint64_t) qptr->base_ptr_div128 << 7),
			      qptr->fpa_pool, 0);
		qptr->base_ptr_div128 = 0;
	}
	__cvmx_cmd_queue_unlock(qptr);
	return CVMX_CMD_QUEUE_SUCCESS;
}
/**
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @queue_id: Hardware command queue to query
 *
 * Returns Number of outstanding commands, or
 * CVMX_CMD_QUEUE_INVALID_PARAM (negative) for an unknown queue.
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
	if (CVMX_ENABLE_PARAMETER_CHECKING) {
		if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
			return CVMX_CMD_QUEUE_INVALID_PARAM;
	}
	/*
	 * The cast is here so gcc will check that all values in the
	 * cvmx_cmd_queue_id_t enumeration are handled.  The high byte
	 * of the id selects the hardware unit family.
	 */
	switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
	case CVMX_CMD_QUEUE_PKO_BASE:
		/*
		 * FIXME: Need atomic lock on
		 * CVMX_PKO_REG_READ_IDX. Right now we are normally
		 * called with the queue lock, so that is a SLIGHT
		 * amount of protection.
		 */
		/* Select which PKO queue the debug registers report on. */
		cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
			union cvmx_pko_mem_debug9 debug9;
			debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
			return debug9.cn38xx.doorbell;
		} else {
			union cvmx_pko_mem_debug8 debug8;
			debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
			return debug8.cn58xx.doorbell;
		}
	case CVMX_CMD_QUEUE_ZIP:
	case CVMX_CMD_QUEUE_DFA:
	case CVMX_CMD_QUEUE_RAID:
		/* FIXME: Implement other lengths */
		return 0;
	case CVMX_CMD_QUEUE_DMA_BASE:
		{
			union cvmx_npei_dmax_counts dmax_counts;
			dmax_counts.u64 =
			    cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
					  (queue_id & 0x7));
			return dmax_counts.s.dbell;
		}
	case CVMX_CMD_QUEUE_END:
		return CVMX_CMD_QUEUE_INVALID_PARAM;
	}
	/* Unknown queue family. */
	return CVMX_CMD_QUEUE_INVALID_PARAM;
}
  266. /**
  267. * Return the command buffer to be written to. The purpose of this
  268. * function is to allow CVMX routine access t othe low level buffer
  269. * for initial hardware setup. User applications should not call this
  270. * function directly.
  271. *
  272. * @queue_id: Command queue to query
  273. *
  274. * Returns Command buffer or NULL on failure
  275. */
  276. void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
  277. {
  278. __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
  279. if (qptr && qptr->base_ptr_div128)
  280. return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
  281. else
  282. return NULL;
  283. }