
/drivers/staging/tidspbridge/pmgr/cmm.c

https://bitbucket.org/wisechild/galaxy-nexus
/*
 * cmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Communication (Shared) Memory Management (CMM) module provides
 * shared memory management services for DSP/BIOS Bridge data streaming
 * and messaging.
 *
 * Multiple shared memory segments can be registered with CMM.
 * Each registered SM segment is represented by a SM "allocator" that
 * describes a block of physically contiguous shared memory used for
 * future allocations by CMM.
 *
 * Memory is coalesced back to the appropriate heap when a buffer is
 * freed.
 *
 * Notes:
 * Va: Virtual address.
 * Pa: Physical or kernel system address.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/list.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>
/* ----------------------------------- This */
#include <dspbridge/cmm.h>

/* ----------------------------------- Defines, Data Structures, Typedefs */
#define NEXT_PA(pnode)	(pnode->pa + pnode->size)

/* Other bus/platform translations */
#define DSPPA2GPPPA(base, x, y)	((x)+(y))
#define GPPPA2DSPPA(base, x, y)	((x)-(y))
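
/*
 * Illustrative note, not from the original source: 'base' is unused by both
 * macros, and the callers in cmm_xlator_translate() below pass
 * y = dsp_phys_addr_offset * c_factor. For example, with that product equal
 * to -0x10000000, a DSP physical address of 0x98000000 converts to a GPP
 * physical address of DSPPA2GPPPA(base, 0x98000000, -0x10000000), i.e.
 * 0x98000000 + (-0x10000000) = 0x88000000, and GPPPA2DSPPA() inverts it.
 */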
/*
 * Allocators define a block of contiguous memory used for future allocations.
 *
 * sma - shared memory allocator.
 * vma - virtual memory allocator. (not used).
 */
struct cmm_allocator {		/* sma */
	unsigned int shm_base;	/* Start of physical SM block */
	u32 sm_size;		/* Size of SM block in bytes */
	unsigned int vm_base;	/* Start of VM block. (Dev driver
				 * context for 'sma') */
	u32 dsp_phys_addr_offset;	/* DSP PA to GPP PA offset for this
					 * SM space */
	s8 c_factor;		/* DSPPa to GPPPa Conversion Factor */
	unsigned int dsp_base;	/* DSP virt base byte address */
	u32 dsp_size;		/* DSP seg size in bytes */
	struct cmm_object *cmm_mgr;	/* back ref to parent mgr */
	/* node list of available memory */
	struct list_head free_list;
	/* node list of memory in use */
	struct list_head in_use_list;
};

struct cmm_xlator {		/* Pa<->Va translator object */
	/* CMM object this translator is associated with */
	struct cmm_object *cmm_mgr;
	/*
	 * Client process virtual base address that corresponds to phys SM
	 * base address for translator's seg_id.
	 * Only 1 segment ID currently supported.
	 */
	unsigned int virt_base;	/* virtual base address */
	u32 virt_size;		/* size of virt space in bytes */
	u32 seg_id;		/* Segment Id */
};

/* CMM Mgr */
struct cmm_object {
	/*
	 * Cmm Lock is used to serialize access to the mem manager for
	 * multiple threads.
	 */
	struct mutex cmm_lock;	/* Lock to access cmm mgr */
	struct list_head node_free_list;	/* Free list of memory nodes */
	u32 min_block_size;	/* Min SM block; default 16 bytes */
	u32 page_size;		/* Memory Page size (1k/4k) */
	/* GPP SM segment ptrs */
	struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};

/* Default CMM Mgr attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
	/* min_block_size, min block size (bytes) allocated by cmm mgr */
	16
};

/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
	1		/* seg_id, default segment Id for allocator */
};

/* Address translator default attrs */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
	/* seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
	1,
	0,			/* dsp_bufs */
	0,			/* dsp_buf_size */
	NULL,			/* vm_base */
	0,			/* vm_size */
};

/* SM node representing a block of memory. */
struct cmm_mnode {
	struct list_head link;	/* must be 1st element */
	u32 pa;			/* Phys addr */
	u32 va;			/* Virtual address in device process context */
	u32 size;		/* SM block size in bytes */
	u32 client_proc;	/* Process that allocated this mem block */
};

/* ----------------------------------- Globals */
static u32 refs;		/* module reference count */

/* ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode);
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id);
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize);
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size);
/* get available slot for new allocator */
static s32 get_slot(struct cmm_object *cmm_mgr_obj);
static void un_register_gppsm_seg(struct cmm_allocator *psma);

/*
 * ======== cmm_calloc_buf ========
 * Purpose:
 *  Allocate a SM buffer, zero contents, and return the physical address
 *  and optional driver context virtual address (pp_buf_va).
 *
 *  The freelist is sorted in increasing size order. Get the first block
 *  that satisfies the request and put the remainder back on the freelist
 *  if it is large enough. The kept block is placed on the inUseList.
 */
void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
		     struct cmm_attrs *pattrs, void **pp_buf_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	void *buf_pa = NULL;
	struct cmm_mnode *pnode = NULL;
	struct cmm_mnode *new_node = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 delta_size;
	u8 *pbyte = NULL;
	s32 cnt;
	if (pattrs == NULL)
		pattrs = &cmm_dfltalctattrs;
	if (pp_buf_va != NULL)
		*pp_buf_va = NULL;
	if (cmm_mgr_obj && (usize != 0)) {
		if (pattrs->seg_id > 0) {
			/* SegId > 0 is SM */
			/* get the allocator object for this segment id */
			allocator =
			    get_allocator(cmm_mgr_obj, pattrs->seg_id);
			/* keep block size a multiple of min_block_size */
			usize =
			    ((usize - 1) & ~(cmm_mgr_obj->min_block_size - 1))
			    + cmm_mgr_obj->min_block_size;
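			/*
			 * Illustrative arithmetic (assuming min_block_size is
			 * a power of two, as the default of 16 is): the
			 * expression above rounds usize up to the next
			 * multiple of min_block_size. A request of 100 bytes
			 * becomes ((99 & ~15) + 16) = 96 + 16 = 112, while an
			 * exact multiple such as 96 stays 96.
			 */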
			mutex_lock(&cmm_mgr_obj->cmm_lock);
			pnode = get_free_block(allocator, usize);
		}
		if (pnode) {
			delta_size = (pnode->size - usize);
			if (delta_size >= cmm_mgr_obj->min_block_size) {
				/* create a new block with the leftovers and
				 * add to freelist */
				new_node =
				    get_node(cmm_mgr_obj, pnode->pa + usize,
					     pnode->va + usize,
					     (u32) delta_size);
				/* leftovers go free */
				add_to_free_list(allocator, new_node);
				/* adjust our node's size */
				pnode->size = usize;
			}
			/* Tag node with client process requesting allocation
			 * We'll need to free up a process's alloc'd SM if the
			 * client process goes away.
			 */
			/* Return TGID instead of process handle */
			pnode->client_proc = current->tgid;
			/* put our node on InUse list */
			list_add_tail(&pnode->link, &allocator->in_use_list);
			buf_pa = (void *)pnode->pa;	/* physical address */
			/* clear mem */
			pbyte = (u8 *) pnode->va;
			for (cnt = 0; cnt < (s32) usize; cnt++, pbyte++)
				*pbyte = 0;
			if (pp_buf_va != NULL) {
				/* Virtual address */
				*pp_buf_va = (void *)pnode->va;
			}
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
	}
	return buf_pa;
}
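
/*
 * A minimal usage sketch (hypothetical caller, not part of the original
 * file): allocate a zeroed SM block from segment 1 and release it again.
 * The segment must previously have been registered with
 * cmm_register_gppsm_seg().
 *
 *	struct cmm_attrs attrs = { .seg_id = 1 };
 *	void *va;
 *	void *pa = cmm_calloc_buf(cmm_mgr, 128, &attrs, &va);
 *
 *	if (pa) {
 *		... use the buffer through 'va' ...
 *		cmm_free_buf(cmm_mgr, pa, attrs.seg_id);
 *	}
 */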
/*
 * ======== cmm_create ========
 * Purpose:
 *  Create a communication memory manager object.
 */
int cmm_create(struct cmm_object **ph_cmm_mgr,
	       struct dev_object *hdev_obj,
	       const struct cmm_mgrattrs *mgr_attrts)
{
	struct cmm_object *cmm_obj = NULL;
	int status = 0;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);
	*ph_cmm_mgr = NULL;
	/* create, zero, and tag a cmm mgr object */
	cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
	if (!cmm_obj)
		return -ENOMEM;
	if (mgr_attrts == NULL)
		mgr_attrts = &cmm_dfltmgrattrs;	/* set defaults */
	/* 4 bytes minimum */
	DBC_ASSERT(mgr_attrts->min_block_size >= 4);
	/* save away smallest block allocation for this cmm mgr */
	cmm_obj->min_block_size = mgr_attrts->min_block_size;
	cmm_obj->page_size = PAGE_SIZE;
	/* create node free list */
	INIT_LIST_HEAD(&cmm_obj->node_free_list);
	mutex_init(&cmm_obj->cmm_lock);
	*ph_cmm_mgr = cmm_obj;
	return status;
}

/*
 * ======== cmm_destroy ========
 * Purpose:
 *  Release the communication memory manager resources.
 */
int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_info temp_info;
	int status = 0;
	s32 slot_seg;
	struct cmm_mnode *node, *tmp;
	DBC_REQUIRE(refs > 0);
	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	/* If not force then fail if outstanding allocations exist */
	if (!force) {
		/* Check for outstanding memory allocations */
		status = cmm_get_info(hcmm_mgr, &temp_info);
		if (!status) {
			if (temp_info.total_in_use_cnt > 0) {
				/* outstanding allocations */
				status = -EPERM;
			}
		}
	}
	if (!status) {
		/* UnRegister SM allocator */
		for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
			if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
				un_register_gppsm_seg
				    (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
				/* Set slot to NULL for future reuse */
				cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
			}
		}
	}
	list_for_each_entry_safe(node, tmp, &cmm_mgr_obj->node_free_list,
				 link) {
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	if (!status) {
		/* delete CS & cmm mgr object */
		mutex_destroy(&cmm_mgr_obj->cmm_lock);
		kfree(cmm_mgr_obj);
	}
	return status;
}

/*
 * ======== cmm_exit ========
 * Purpose:
 *  Discontinue usage of module; free resources when reference count
 *  reaches 0.
 */
void cmm_exit(void)
{
	DBC_REQUIRE(refs > 0);
	refs--;
}

/*
 * ======== cmm_free_buf ========
 * Purpose:
 *  Free the given buffer.
 */
int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = -EFAULT;
	struct cmm_mnode *curr, *tmp;
	struct cmm_allocator *allocator;
	struct cmm_attrs *pattrs;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(buf_pa != NULL);
	if (ul_seg_id == 0) {
		pattrs = &cmm_dfltalctattrs;
		ul_seg_id = pattrs->seg_id;
	}
	if (!hcmm_mgr || !(ul_seg_id > 0)) {
		status = -EFAULT;
		return status;
	}
	allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
	if (!allocator)
		return status;
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) {
		if (curr->pa == (u32) buf_pa) {
			list_del(&curr->link);
			add_to_free_list(allocator, curr);
			status = 0;
			break;
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}

/*
 * ======== cmm_get_handle ========
 * Purpose:
 *  Return the communication memory manager object for this device.
 *  This is typically called from the client process.
 */
int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
	int status = 0;
	struct dev_object *hdev_obj;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(ph_cmm_mgr != NULL);
	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */
	if (!status)
		status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);
	return status;
}

/*
 * ======== cmm_get_info ========
 * Purpose:
 *  Return the current memory utilization information.
 */
int cmm_get_info(struct cmm_object *hcmm_mgr,
		 struct cmm_info *cmm_info_obj)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	u32 ul_seg;
	int status = 0;
	struct cmm_allocator *altr;
	struct cmm_mnode *curr;
	DBC_REQUIRE(cmm_info_obj != NULL);
	if (!hcmm_mgr) {
		status = -EFAULT;
		return status;
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	cmm_info_obj->num_gppsm_segs = 0;	/* # of SM segments */
	/* Total # of outstanding alloc */
	cmm_info_obj->total_in_use_cnt = 0;
	/* min block size */
	cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size;
	/* check SM memory segments */
	for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
		/* get the allocator object for this segment id */
		altr = get_allocator(cmm_mgr_obj, ul_seg);
		if (!altr)
			continue;
		cmm_info_obj->num_gppsm_segs++;
		cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
		    altr->shm_base - altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].total_seg_size =
		    altr->dsp_size + altr->sm_size;
		cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa =
		    altr->shm_base;
		cmm_info_obj->seg_info[ul_seg - 1].gpp_size =
		    altr->sm_size;
		cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va =
		    altr->dsp_base;
		cmm_info_obj->seg_info[ul_seg - 1].dsp_size =
		    altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].seg_base_va =
		    altr->vm_base - altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0;
		list_for_each_entry(curr, &altr->in_use_list, link) {
			cmm_info_obj->total_in_use_cnt++;
			cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++;
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}

/*
 * ======== cmm_init ========
 * Purpose:
 *  Initializes private state of CMM module.
 */
bool cmm_init(void)
{
	bool ret = true;
	DBC_REQUIRE(refs >= 0);
	if (ret)
		refs++;
	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
	return ret;
}

/*
 * ======== cmm_register_gppsm_seg ========
 * Purpose:
 *  Register a block of SM with the CMM to be used for later GPP SM
 *  allocations.
 */
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			   u32 dw_gpp_base_pa, u32 ul_size,
			   u32 dsp_addr_offset, s8 c_factor,
			   u32 dw_dsp_base, u32 ul_dsp_size,
			   u32 *sgmt_id, u32 gpp_base_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_allocator *psma = NULL;
	int status = 0;
	struct cmm_mnode *new_node;
	s32 slot_seg;
	DBC_REQUIRE(ul_size > 0);
	DBC_REQUIRE(sgmt_id != NULL);
	DBC_REQUIRE(dw_gpp_base_pa != 0);
	DBC_REQUIRE(gpp_base_va != 0);
	DBC_REQUIRE((c_factor <= CMM_ADDTODSPPA) &&
		    (c_factor >= CMM_SUBFROMDSPPA));
	dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x "
		"dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
		__func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
		dw_dsp_base, ul_dsp_size, gpp_base_va);
	if (!hcmm_mgr)
		return -EFAULT;
	/* make sure we have room for another allocator */
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	slot_seg = get_slot(cmm_mgr_obj);
	if (slot_seg < 0) {
		status = -EPERM;
		goto func_end;
	}
	/* Check if input ul_size is big enough to alloc at least one block */
	if (ul_size < cmm_mgr_obj->min_block_size) {
		status = -EINVAL;
		goto func_end;
	}
	/* create, zero, and tag an SM allocator object */
	psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
	if (!psma) {
		status = -ENOMEM;
		goto func_end;
	}
	psma->cmm_mgr = hcmm_mgr;	/* ref to parent */
	psma->shm_base = dw_gpp_base_pa;	/* SM Base phys */
	psma->sm_size = ul_size;	/* SM segment size in bytes */
	psma->vm_base = gpp_base_va;
	psma->dsp_phys_addr_offset = dsp_addr_offset;
	psma->c_factor = c_factor;
	psma->dsp_base = dw_dsp_base;
	psma->dsp_size = ul_dsp_size;
	if (psma->vm_base == 0) {
		status = -EPERM;
		goto func_end;
	}
	/* return the actual segment identifier */
	*sgmt_id = (u32) slot_seg + 1;
	INIT_LIST_HEAD(&psma->free_list);
	INIT_LIST_HEAD(&psma->in_use_list);
	/* Get a mem node for this hunk-o-memory */
	new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
			    psma->vm_base, ul_size);
	/* Place node on the SM allocator's free list */
	if (new_node) {
		list_add_tail(&new_node->link, &psma->free_list);
	} else {
		status = -ENOMEM;
		goto func_end;
	}
	/* make entry */
	cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;
func_end:
	/* Cleanup allocator */
	if (status && psma)
		un_register_gppsm_seg(psma);
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}
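
/*
 * Purely illustrative registration (the values are made up, not from the
 * original driver): register a 1 MB shared-memory block whose GPP physical
 * base is 0x87000000 and whose kernel virtual mapping is 'base_va', with no
 * DSP-visible translation region.
 *
 *	u32 seg_id;
 *	int err = cmm_register_gppsm_seg(cmm_mgr, 0x87000000, 0x100000,
 *					 0, CMM_ADDTODSPPA, 0, 0,
 *					 &seg_id, (u32) base_va);
 *
 * On success *sgmt_id receives slot + 1, i.e. the segment id later passed to
 * cmm_calloc_buf() and cmm_free_buf().
 */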
/*
 * ======== cmm_un_register_gppsm_seg ========
 * Purpose:
 *  UnRegister GPP SM segments with the CMM.
 */
int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			      u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = 0;
	struct cmm_allocator *psma;
	u32 ul_id = ul_seg_id;
	DBC_REQUIRE(ul_seg_id > 0);
	if (!hcmm_mgr)
		return -EFAULT;
	if (ul_seg_id == CMM_ALLSEGMENTS)
		ul_id = 1;
	if ((ul_id <= 0) || (ul_id > CMM_MAXGPPSEGS))
		return -EINVAL;
	/*
	 * FIXME: CMM_MAXGPPSEGS == 1. why use a while cycle? Seems to me like
	 * the ul_seg_id is not needed here. It must be always 1.
	 */
	while (ul_id <= CMM_MAXGPPSEGS) {
		mutex_lock(&cmm_mgr_obj->cmm_lock);
		/* slot = seg_id-1 */
		psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
		if (psma != NULL) {
			un_register_gppsm_seg(psma);
			/* Set alctr ptr to NULL for future reuse */
			cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1] = NULL;
		} else if (ul_seg_id != CMM_ALLSEGMENTS) {
			status = -EPERM;
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
		if (ul_seg_id != CMM_ALLSEGMENTS)
			break;
		ul_id++;
	}			/* end while */
	return status;
}

/*
 * ======== un_register_gppsm_seg ========
 * Purpose:
 *  UnRegister the SM allocator by freeing all its resources and
 *  nulling cmm mgr table entry.
 * Note:
 *  This routine is always called within cmm lock crit sect.
 */
static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
	struct cmm_mnode *curr, *tmp;
	DBC_REQUIRE(psma != NULL);
	/* free nodes on free list */
	list_for_each_entry_safe(curr, tmp, &psma->free_list, link) {
		list_del(&curr->link);
		kfree(curr);
	}
	/* free nodes on InUse list */
	list_for_each_entry_safe(curr, tmp, &psma->in_use_list, link) {
		list_del(&curr->link);
		kfree(curr);
	}
	if ((void *)psma->vm_base != NULL)
		MEM_UNMAP_LINEAR_ADDRESS((void *)psma->vm_base);
	/* Free allocator itself */
	kfree(psma);
}

/*
 * ======== get_slot ========
 * Purpose:
 *  An available slot # is returned. Returns negative on failure.
 */
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
	s32 slot_seg = -1;	/* neg on failure */
	DBC_REQUIRE(cmm_mgr_obj != NULL);
	/* get first available slot in cmm mgr SMSegTab[] */
	for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
		if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
			break;
	}
	if (slot_seg == CMM_MAXGPPSEGS)
		slot_seg = -1;	/* failed */
	return slot_seg;
}

/*
 * ======== get_node ========
 * Purpose:
 *  Get a memory node from freelist or create a new one.
 */
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size)
{
	struct cmm_mnode *pnode;
	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE(dw_pa != 0);
	DBC_REQUIRE(dw_va != 0);
	DBC_REQUIRE(ul_size != 0);
	/* Check cmm mgr's node freelist */
	if (list_empty(&cmm_mgr_obj->node_free_list)) {
		pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
		if (!pnode)
			return NULL;
	} else {
		/* surely a valid element */
		pnode = list_first_entry(&cmm_mgr_obj->node_free_list,
					 struct cmm_mnode, link);
		list_del_init(&pnode->link);
	}
	pnode->pa = dw_pa;
	pnode->va = dw_va;
	pnode->size = ul_size;
	return pnode;
}

/*
 * ======== delete_node ========
 * Purpose:
 *  Put a memory node on the cmm nodelist for later use.
 *  Doesn't actually delete the node. Heap thrashing friendly.
 */
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
	DBC_REQUIRE(pnode != NULL);
	list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list);
}

/*
 * ======== get_free_block ========
 * Purpose:
 *  Scan the free block list and return the first block that satisfies
 *  the size.
 */
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize)
{
	struct cmm_mnode *node, *tmp;
	if (!allocator)
		return NULL;
	list_for_each_entry_safe(node, tmp, &allocator->free_list, link) {
		if (usize <= node->size) {
			list_del(&node->link);
			return node;
		}
	}
	return NULL;
}

/*
 * ======== add_to_free_list ========
 * Purpose:
 *  Coalesce node into the freelist in ascending size order.
 */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *node)
{
	struct cmm_mnode *curr;
	if (!node) {
		pr_err("%s: failed - node is NULL\n", __func__);
		return;
	}
	list_for_each_entry(curr, &allocator->free_list, link) {
		if (NEXT_PA(curr) == node->pa) {
			curr->size += node->size;
			delete_node(allocator->cmm_mgr, node);
			return;
		}
		if (curr->pa == NEXT_PA(node)) {
			curr->pa = node->pa;
			curr->va = node->va;
			curr->size += node->size;
			delete_node(allocator->cmm_mgr, node);
			return;
		}
	}
	list_for_each_entry(curr, &allocator->free_list, link) {
		if (curr->size >= node->size) {
			list_add_tail(&node->link, &curr->link);
			return;
		}
	}
	list_add_tail(&node->link, &allocator->free_list);
}
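
/*
 * Illustrative behaviour, not from the original source: with free blocks of
 * sizes {32, 128} on the list, freeing a 64-byte block that is not
 * physically adjacent to either of them inserts it between the two, giving
 * {32, 64, 128}, so get_free_block()'s first fit is also the smallest fit.
 * If instead the freed block starts exactly at NEXT_PA() of the 32-byte
 * block, it is merged into that block, which simply grows to 96 bytes and
 * the freed node's descriptor is recycled via delete_node().
 */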
/*
 * ======== get_allocator ========
 * Purpose:
 *  Return the allocator for the given SM Segid.
 *  SegIds: 1,2,3..max.
 */
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id)
{
	DBC_REQUIRE(cmm_mgr_obj != NULL);
	DBC_REQUIRE((ul_seg_id > 0) && (ul_seg_id <= CMM_MAXGPPSEGS));
	return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}

/*
 * The CMM_Xlator[xxx] routines below are used by Node and Stream
 * to perform SM address translation to the client process address space.
 * A "translator" object is created by a node/stream for each SM seg used.
 */

/*
 * ======== cmm_xlator_create ========
 * Purpose:
 *  Create an address translator object.
 */
int cmm_xlator_create(struct cmm_xlatorobject **xlator,
		      struct cmm_object *hcmm_mgr,
		      struct cmm_xlatorattrs *xlator_attrs)
{
	struct cmm_xlator *xlator_object = NULL;
	int status = 0;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(xlator != NULL);
	DBC_REQUIRE(hcmm_mgr != NULL);
	*xlator = NULL;
	if (xlator_attrs == NULL)
		xlator_attrs = &cmm_dfltxlatorattrs;	/* set defaults */
	xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
	if (xlator_object != NULL) {
		xlator_object->cmm_mgr = hcmm_mgr;	/* ref back to CMM */
		/* SM seg_id */
		xlator_object->seg_id = xlator_attrs->seg_id;
	} else {
		status = -ENOMEM;
	}
	if (!status)
		*xlator = (struct cmm_xlatorobject *)xlator_object;
	return status;
}

/*
 * ======== cmm_xlator_alloc_buf ========
 */
void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
			   u32 pa_size)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	void *pbuf = NULL;
	void *tmp_va_buff;
	struct cmm_attrs attrs;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(xlator != NULL);
	DBC_REQUIRE(xlator_obj->cmm_mgr != NULL);
	DBC_REQUIRE(va_buf != NULL);
	DBC_REQUIRE(pa_size > 0);
	DBC_REQUIRE(xlator_obj->seg_id > 0);
	if (xlator_obj) {
		attrs.seg_id = xlator_obj->seg_id;
		__raw_writel(0, va_buf);
		/* Alloc SM */
		pbuf =
		    cmm_calloc_buf(xlator_obj->cmm_mgr, pa_size, &attrs, NULL);
		if (pbuf) {
			/* convert to translator(node/strm) process Virtual
			 * address */
			tmp_va_buff = cmm_xlator_translate(xlator,
							   pbuf, CMM_PA2VA);
			__raw_writel((u32)tmp_va_buff, va_buf);
		}
	}
	return pbuf;
}

/*
 * ======== cmm_xlator_free_buf ========
 * Purpose:
 *  Free the given SM buffer and descriptor.
 *  Does not free virtual memory.
 */
int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = -EPERM;
	void *buf_pa = NULL;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(buf_va != NULL);
	DBC_REQUIRE(xlator_obj->seg_id > 0);
	if (xlator_obj) {
		/* convert Va to Pa so we can free it. */
		buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
		if (buf_pa) {
			status = cmm_free_buf(xlator_obj->cmm_mgr, buf_pa,
					      xlator_obj->seg_id);
			if (status) {
				/* Uh oh, this shouldn't happen. Descriptor
				 * gone! */
				DBC_ASSERT(false);	/* CMM is leaking mem */
			}
		}
	}
	return status;
}

/*
 * ======== cmm_xlator_info ========
 * Purpose:
 *  Set/Get translator info.
 */
int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
		    u32 ul_size, u32 segm_id, bool set_info)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = 0;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(paddr != NULL);
	DBC_REQUIRE((segm_id > 0) && (segm_id <= CMM_MAXGPPSEGS));
	if (xlator_obj) {
		if (set_info) {
			/* set translators virtual address range */
			xlator_obj->virt_base = (u32) *paddr;
			xlator_obj->virt_size = ul_size;
		} else {	/* return virt base address */
			*paddr = (u8 *) xlator_obj->virt_base;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 * ======== cmm_xlator_translate ========
 */
void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
			   enum cmm_xlatetype xtype)
{
	u32 dw_addr_xlate = 0;
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	struct cmm_object *cmm_mgr_obj = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 dw_offset = 0;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(paddr != NULL);
	DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));
	if (!xlator_obj)
		goto loop_cont;
	cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
	/* get this translator's default SM allocator */
	DBC_ASSERT(xlator_obj->seg_id > 0);
	allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
	if (!allocator)
		goto loop_cont;
	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
	    (xtype == CMM_PA2VA)) {
		if (xtype == CMM_PA2VA) {
			/* Gpp Va = Va Base + offset */
			dw_offset = (u8 *) paddr -
			    (u8 *) (allocator->shm_base -
				    allocator->dsp_size);
			dw_addr_xlate = xlator_obj->virt_base + dw_offset;
			/* Check if translated Va base is in range */
			if ((dw_addr_xlate < xlator_obj->virt_base) ||
			    (dw_addr_xlate >=
			     (xlator_obj->virt_base +
			      xlator_obj->virt_size))) {
				dw_addr_xlate = 0;	/* bad address */
			}
		} else {
			/* Gpp PA = Gpp Base + offset */
			dw_offset =
			    (u8 *) paddr - (u8 *) xlator_obj->virt_base;
			dw_addr_xlate =
			    allocator->shm_base - allocator->dsp_size +
			    dw_offset;
		}
	} else {
		dw_addr_xlate = (u32) paddr;
	}
	/* Now convert address to proper target physical address if needed */
	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
		/* Got Gpp Pa now, convert to DSP Pa */
		dw_addr_xlate =
		    GPPPA2DSPPA((allocator->shm_base - allocator->dsp_size),
				dw_addr_xlate,
				allocator->dsp_phys_addr_offset *
				allocator->c_factor);
	} else if (xtype == CMM_DSPPA2PA) {
		/* Got DSP Pa, convert to GPP Pa */
		dw_addr_xlate =
		    DSPPA2GPPPA(allocator->shm_base - allocator->dsp_size,
				dw_addr_xlate,
				allocator->dsp_phys_addr_offset *
				allocator->c_factor);
	}
loop_cont:
	return (void *)dw_addr_xlate;
}
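
/*
 * Translation arithmetic, illustrated with made-up values (not from the
 * original source). Assume the translator's segment was registered with
 * shm_base = 0x87000000 and dsp_size = 0x1000, and the client mapping was
 * recorded via cmm_xlator_info() as virt_base = 0x40001000:
 *
 *	CMM_PA2VA: va = virt_base + (pa - (shm_base - dsp_size))
 *	           0x87001000 -> 0x40001000 + 0x2000 = 0x40003000
 *	CMM_VA2PA: pa = (shm_base - dsp_size) + (va - virt_base)
 *	           0x40003000 -> 0x86FFF000 + 0x2000 = 0x87001000
 *
 * CMM_VA2DSPPA and CMM_PA2DSPPA then apply GPPPA2DSPPA(), and CMM_DSPPA2PA
 * applies DSPPA2GPPPA(), shifting the result by
 * dsp_phys_addr_offset * c_factor as registered for the segment.
 */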