
/drivers/staging/tidspbridge/pmgr/dmm.c

https://bitbucket.org/slukk/jb-tsm-kernel-4.2
/*
 * dmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Dynamic Memory Manager (DMM) module manages the DSP virtual address
 * space that can be directly mapped to any MPU buffer or memory region.
 *
 * Notes:
 * Region: Generic memory entity having a start address and a size
 * Chunk: Reserved region
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>

/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/* ----------------------------------- This */
#include <dspbridge/dmm.h>

/* ----------------------------------- Defines, Data Structures, Typedefs */
#define DMM_ADDR_VIRTUAL(a) \
	(((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
	 dyn_mem_map_beg)
#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
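
/*
 * Worked example of the two conversions above (the addresses are
 * illustrative, not taken from any particular board configuration):
 * assume the dynamic mapping area begins at dyn_mem_map_beg = 0x20000000.
 * The table entry at index 3 then corresponds to the DSP virtual address
 * 0x20000000 + 3 * PG_SIZE4K = 0x20003000, and
 * DMM_ADDR_TO_INDEX(0x20003000) maps that address back to index 3.
 */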
/* DMM Mgr */
struct dmm_object {
	/* Dmm lock is used to serialize access to the mem manager for
	 * multiple threads. */
	spinlock_t dmm_lock;	/* Lock to access dmm mgr */
};

/* ----------------------------------- Globals */
static u32 refs;		/* module reference count */

struct map_page {
	u32 region_size:15;
	u32 mapped_size:15;
	u32 reserved:1;
	u32 mapped:1;
};

/* Create the free list */
static struct map_page *virtual_mapping_table;
static u32 free_region;		/* The index of free region */
static u32 free_size;
static u32 dyn_mem_map_beg;	/* The beginning of dynamic memory mapping */
static u32 table_size;		/* The size of virt and phys pages tables */

/* ----------------------------------- Function Prototypes */
static struct map_page *get_region(u32 addr);
static struct map_page *get_free_region(u32 len);
static struct map_page *get_mapped_region(u32 addrs);
/*  ======== dmm_create_tables ========
 *  Purpose:
 *      Create a table to hold information on the physical pages of the
 *      buffer passed in by the user, and a table to hold information on
 *      the virtual memory that is reserved for the DSP.
 */
int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	int status = 0;

	status = dmm_delete_tables(dmm_obj);
	if (!status) {
		dyn_mem_map_beg = addr;
		table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
		/* Create the free list */
		virtual_mapping_table = __vmalloc(table_size *
				sizeof(struct map_page), GFP_KERNEL |
				__GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
		if (virtual_mapping_table == NULL)
			status = -ENOMEM;
		else {
			/* On successful allocation,
			 * all entries are zero ('free') */
			free_region = 0;
			free_size = table_size * PG_SIZE4K;
			virtual_mapping_table[0].region_size = table_size;
		}
	}

	if (status)
		pr_err("%s: failure, status 0x%x\n", __func__, status);

	return status;
}
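
/*
 * Sizing example (illustrative figures only): for a 256 MB DSP virtual
 * region, size = 0x10000000, so table_size = 0x10000000 / PG_SIZE4K =
 * 65536 entries, and the table itself occupies
 * 65536 * sizeof(struct map_page) = 256 KB of vmalloc space.
 */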
/*
 *  ======== dmm_create ========
 *  Purpose:
 *      Create a dynamic memory manager object.
 */
int dmm_create(struct dmm_object **dmm_manager,
	       struct dev_object *hdev_obj,
	       const struct dmm_mgrattrs *mgr_attrts)
{
	struct dmm_object *dmm_obj = NULL;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(dmm_manager != NULL);

	*dmm_manager = NULL;
	/* create, zero, and tag a dmm mgr object */
	dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
	if (dmm_obj != NULL) {
		spin_lock_init(&dmm_obj->dmm_lock);
		*dmm_manager = dmm_obj;
	} else {
		status = -ENOMEM;
	}

	return status;
}
/*
 *  ======== dmm_destroy ========
 *  Purpose:
 *      Release the dynamic memory manager resources.
 */
int dmm_destroy(struct dmm_object *dmm_mgr)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	if (dmm_mgr) {
		status = dmm_delete_tables(dmm_obj);
		if (!status)
			kfree(dmm_obj);
	} else
		status = -EFAULT;

	return status;
}
/*
 *  ======== dmm_delete_tables ========
 *  Purpose:
 *      Delete DMM Tables.
 */
int dmm_delete_tables(struct dmm_object *dmm_mgr)
{
	int status = 0;

	DBC_REQUIRE(refs > 0);
	/* Delete all DMM tables */
	if (dmm_mgr)
		vfree(virtual_mapping_table);
	else
		status = -EFAULT;

	return status;
}
/*
 *  ======== dmm_exit ========
 *  Purpose:
 *      Discontinue usage of module; free resources when reference count
 *      reaches 0.
 */
void dmm_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;
}
/*
 *  ======== dmm_get_handle ========
 *  Purpose:
 *      Return the dynamic memory manager object for this device.
 *      This is typically called from the client process.
 */
int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
{
	int status = 0;
	struct dev_object *hdev_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(dmm_manager != NULL);
	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (!status)
		status = dev_get_dmm_mgr(hdev_obj, dmm_manager);

	return status;
}
/*
 *  ======== dmm_init ========
 *  Purpose:
 *      Initializes private state of DMM module.
 */
bool dmm_init(void)
{
	bool ret = true;

	DBC_REQUIRE(refs >= 0);

	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	virtual_mapping_table = NULL;
	table_size = 0;

	return ret;
}
/*
 *  ======== dmm_map_memory ========
 *  Purpose:
 *      Add a mapping block to the reserved chunk. DMM assumes that this
 *      block will be mapped in the DSP/IVA's address space. DMM returns an
 *      error if a mapping overlaps another one. This function stores the
 *      info that will be required later while unmapping the block.
 */
int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	int status = 0;

	spin_lock(&dmm_obj->dmm_lock);
	/* Find the Reserved memory chunk containing the DSP block to
	 * be mapped */
	chunk = (struct map_page *)get_region(addr);
	if (chunk != NULL) {
		/* Mark the region 'mapped', leave the 'reserved' info as-is */
		chunk->mapped = true;
		chunk->mapped_size = (size / PG_SIZE4K);
	} else
		status = -ENOENT;
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
		"chunk %p", __func__, dmm_mgr, addr, size, status, chunk);

	return status;
}
/*
 *  ======== dmm_reserve_memory ========
 *  Purpose:
 *      Reserve a chunk of virtually contiguous DSP/IVA address space.
 */
int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
		       u32 *prsv_addr)
{
	int status = 0;
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *node;
	u32 rsv_addr = 0;
	u32 rsv_size = 0;

	spin_lock(&dmm_obj->dmm_lock);

	/* Try to get a DSP chunk from the free list */
	node = get_free_region(size);
	if (node != NULL) {
		/* DSP chunk of given size is available. */
		rsv_addr = DMM_ADDR_VIRTUAL(node);
		/* Calculate the number of entries to use */
		rsv_size = size / PG_SIZE4K;
		if (rsv_size < node->region_size) {
			/* Mark remainder of free region */
			node[rsv_size].mapped = false;
			node[rsv_size].reserved = false;
			node[rsv_size].region_size =
			    node->region_size - rsv_size;
			node[rsv_size].mapped_size = 0;
		}
		/* get_free_region returns a first-fit chunk, but we only
		 * use what was requested. */
		node->mapped = false;
		node->reserved = true;
		node->region_size = rsv_size;
		node->mapped_size = 0;
		/* Return the chunk's starting address */
		*prsv_addr = rsv_addr;
	} else
		/* DSP chunk of given size is not available */
		status = -ENOMEM;

	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
		"rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
		prsv_addr, status, rsv_addr, rsv_size);

	return status;
}
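
/*
 * Splitting example for the reservation above (page counts are made up for
 * illustration): reserving 3 pages out of an 8-page free region marks
 * node[0] reserved with region_size = 3 and marks node[3] as a new free
 * region with region_size = 5, so the remaining 5 pages stay available for
 * later reservations.
 */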
/*
 *  ======== dmm_un_map_memory ========
 *  Purpose:
 *      Remove the mapped block from the reserved chunk.
 */
int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	int status = 0;

	spin_lock(&dmm_obj->dmm_lock);
	chunk = get_mapped_region(addr);
	if (chunk == NULL)
		status = -ENOENT;

	if (!status) {
		/* Unmap the region */
		*psize = chunk->mapped_size * PG_SIZE4K;
		chunk->mapped = false;
		chunk->mapped_size = 0;
	}
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
		"chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);

	return status;
}
/*
 *  ======== dmm_un_reserve_memory ========
 *  Purpose:
 *      Free a chunk of reserved DSP/IVA address space.
 */
int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
{
	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
	struct map_page *chunk;
	u32 i;
	int status = 0;
	u32 chunk_size;

	spin_lock(&dmm_obj->dmm_lock);

	/* Find the chunk containing the reserved address */
	chunk = get_mapped_region(rsv_addr);
	if (chunk == NULL)
		status = -ENOENT;

	if (!status) {
		/* Free all the mapped pages for this reserved region */
		i = 0;
		while (i < chunk->region_size) {
			if (chunk[i].mapped) {
				/* Remove mapping from the page tables. */
				chunk_size = chunk[i].mapped_size;
				/* Clear the mapping flags */
				chunk[i].mapped = false;
				chunk[i].mapped_size = 0;
				i += chunk_size;
			} else
				i++;
		}

		/* Clear the flags (mark the region 'free') */
		chunk->reserved = false;
		/* NOTE: We do NOT coalesce free regions here.
		 * Free regions are coalesced in get_free_region(), as it
		 * traverses the whole mapping table.
		 */
	}
	spin_unlock(&dmm_obj->dmm_lock);

	dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p",
		__func__, dmm_mgr, rsv_addr, status, chunk);

	return status;
}
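
/*
 * Illustrative sketch of how the four calls above fit together, guarded by
 * the hypothetical DSP_DMM_USAGE_EXAMPLE macro so it is normally compiled
 * out (it is not part of the driver proper).  Real callers add their own
 * error handling and perform the actual MMU programming between the map
 * and unmap steps; size is assumed to be a multiple of PG_SIZE4K.
 */
#ifdef DSP_DMM_USAGE_EXAMPLE
static int dmm_usage_example(struct dmm_object *dmm_mgr, u32 size)
{
	u32 rsv_addr = 0;
	u32 unmap_size = 0;
	int status;

	/* Reserve a virtually contiguous DSP address range */
	status = dmm_reserve_memory(dmm_mgr, size, &rsv_addr);
	if (status)
		return status;

	/* Record that the range is now backed by an MPU buffer */
	status = dmm_map_memory(dmm_mgr, rsv_addr, size);
	if (!status) {
		/* ... the DSP uses the mapping here ... */
		dmm_un_map_memory(dmm_mgr, rsv_addr, &unmap_size);
	}

	/* Return the address range to the free pool */
	dmm_un_reserve_memory(dmm_mgr, rsv_addr);

	return status;
}
#endif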
/*
 *  ======== get_region ========
 *  Purpose:
 *      Returns the region containing the specified memory address
 */
static struct map_page *get_region(u32 addr)
{
	struct map_page *curr_region = NULL;
	u32 i = 0;

	if (virtual_mapping_table != NULL) {
		/* find the page mapped by this address */
		i = DMM_ADDR_TO_INDEX(addr);
		if (i < table_size)
			curr_region = virtual_mapping_table + i;
	}

	dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
		__func__, curr_region, free_region, free_size);

	return curr_region;
}
/*
 *  ======== get_free_region ========
 *  Purpose:
 *      Returns a free region large enough for the requested length
 */
static struct map_page *get_free_region(u32 len)
{
	struct map_page *curr_region = NULL;
	u32 i = 0;
	u32 region_size = 0;
	u32 next_i = 0;

	if (virtual_mapping_table == NULL)
		return curr_region;
	if (len > free_size) {
		/* Find the largest free region
		 * (coalesce during the traversal) */
		while (i < table_size) {
			region_size = virtual_mapping_table[i].region_size;
			next_i = i + region_size;
			if (virtual_mapping_table[i].reserved == false) {
				/* Coalesce, if possible */
				if (next_i < table_size &&
				    virtual_mapping_table[next_i].reserved
				    == false) {
					virtual_mapping_table[i].region_size +=
					    virtual_mapping_table
					    [next_i].region_size;
					continue;
				}
				region_size *= PG_SIZE4K;
				if (region_size > free_size) {
					free_region = i;
					free_size = region_size;
				}
			}
			i = next_i;
		}
	}
	if (len <= free_size) {
		curr_region = virtual_mapping_table + free_region;
		free_region += (len / PG_SIZE4K);
		free_size -= len;
	}
	return curr_region;
}
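
/*
 * Allocation strategy, illustrated with made-up numbers: free_region and
 * free_size cache the largest free region found so far.  A 64 KB request
 * (16 pages) that fits the cached region is carved off immediately and the
 * cache is advanced by 16 entries; only when the request exceeds free_size
 * does the loop above rescan the whole table, merging adjacent unreserved
 * regions as it goes, before retrying the allocation.
 */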
/*
 *  ======== get_mapped_region ========
 *  Purpose:
 *      Returns the requested mapped region
 */
static struct map_page *get_mapped_region(u32 addrs)
{
	u32 i = 0;
	struct map_page *curr_region = NULL;

	if (virtual_mapping_table == NULL)
		return curr_region;

	i = DMM_ADDR_TO_INDEX(addrs);
	if (i < table_size && (virtual_mapping_table[i].mapped ||
			       virtual_mapping_table[i].reserved))
		curr_region = virtual_mapping_table + i;

	return curr_region;
}
#ifdef DSP_DMM_DEBUG
u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
{
	struct map_page *curr_node = NULL;
	u32 i;
	u32 freemem = 0;
	u32 bigsize = 0;

	spin_lock(&dmm_mgr->dmm_lock);
	if (virtual_mapping_table != NULL) {
		for (i = 0; i < table_size; i +=
		     virtual_mapping_table[i].region_size) {
			curr_node = virtual_mapping_table + i;
			if (curr_node->reserved) {
				/*printk("RESERVED size = 0x%x, "
				   "Map size = 0x%x\n",
				   (curr_node->region_size * PG_SIZE4K),
				   (curr_node->mapped == false) ? 0 :
				   (curr_node->mapped_size * PG_SIZE4K));
				 */
			} else {
				/* printk("UNRESERVED size = 0x%x\n",
				   (curr_node->region_size * PG_SIZE4K));
				 */
				freemem += (curr_node->region_size * PG_SIZE4K);
				if (curr_node->region_size > bigsize)
					bigsize = curr_node->region_size;
			}
		}
	}
	spin_unlock(&dmm_mgr->dmm_lock);

	printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
	       freemem / (1024 * 1024));
	printk(KERN_INFO "Total DSP VA USED memory = %d Mbytes\n",
	       (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
	printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes\n\n",
	       (bigsize * PG_SIZE4K / (1024 * 1024)));

	return 0;
}
#endif