/* kernel/bmeas-dma.c */
  1. /*
  2. * Copyright CERN 2013
  3. * Author: Daniel Oberson
  4. *
  5. * DMA of bmeas
  6. */
  7. #include <linux/kernel.h>
  8. #include <linux/module.h>
  9. #include <linux/init.h>
  10. #include <linux/types.h>
  11. #include <linux/list.h>
  12. #include <linux/mm.h>
  13. #include <linux/slab.h>
  14. #include <linux/dma-mapping.h>
  15. #include <linux/scatterlist.h>
  16. #include <linux/fmc.h>
  17. #include "bmeas.h"
  18. /*
  19. * calculate_nents
  20. *
  21. * It calculates the number of pages
  22. */
  23. static int calculate_n_pages(unsigned int n_data, unsigned int n_bytes_data)
  24. {
  25. int n_pages;
  26. n_pages = n_data*n_bytes_data/PAGE_SIZE;
  27. if (n_pages*PAGE_SIZE<n_data*n_bytes_data)
  28. n_pages += 1;
  29. return n_pages;
  30. }
  31. /*
  32. * setup_dma_scatter
  33. *
  34. * Initialize each element of the scatter list
  35. */
  36. static void setup_dma_scatter(struct bmeas_dev *bmeasdev,
  37. int size_dma_buffer)
  38. {
  39. struct scatterlist *sg;
  40. int bytesleft = 0;
  41. void *bufp = NULL;
  42. int mapbytes;
  43. int i;
  44. for_each_sg(bmeasdev->sgt.sgl, sg, bmeasdev->sgt.nents, i) {
  45. /*printk(KERN_DEBUG "Init sg number : %d \n",i);*/
  46. if (i==0)
  47. {
  48. bytesleft = size_dma_buffer;
  49. bufp = bmeasdev->dma_buffer;
  50. }
  51. if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
  52. {
  53. mapbytes = bytesleft;
  54. }
  55. else
  56. {
  57. mapbytes = PAGE_SIZE - offset_in_page(bufp);
  58. }
  59. //TODO Check if this test is needed
  60. if (is_vmalloc_addr(bufp))
  61. sg_set_page(sg, vmalloc_to_page(bufp), mapbytes,
  62. offset_in_page(bufp));
  63. else
  64. sg_set_buf(sg, bufp, mapbytes);
  65. /* Configure next values */
  66. bufp += mapbytes;
  67. bytesleft -= mapbytes;
  68. /*printk(KERN_DEBUG "sg item (%p(+0x%lx), len:%d, left:%d)\n",
  69. virt_to_page(bufp), offset_in_page(bufp),
  70. mapbytes, bytesleft);*/
  71. }
  72. }
  73. void cfg_start_dma(struct bmeas_dev *bmeasdev)
  74. {
  75. int i;
  76. struct scatterlist *sg;
  77. uint32_t dev_mem_offset;
  78. dma_addr_t tmp;
  79. unsigned long test;
  80. //Switch block number
  81. if (bmeasdev->block_nber==0)
  82. dev_mem_offset = 0;
  83. else
  84. dev_mem_offset = DEFAULT_DMA_DATA_LENGTH*SIZE_DMA_DATA;
  85. // Configure DMA items
  86. for_each_sg(bmeasdev->sgt.sgl, sg, bmeasdev->sgt.nents, i) {
  87. //printk(KERN_DEBUG "DMA item : %d \n",i);
  88. // Prepare DMA item
  89. bmeasdev->items[i].start_addr = dev_mem_offset;
  90. bmeasdev->items[i].dma_addr_l = sg_dma_address(sg) & 0xFFFFFFFF;
  91. bmeasdev->items[i].dma_addr_h = (uint64_t)sg_dma_address(sg) >> 32;
  92. bmeasdev->items[i].dma_len = sg_dma_len(sg);
  93. dev_mem_offset += bmeasdev->items[i].dma_len;
  94. if (!sg_is_last(sg)) {// more transfers
  95. // uint64_t so it works on 32 and 64 bit
  96. //printk(KERN_DEBUG "not last sg : %d \n",i);
  97. tmp = bmeasdev->dma_list_item;
  98. tmp += (sizeof(struct bmeasd_dma_item) * ( i + 1 ));
  99. bmeasdev->items[i].next_addr_l = ((uint64_t)tmp) & 0xFFFFFFFF;
  100. bmeasdev->items[i].next_addr_h = ((uint64_t)tmp) >> 32;
  101. bmeasdev->items[i].attribute = 0x1; // more items chained
  102. } else {
  103. //printk(KERN_DEBUG "No more transfer");
  104. bmeasdev->items[i].attribute = 0x0; // last item
  105. }
  106. // The first item is written on the device
  107. if (i == 0) {
  108. test = sizeof(struct bmeasd_dma_item);
  109. /*printk(KERN_DEBUG "bmeasdev->dma_list_item : 0x%llx \n",bmeasdev->dma_list_item);
  110. printk(KERN_DEBUG "sizeof(struct bmeasd_dma_item) : 0x%llx \n",test);
  111. printk(KERN_DEBUG "Start adress : 0x%08x \n"
  112. "DMA addr_l : 0x%08x \n"
  113. "DMA addr_h : 0x%08x \n"
  114. "DMA len : %d \n"
  115. "DMA n_addr_l : 0x%08x \n"
  116. "DMA n_addr_h : 0x%08x \n"
  117. "Attribut : %d \n",
  118. bmeasdev->items[i].start_addr,bmeasdev->items[i].dma_addr_l,bmeasdev->items[i].dma_addr_h,
  119. bmeasdev->items[i].dma_len,bmeasdev->items[i].next_addr_l,bmeasdev->items[i].next_addr_h,bmeasdev->items[i].attribute);
  120. */
  121. bmeas_hardware_write(bmeasdev->fmc,
  122. BMEAS_DMA_ADDR,bmeasdev->items[i].start_addr);
  123. bmeas_hardware_write(bmeasdev->fmc,
  124. BMEAS_DMA_ADDR_L,bmeasdev->items[i].dma_addr_l);
  125. bmeas_hardware_write(bmeasdev->fmc,
  126. BMEAS_DMA_ADDR_H,bmeasdev->items[i].dma_addr_h);
  127. bmeas_hardware_write(bmeasdev->fmc,
  128. BMEAS_DMA_LEN,bmeasdev->items[i].dma_len);
  129. bmeas_hardware_write(bmeasdev->fmc,
  130. BMEAS_DMA_NEXT_L,bmeasdev->items[i].next_addr_l);
  131. bmeas_hardware_write(bmeasdev->fmc,
  132. BMEAS_DMA_NEXT_H,bmeasdev->items[i].next_addr_h);
  133. // Chain another transfer or not
  134. bmeas_hardware_write(bmeasdev->fmc,
  135. BMEAS_DMA_BR_LAST,bmeasdev->items[i].attribute);
  136. }
  137. }
  138. }
  139. /*
  140. * bmeas_map_dma
  141. * @fmc: fmc_device
  142. *
  143. * n_data: number of data to transfer
  144. *
  145. * Map a scatter/gather table for the DMA transfer from the peak-detector.
  146. * The DMA controller can store a single item, but more then one transfer
  147. * could be necessary because one item is 4Kbytes.
  148. */
  149. int bmeas_map_dma(struct bmeas_dev *bmeasdev,
  150. unsigned int n_data, unsigned int n_bytes_data)
  151. {
  152. struct bmeasd_dma_item *items;
  153. unsigned int pages;
  154. int i, count, size, err;
  155. struct scatterlist *sg;
  156. uint32_t dev_mem_offset = 0;
  157. dma_addr_t tmp;
  158. unsigned long test;
  159. printk(KERN_DEBUG "BMeas_map_dma -> bmeasdev : %p \n",bmeasdev);
  160. //Init DMA buffer
  161. //bmeasdev->dma_buffer_len = n_data*n_bytes_data;
  162. bmeasdev->dma_buffer = kzalloc(bmeasdev->dma_buffer_len, GFP_ATOMIC);
  163. //Calculate number of pages
  164. pages = calculate_n_pages(n_data,n_bytes_data);
  165. // Create sglists for the transfers
  166. err = sg_alloc_table(&bmeasdev->sgt, pages, GFP_ATOMIC);
  167. if (err) {
  168. dev_err(&bmeasdev->fmc->dev, "Cannot allocate sg table (%i pages)\n", pages);
  169. goto out;
  170. }
  171. // Limited to 32-bit (kernel limit)
  172. size = sizeof(*items) * bmeasdev->sgt.nents;
  173. items = kzalloc(size, GFP_ATOMIC);
  174. if (!items) {
  175. dev_err(&bmeasdev->fmc->dev, "Cannot allocate coherent dma memory\n");
  176. goto out_mem;
  177. }
  178. bmeasdev->items = items;
  179. bmeasdev->dma_list_item = dma_map_single(bmeasdev->fmc->hwdev, items, size,
  180. DMA_TO_DEVICE);
  181. if (!bmeasdev->dma_list_item) {
  182. goto out_free;
  183. }
  184. //printk(KERN_DEBUG "Nents size : %08x \n",size);
  185. // Setup the scatter list for the provided block
  186. setup_dma_scatter(bmeasdev,bmeasdev->dma_buffer_len);
  187. //printk(KERN_DEBUG "Init dma_buffer for a size : %08x \n",bmeasdev->dma_buffer_len);
  188. // Map DMA buffers
  189. //printk(KERN_DEBUG "bmeasdev->fmc->hwdev : %08x bmeasdev->sgt.sgl : %08x bmeasdev->sgt.nents : %08x \n",
  190. // bmeasdev->fmc->hwdev,bmeasdev->sgt.sgl,bmeasdev->sgt.nents);
  191. count = dma_map_sg(bmeasdev->fmc->hwdev,bmeasdev->sgt.sgl,bmeasdev->sgt.nents,DMA_FROM_DEVICE);
  192. if (!count) {
  193. dev_err(&bmeasdev->fmc->dev, "cannot map dma memory\n");
  194. goto out_map;
  195. }
  196. return 0;
  197. out_map:
  198. dma_unmap_single(bmeasdev->fmc->hwdev, bmeasdev->dma_list_item, size,
  199. DMA_TO_DEVICE);
  200. out_free:
  201. kfree(bmeasdev->items);
  202. out_mem:
  203. sg_free_table(&bmeasdev->sgt);
  204. out:
  205. return -ENOMEM;
  206. }
  207. /*
  208. * bmeas_dma_done
  209. * @fmc: pointer to fmc device
  210. *
  211. *
  212. */
  213. void bmeas_dma_done(struct bmeas_dev *bmeasdev)
  214. {
  215. int size_dma_bufer,i;
  216. bmeasdev->dmatag+=1;
  217. /*printk(KERN_DEBUG "DMA done : \n");
  218. if (bmeasdev->dma_buffer != NULL)
  219. {
  220. // Sync data to CPU
  221. //dma_sync_sg_for_cpu(bmeasdev->fmc->hwdev,bmeasdev->sgt.sgl,bmeasdev->sgt.nents,DMA_FROM_DEVICE);
  222. for (i=0;i<bmeasdev->dma_buffer_len/SIZE_DMA_DATA;i++)
  223. {
  224. //printk(KERN_DEBUG "Buffer[%d] : %d \n",i,*(bmeasdev->dma_buffer+i));
  225. if ((i>=0)&(i<8))
  226. printk(KERN_DEBUG "Buffer[%d] : %d \n",i,*(bmeasdev->dma_buffer+i));
  227. if (i>bmeasdev->dma_buffer_len/SIZE_DMA_DATA-10)
  228. printk(KERN_DEBUG "Buffer[%d] : %d \n",i,*(bmeasdev->dma_buffer+i));
  229. }
  230. }*/
  231. }
  232. /*
  233. * bmeas_unmap_dma
  234. * @bmeasdev: pointer to bmeas device
  235. *
  236. * It unmaps DMA.
  237. */
  238. void bmeas_unmap_dma(struct bmeas_dev *bmeasdev)
  239. {
  240. unsigned int size;
  241. printk(KERN_DEBUG "Unmap DMA\n");
  242. if (bmeasdev->items != NULL)
  243. {
  244. size = sizeof(struct bmeasd_dma_item) * bmeasdev->sgt.nents;
  245. dma_unmap_single(bmeasdev->fmc->hwdev, bmeasdev->dma_list_item, size,
  246. DMA_TO_DEVICE);
  247. dma_unmap_sg(bmeasdev->fmc->hwdev, bmeasdev->sgt.sgl, bmeasdev->sgt.nents,
  248. DMA_FROM_DEVICE);
  249. kfree(bmeasdev->items);
  250. bmeasdev->items = NULL;
  251. bmeasdev->dma_list_item = 0;
  252. }
  253. if (bmeasdev->dma_buffer != NULL)
  254. {
  255. kfree(bmeasdev->dma_buffer);
  256. bmeasdev->dma_buffer = NULL;
  257. }
  258. if (bmeasdev->sgt.sgl != NULL)
  259. sg_free_table(&bmeasdev->sgt);
  260. }