/Ethereal-msm8939-beta9/arch/metag/include/asm/dma-mapping.h

https://bitbucket.org/MilosStamenkovic95/etherealos

#ifndef _ASM_METAG_DMA_MAPPING_H
#define _ASM_METAG_DMA_MAPPING_H

#include <linux/mm.h>

#include <asm/cache.h>
#include <asm/io.h>
#include <linux/scatterlist.h>
#include <asm/bug.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
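
/*
 * Out-of-line helpers provided by the arch DMA code: coherent (uncached)
 * buffer allocation and freeing, explicit cache maintenance for streaming
 * DMA, and helpers for mapping coherent buffers into userspace.
 */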
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);

void dma_sync_for_device(void *vaddr, size_t size, int dma_direction);
void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction);

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                      void *cpu_addr, dma_addr_t dma_addr, size_t size);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size);
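
/*
 * Streaming mappings: on this port a buffer's DMA address is simply its
 * physical address, so mapping and unmapping reduce to cache maintenance
 * on the CPU side (sync for device before the transfer, sync for CPU
 * afterwards).
 */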
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(size == 0);
        dma_sync_for_device(ptr, size, direction);
        return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        dma_sync_for_cpu(phys_to_virt(dma_addr), size, direction);
}
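
/*
 * Illustrative usage sketch only ("mydev", "buf" and "len" are made-up
 * names, not part of this header):
 *
 *      dma_addr_t handle = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
 *      ... point the device at "handle" and run the transfer ...
 *      dma_unmap_single(mydev, handle, len, DMA_TO_DEVICE);
 */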
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
           enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nents == 0 || sglist[0].length == 0);

        for_each_sg(sglist, sg, nents, i) {
                BUG_ON(!sg_page(sg));

                sg->dma_address = sg_phys(sg);

                dma_sync_for_device(sg_virt(sg), sg->length, direction);
        }

        return nents;
}
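
/*
 * Page and scatter-gather variants follow the same model: each element's
 * dma_address is its physical address, and the CPU caches are synchronised
 * per element in the requested direction.
 */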
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
                            direction);
        return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries,
             enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nhwentries == 0 || sglist[0].length == 0);

        for_each_sg(sglist, sg, nhwentries, i) {
                BUG_ON(!sg_page(sg));

                sg->dma_address = sg_phys(sg);

                dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
        }
}
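
/*
 * The dma_sync_* helpers transfer buffer ownership without remapping:
 * *_for_cpu before the CPU reads a buffer the device has written,
 * *_for_device before handing a CPU-written buffer back to the device.
 */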
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
        dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction direction)
{
        dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
        dma_sync_for_cpu(phys_to_virt(dma_handle) + offset, size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        dma_sync_for_device(phys_to_virt(dma_handle) + offset, size, direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
        int i;

        for (i = 0; i < nelems; i++, sg++)
                dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        int i;

        for (i = 0; i < nelems; i++, sg++)
                dma_sync_for_device(sg_virt(sg), sg->length, direction);
}
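
/*
 * Mapping cannot fail on this port (DMA addresses are plain physical
 * addresses), so dma_mapping_error() always reports success and any
 * DMA mask is accepted.
 */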
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

#define dma_supported(dev, mask) (1)

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
/*
 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
 * do any flushing here.
 */
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
}

/* drivers/base/dma-mapping.c */
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                                  void *cpu_addr, dma_addr_t dma_addr,
                                  size_t size);

#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
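
/*
 * Illustrative usage sketch for the coherent API (the names "mydev", "cpu"
 * and "bus" are made up for the example):
 *
 *      dma_addr_t bus;
 *      void *cpu = dma_alloc_coherent(mydev, PAGE_SIZE, &bus, GFP_KERNEL);
 *      if (cpu) {
 *              ... program the device with "bus", access the buffer via "cpu" ...
 *              dma_free_coherent(mydev, PAGE_SIZE, cpu, bus);
 *      }
 */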
#endif /* _ASM_METAG_DMA_MAPPING_H */