
/Ethereal-msm8939-beta9/arch/metag/include/asm/dma-mapping.h

https://bitbucket.org/MilosStamenkovic95/etherealos
#ifndef _ASM_METAG_DMA_MAPPING_H
#define _ASM_METAG_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <asm/bug.h>
#include <asm/cache.h>
#include <asm/io.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

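/*
 * Coherent allocations return uncacheable memory, so no explicit cache
 * maintenance is needed while such a buffer is mapped.
 */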
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

void dma_sync_for_device(void *vaddr, size_t size, int dma_direction);
void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction);

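/* Map a coherent or write-combined buffer into a user vma. */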
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size);

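/*
 * Streaming API. Meta has no IOMMU, so a DMA address is simply the
 * physical address and mapping reduces to cache maintenance over the
 * buffer. A typical driver sequence looks like this (a sketch; the
 * buffer and device handling are hypothetical, not part of this
 * header):
 *
 *	dma_addr_t busaddr;
 *
 *	busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, busaddr))
 *		return -ENOMEM;
 *	... point the hardware at busaddr and run the transfer ...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */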
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);
	dma_sync_for_device(ptr, size, direction);
	return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_sync_for_cpu(phys_to_virt(dma_addr), size, direction);
}

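/*
 * Map each scatterlist segment: record its bus address and perform the
 * cache maintenance needed before the device touches it.
 */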
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nents == 0 || sglist[0].length == 0);

	for_each_sg(sglist, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);
		dma_sync_for_device(sg_virt(sg), sg->length, direction);
	}

	return nents;
}

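/*
 * Note: the physical address is passed straight to dma_sync_for_device()
 * below, even though the other helpers in this file use phys_to_virt().
 * This is only correct if the lowmem linear map is an identity mapping
 * on this port; otherwise page_address(page) + offset would be needed.
 */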
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
			    direction);
	return page_to_phys(page) + offset;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}

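/* Make each scatterlist segment visible to the CPU again after DMA. */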
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries,
	     enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nhwentries == 0 || sglist[0].length == 0);

	for_each_sg(sglist, sg, nhwentries, i) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);
		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
	}
}

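/*
 * Partial-ownership helpers: hand a mapped buffer back to the CPU or
 * back to the device without tearing down the mapping.
 */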
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
}

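/* Range variants: sync only [offset, offset + size) of the mapping. */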
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	dma_sync_for_cpu(phys_to_virt(dma_handle) + offset, size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	dma_sync_for_device(phys_to_virt(dma_handle) + offset, size,
			    direction);
}

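/*
 * Note: these iterate with sg++ rather than for_each_sg(), so they
 * only handle flat (non-chained) scatterlists.
 */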
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		dma_sync_for_device(sg_virt(sg), sg->length, direction);
}

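/*
 * Without an IOMMU a mapping can never fail and any DMA mask is
 * acceptable, so error checking and dma_supported() are trivial.
 */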
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

#define dma_supported(dev, mask)        (1)

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

/*
 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no
 * need to do any flushing here.
 */
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
}

/* drivers/base/dma-mapping.c */
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size);

#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)

#endif /* _ASM_METAG_DMA_MAPPING_H */