/arch/frv/include/asm/dma-mapping.h
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/device.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

/*
 * See Documentation/DMA-API.txt for the description of how the
 * following DMA API should work.
 */

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

extern unsigned long __nongprelbss dma_coherent_mem_start;
extern unsigned long __nongprelbss dma_coherent_mem_end;

void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);

extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
				 enum dma_data_direction direction);

static inline
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction direction);

static inline
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}

extern
dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
			size_t size, enum dma_data_direction direction);

static inline
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
}


static inline
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			     enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
				enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
}

static inline
void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
			 enum dma_data_direction direction)
{
}

static inline
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
			    enum dma_data_direction direction)
{
	flush_write_buffers();
}

static inline
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

static inline
int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

static inline
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

static inline
int dma_get_cache_alignment(void)
{
	return 1 << L1_CACHE_SHIFT;
}

#define dma_is_consistent(d, h)	(1)

static inline
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
	flush_write_buffers();
}

#endif /* _ASM_DMA_MAPPING_H */
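
Below is a minimal, hypothetical usage sketch, not part of the kernel tree, showing how a driver might exercise this API end to end: negotiate a DMA mask, allocate a coherent descriptor ring, and stream a payload buffer to the device. The function name example_dma_tx, the single-page ring, and the 1024-byte payload are all illustrative assumptions.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_dma_tx(struct device *dev)
{
	dma_addr_t ring_dma, buf_dma;
	void *ring, *buf;
	int ret = -ENOMEM;

	/* dma_supported() above rejects any mask tighter than 24 bits, so
	 * DMA_BIT_MASK(16) here would be refused with -EIO */
	if (dma_set_mask(dev, DMA_BIT_MASK(24)))
		return -EIO;

	/* coherent memory: the descriptor ring never needs sync calls */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	buf = kmalloc(1024, GFP_KERNEL);
	if (!buf)
		goto out_ring;

	memset(buf, 0xff, 1024);	/* fill the payload before mapping */

	/* streaming mapping: ownership of the buffer passes to the device */
	buf_dma = dma_map_single(dev, buf, 1024, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buf_dma)) {	/* always 0 on this arch */
		ret = -EIO;
		goto out_buf;
	}

	/* ... point the device at buf_dma and run the transfer; if the CPU
	 * had to rewrite the payload while it was mapped, the access would
	 * be bracketed by dma_sync_single_for_cpu()/_for_device(), the
	 * latter being a write-buffer flush on this architecture ... */

	dma_unmap_single(dev, buf_dma, 1024, DMA_TO_DEVICE);
	ret = 0;

out_buf:
	kfree(buf);
out_ring:
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return ret;
}

Note that dma_mapping_error() can never report failure on this platform, but checking it keeps the code portable to architectures where streaming mappings can fail.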
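
The scatter-gather entry points follow the same ownership rules; here is a sketch under the same caveats (example_sg_tx and its caller-built scatterlist are assumptions):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_sg_tx(struct device *dev, struct scatterlist *sg, int nents)
{
	int count;

	/* dma_map_sg() returns the number of DMA segments to program into
	 * the device, with 0 meaning the mapping failed; the original nents,
	 * not count, is what dma_unmap_sg() expects back */
	count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (count == 0)
		return -EIO;

	/* ... hand the device 'count' segments and wait; CPU access to the
	 * buffers while mapped would be bracketed by dma_sync_sg_for_cpu()
	 * and dma_sync_sg_for_device() ... */

	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
	return 0;
}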