Blame view
include/linux/swiotlb.h
2.81 KB
b24413180
|
1 |
/* SPDX-License-Identifier: GPL-2.0 */ |
1648993fb
|
2 3 |
#ifndef __LINUX_SWIOTLB_H #define __LINUX_SWIOTLB_H |
386744425
|
4 5 |
#include <linux/dma-direction.h> #include <linux/init.h> |
1648993fb
|
6 |
#include <linux/types.h> |
f51778db0
|
7 |
#include <linux/limits.h> |
1648993fb
|
8 9 |
struct device; |
386744425
|
10 |
struct page; |
1648993fb
|
11 |
struct scatterlist; |
ae7871be1
|
12 13 14 |
/*
 * Policy for using the software IO TLB (bounce buffering), selected by
 * the "swiotlb=" kernel command-line parameter.
 */
enum swiotlb_force {
	SWIOTLB_NORMAL   = 0,	/* Default - depending on HW DMA mask etc. */
	SWIOTLB_FORCE    = 1,	/* swiotlb=force */
	SWIOTLB_NO_FORCE = 2,	/* swiotlb=noforce */
};
0016fdee9
|
17 18 19 20 21 22 |
/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE 128

/*
 * log of the size of each IO TLB slab (i.e. each slab covers
 * 1 << IO_TLB_SHIFT = 2048 bytes).  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
ad32e8cb8
|
28 |
/* Early boot-time setup of the bounce pool; @verbose enables a report. */
extern void swiotlb_init(int verbose);
/* Initialise swiotlb from a caller-supplied buffer of @nslabs slabs. */
int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
/* Current number of slabs in the bounce pool. */
extern unsigned long swiotlb_nr_tbl(void);
/* Pool size from the command line, or the built-in default. */
unsigned long swiotlb_size_or_default(void);
/* Late (post-boot) counterparts of the init routines above. */
extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
extern int swiotlb_late_init_with_default_size(size_t default_size);
/*
 * Update memory attributes of the pool after boot.
 * NOTE(review): presumably for memory-encryption platforms — confirm
 * against the definition in the swiotlb implementation.
 */
extern void __init swiotlb_update_mem_attributes(void);
1648993fb
|
35 |
|
d7ef1533a
|
36 37 38 39 40 41 42 |
/*
 * Direction of a bounce-buffer sync: make the data visible to the CPU,
 * or to the device.
 */
enum dma_sync_target {
	SYNC_FOR_CPU	= 0,
	SYNC_FOR_DEVICE	= 1,
};
e05ed4d1f
|
43 |
|
fc0021aa3
|
44 45 46 |
/*
 * Bounce a buffer at physical address @phys into the swiotlb pool:
 * @mapping_size bytes are mapped, @alloc_size bytes are reserved in the
 * pool.  Returns the physical address of the bounce slot.
 */
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
		size_t mapping_size, size_t alloc_size,
		enum dma_data_direction dir, unsigned long attrs);

/* Release the bounce slot at @tlb_addr obtained from the call above. */
extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
				     size_t mapping_size,
				     size_t alloc_size,
				     enum dma_data_direction dir,
				     unsigned long attrs);

/* Sync a bounce slot for @target (CPU or device), see dma_sync_target. */
extern void swiotlb_tbl_sync_single(struct device *hwdev,
				    phys_addr_t tlb_addr, size_t size,
				    enum dma_data_direction dir,
				    enum dma_sync_target target);

/* Map @size bytes at @phys for DMA via the bounce pool; returns a dma_addr_t. */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
5740afdb6
|
61 |
#ifdef CONFIG_SWIOTLB |
55897af63
|
62 63 64 65 66 67 68 |
extern enum swiotlb_force swiotlb_force;
extern phys_addr_t io_tlb_start, io_tlb_end;

/*
 * True when @paddr lies inside the bounce pool,
 * i.e. in [io_tlb_start, io_tlb_end).
 */
static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
	return io_tlb_start <= paddr && paddr < io_tlb_end;
}
55897af63
|
69 |
/* Tear down the bounce pool and free its memory. */
void __init swiotlb_exit(void);
/* Maximum segment size usable with swiotlb (0 = no restriction). */
unsigned int swiotlb_max_segment(void);
/* Largest single mapping the bounce pool can service for @dev. */
size_t swiotlb_max_mapping_size(struct device *dev);
/* Whether a bounce pool is present and usable. */
bool is_swiotlb_active(void);
5740afdb6
|
73 |
#else |
55897af63
|
74 75 76 77 78 |
#define swiotlb_force SWIOTLB_NO_FORCE static inline bool is_swiotlb_buffer(phys_addr_t paddr) { return false; } |
55897af63
|
79 80 81 82 83 84 85 |
/* Nothing to tear down when swiotlb is compiled out. */
static inline void swiotlb_exit(void)
{
}

/* 0 signals "no swiotlb segment-size restriction". */
static inline unsigned int swiotlb_max_segment(void)
{
	return 0;
}
abe420bfa
|
86 87 88 89 |
/* No bounce pool, hence no extra cap on mapping size. */
static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
	return SIZE_MAX;
}
492366f7b
|
90 91 92 93 94 |
/* swiotlb can never be active when it is compiled out. */
static inline bool is_swiotlb_active(void)
{
	return false;
}
55897af63
|
95 |
#endif /* CONFIG_SWIOTLB */ |
5740afdb6
|
96 |
|
ad32e8cb8
|
97 |
extern void swiotlb_print_info(void); |
7453c549f
|
98 |
extern void swiotlb_set_max_segment(unsigned int); |
9c5a36214
|
99 |
|
1648993fb
|
100 |
#endif /* __LINUX_SWIOTLB_H */ |