Blame view
include/linux/vmalloc.h
5.9 KB
1da177e4c
|
1 2 3 4 |
#ifndef _LINUX_VMALLOC_H #define _LINUX_VMALLOC_H #include <linux/spinlock.h> |
db64fe022
|
5 |
#include <linux/init.h> |
13ba3fcbb
|
6 |
#include <linux/list.h> |
1da177e4c
|
7 |
#include <asm/page.h> /* pgprot_t */ |
13ba3fcbb
|
8 |
#include <linux/rbtree.h> |
1da177e4c
|
9 |
|
605d9288b
|
10 |
struct vm_area_struct;		/* vma defining user mapping in mm_types.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_VPAGES		0x00000010	/* buffer for pages was vmalloc'ed */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by an arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
1da177e4c
|
29 |
struct vm_struct { |
2b4ac44e7
|
30 |
struct vm_struct *next; |
1da177e4c
|
31 32 33 34 35 |
void *addr; unsigned long size; unsigned long flags; struct page **pages; unsigned int nr_pages; |
ffa71f33a
|
36 |
phys_addr_t phys_addr; |
5e6cafc83
|
37 |
const void *caller; |
1da177e4c
|
38 |
}; |
13ba3fcbb
|
39 40 41 42 43 44 45 46 47 48 |
/* Tracks one [va_start, va_end) range of kernel virtual address space. */
struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	struct vm_struct *vm;
	struct rcu_head rcu_head;
};
1da177e4c
|
49 50 51 |
/*
 *	Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count,
				int node, pgprot_t prot);
extern void vm_unmap_aliases(void);

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
#else
/* no MMU: nothing to set up */
static inline void vmalloc_init(void)
{
}
#endif
1da177e4c
|
64 |
extern void *vmalloc(unsigned long size); |
e1ca7788d
|
65 |
extern void *vzalloc(unsigned long size); |
833423143
|
66 |
extern void *vmalloc_user(unsigned long size); |
930fc45a4
|
67 |
extern void *vmalloc_node(unsigned long size, int node); |
e1ca7788d
|
68 |
extern void *vzalloc_node(unsigned long size, int node); |
1da177e4c
|
69 70 |
extern void *vmalloc_exec(unsigned long size); extern void *vmalloc_32(unsigned long size); |
833423143
|
71 |
extern void *vmalloc_32_user(unsigned long size); |
dd0fc66fb
|
72 |
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); |
d0a21265d
|
73 74 |
extern void *__vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, |
cb9e3c292
|
75 76 |
pgprot_t prot, unsigned long vm_flags, int node, const void *caller); |
b3bdda02a
|
77 |
extern void vfree(const void *addr); |
1da177e4c
|
78 79 80 |
extern void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot); |
b3bdda02a
|
81 |
extern void vunmap(const void *addr); |
833423143
|
82 |
|
e69e9d4ae
|
83 84 85 |
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, void *kaddr, unsigned long size); |
833423143
|
86 87 |
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff); |
1eeb66a1b
|
88 |
void vmalloc_sync_all(void); |
1da177e4c
|
89 90 91 92 |
/* * Lowlevel-APIs (not for driver use!) */ |
9585116ba
|
93 94 95 |
static inline size_t get_vm_area_size(const struct vm_struct *area) { |
71394fe50
|
96 97 98 99 100 |
if (!(area->flags & VM_NO_GUARD)) /* return actual size without guard page */ return area->size - PAGE_SIZE; else return area->size; |
9585116ba
|
101 |
} |
1da177e4c
|
102 |
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); |
230169693
|
103 |
extern struct vm_struct *get_vm_area_caller(unsigned long size, |
5e6cafc83
|
104 |
unsigned long flags, const void *caller); |
1da177e4c
|
105 106 |
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, unsigned long start, unsigned long end); |
c29686129
|
107 108 109 |
extern struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, |
5e6cafc83
|
110 |
const void *caller); |
b3bdda02a
|
111 |
extern struct vm_struct *remove_vm_area(const void *addr); |
e9da6e990
|
112 |
extern struct vm_struct *find_vm_area(const void *addr); |
c19c03fc7
|
113 |
|
1da177e4c
|
114 |
extern int map_vm_area(struct vm_struct *area, pgprot_t prot, |
f6f8ed473
|
115 |
struct page **pages); |
b554cb426
|
116 |
#ifdef CONFIG_MMU |
8fc489850
|
117 118 119 |
extern int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages); extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); |
c19c03fc7
|
120 |
extern void unmap_kernel_range(unsigned long addr, unsigned long size); |
b554cb426
|
121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 |
#else static inline int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages) { return size >> PAGE_SHIFT; } static inline void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) { } static inline void unmap_kernel_range(unsigned long addr, unsigned long size) { } #endif |
1da177e4c
|
137 |
|
5f4352fbf
|
138 |
/* Allocate/destroy a 'vmalloc' VM area. */ |
cd12909cb
|
139 |
extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes); |
5f4352fbf
|
140 |
extern void free_vm_area(struct vm_struct *area); |
69beeb1d3
|
141 142 143 |
/* for /dev/kmem */ extern long vread(char *buf, char *addr, unsigned long count); extern long vwrite(char *buf, char *addr, unsigned long count); |
1da177e4c
|
144 145 146 |
/* * Internals. Dont't use.. */ |
f1c4069e1
|
147 |
extern struct list_head vmap_area_list; |
be9b7335e
|
148 |
extern __init void vm_area_add_early(struct vm_struct *vm); |
c0c0a2937
|
149 |
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); |
1da177e4c
|
150 |
|
4f8b02b4e
|
151 |
#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
/* SMP && !MMU stubs: percpu vm areas cannot be congruently allocated. */
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		  const size_t *sizes, int nr_vms,
		  size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif
ca23e405e
|
173 |
|
db3808c1b
|
174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 |
/* Summary of vmalloc address-space usage (e.g. for /proc/meminfo). */
struct vmalloc_info {
	unsigned long	used;
	unsigned long	largest_chunk;
};

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
extern void get_vmalloc_info(struct vmalloc_info *vmi);
#else
/* !MMU: no vmalloc address space; report everything as zero. */
#define VMALLOC_TOTAL 0UL
#define get_vmalloc_info(vmi)			\
do {						\
	(vmi)->used = 0;			\
	(vmi)->largest_chunk = 0;		\
} while (0)
#endif
1da177e4c
|
191 |
#endif /* _LINUX_VMALLOC_H */ |