Blame view
drivers/android/binder_alloc_selftest.c
7.96 KB
9c92ab619 treewide: Replace... |
// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc_selftest.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm_types.h>
#include <linux/err.h>
#include "binder_alloc.h"

/* Number of buffers allocated per test case; free order is permuted 5! ways. */
#define BUFFER_NUM 5
/* Smallest buffer increment; see BUILD_BUG_ON in binder_selftest_alloc_offset(). */
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)

/* True until the selftest has run once; cleared under binder_selftest_lock. */
static bool binder_selftest_run = true;
/* Count of failed checks accumulated across all test cases. */
static int binder_selftest_failures;
static DEFINE_MUTEX(binder_selftest_lock);

/**
 * enum buf_end_align_type - Page alignment of a buffer
 * end with regard to the end of the previous buffer.
 *
 * In the pictures below, buf2 refers to the buffer we
 * are aligning. buf1 refers to previous buffer by addr.
 * Symbol [ means the start of a buffer, ] means the end
 * of a buffer, and | means page boundaries.
 */
enum buf_end_align_type {
	/**
	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
	 * the same page as the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 ][ ...
	 * buf1 ]|[ buf2 ][ ...
	 */
	SAME_PAGE_UNALIGNED = 0,
	/**
	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
	 * is not page aligned, the end of this buffer is on the
	 * same page as the end of the previous buffer and is page
	 * aligned. When the previous buffer is page aligned, the
	 * end of this buffer is aligned to the next page boundary.
	 * Examples:
	 * buf1 ][ buf2 ]| ...
	 * buf1 ]|[ buf2 ]| ...
	 */
	SAME_PAGE_ALIGNED,
	/**
	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 ][ ...
	 */
	NEXT_PAGE_UNALIGNED,
	/**
	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ]| ...
	 * buf1 ]|[ buf2 | buf2 ]| ...
	 */
	NEXT_PAGE_ALIGNED,
	/**
	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
	 * the page that follows the page after the end of the
	 * previous buffer and is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
	 */
	NEXT_NEXT_UNALIGNED,
	LOOP_END,
};

/* Log the buffer sizes and free order of the failing test case. */
static void pr_err_size_seq(size_t *sizes, int *seq)
{
	int i;

	pr_err("alloc sizes: ");
	for (i = 0; i < BUFFER_NUM; i++)
		pr_cont("[%zu]", sizes[i]);
	pr_cont("\n");
	pr_err("free seq: ");
	for (i = 0; i < BUFFER_NUM; i++)
		pr_cont("[%d]", seq[i]);
	pr_cont("\n");
}

/*
 * Check that every page spanned by @buffer (from its user_data start to the
 * page-aligned end of @size bytes) has a backing page installed (page_ptr set)
 * and is not sitting on the shrinker lru. Logs and returns false on the first
 * page that violates this.
 */
static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 size_t size)
{
	void __user *page_addr;
	void __user *end;
	int page_index;

	end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	page_addr = buffer->user_data;
	for (; page_addr < end; page_addr += PAGE_SIZE) {
		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
		if (!alloc->pages[page_index].page_ptr ||
		    !list_empty(&alloc->pages[page_index].lru)) {
			pr_err("expect alloc but is %s at page index %d\n",
			       alloc->pages[page_index].page_ptr ?
			       "lru" : "free", page_index);
			return false;
		}
	}
	return true;
}

/*
 * Allocate BUFFER_NUM buffers with the given @sizes and verify each one's
 * pages are properly allocated; count a selftest failure otherwise.
 */
static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
				      struct binder_buffer *buffers[],
				      size_t *sizes, int *seq)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0);
		if (IS_ERR(buffers[i]) ||
		    !check_buffer_pages_allocated(alloc, buffers[i],
						  sizes[i])) {
			pr_err_size_seq(sizes, seq);
			binder_selftest_failures++;
		}
	}
}

/*
 * Free all BUFFER_NUM buffers in the order given by @seq, then verify that
 * every page below @end was moved onto the shrinker lru (i.e. freed buffers
 * keep their pages cached rather than released).
 */
static void binder_selftest_free_buf(struct binder_alloc *alloc,
				     struct binder_buffer *buffers[],
				     size_t *sizes, int *seq, size_t end)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++)
		binder_alloc_free_buf(alloc, buffers[seq[i]]);

	for (i = 0; i < end / PAGE_SIZE; i++) {
		/**
		 * Error message on a free page can be false positive
		 * if binder shrinker ran during binder_alloc_free_buf
		 * calls above.
		 */
		if (list_empty(&alloc->pages[i].lru)) {
			pr_err_size_seq(sizes, seq);
			pr_err("expect lru but is %s at page index %d\n",
			       alloc->pages[i].page_ptr ? "alloc" : "free", i);
			binder_selftest_failures++;
		}
	}
}

/*
 * Drain the binder lru by invoking the shrinker callback until it is empty,
 * then verify that no page in the allocator still has a backing page.
 */
static void binder_selftest_free_page(struct binder_alloc *alloc)
{
	int i;
	unsigned long count;

	while ((count = list_lru_count(&binder_alloc_lru))) {
		list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			      NULL, count);
	}

	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
		if (alloc->pages[i].page_ptr) {
			pr_err("expect free but is %s at page index %d\n",
			       list_empty(&alloc->pages[i].lru) ?
			       "alloc" : "lru", i);
			binder_selftest_failures++;
		}
	}
}

/*
 * One full test case: alloc/free once to populate the lru, alloc again (this
 * time the pages should come from the lru, which must then be empty), free
 * again, and finally reclaim everything via the shrinker.
 */
static void binder_selftest_alloc_free(struct binder_alloc *alloc,
				       size_t *sizes, int *seq, size_t end)
{
	struct binder_buffer *buffers[BUFFER_NUM];

	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);

	/* Allocate from lru. */
	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
	if (list_lru_count(&binder_alloc_lru))
		pr_err("lru list should be empty but is not\n");

	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
	binder_selftest_free_page(alloc);
}

/* Return true if @val already appears in seq[0..index-1]. */
static bool is_dup(int *seq, int index, int val)
{
	int i;

	for (i = 0; i < index; i++) {
		if (seq[i] == val)
			return true;
	}
	return false;
}

/* Generate BUFFER_NUM factorial free orders. */
static void binder_selftest_free_seq(struct binder_alloc *alloc,
				     size_t *sizes, int *seq,
				     int index, size_t end)
{
	int i;

	if (index == BUFFER_NUM) {
		/* seq[] is a complete permutation: run one test case. */
		binder_selftest_alloc_free(alloc, sizes, seq, end);
		return;
	}
	for (i = 0; i < BUFFER_NUM; i++) {
		if (is_dup(seq, index, i))
			continue;
		seq[index] = i;
		binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
	}
}

/*
 * Turn the cumulative @end_offset array into per-buffer sizes and run the
 * free-order permutations twice: once with buffers packed at the front of the
 * address space (front_sizes) and once mirrored at the back (back_sizes).
 */
static void binder_selftest_alloc_size(struct binder_alloc *alloc,
				       size_t *end_offset)
{
	int i;
	int seq[BUFFER_NUM] = {0};
	size_t front_sizes[BUFFER_NUM];
	size_t back_sizes[BUFFER_NUM];
	size_t last_offset, offset = 0;

	for (i = 0; i < BUFFER_NUM; i++) {
		last_offset = offset;
		offset = end_offset[i];
		front_sizes[i] = offset - last_offset;
		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
	}
	/*
	 * Buffers share the first or last few pages.
	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
	 * we need one giant buffer before getting to the last page.
	 */
	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
	binder_selftest_free_seq(alloc, front_sizes, seq, 0,
				 end_offset[BUFFER_NUM - 1]);
	binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
}

/*
 * Recursively enumerate every combination of buf_end_align_type for the
 * BUFFER_NUM buffer end offsets; each complete combination is handed to
 * binder_selftest_alloc_size().
 */
static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
					 size_t *end_offset, int index)
{
	int align;
	size_t end, prev;

	if (index == BUFFER_NUM) {
		binder_selftest_alloc_size(alloc, end_offset);
		return;
	}
	prev = index == 0 ? 0 : end_offset[index - 1];
	end = prev;

	/*
	 * All BUFFER_NUM minimum-size steps must fit inside a single page;
	 * presumably so the unaligned cases never cross an extra page
	 * boundary on their own.
	 */
	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
		if (align % 2)
			/* odd alignment types end page aligned */
			end = ALIGN(end, PAGE_SIZE);
		else
			/* even alignment types end mid-page */
			end += BUFFER_MIN_SIZE;
		end_offset[index] = end;
		binder_selftest_alloc_offset(alloc, end_offset, index + 1);
	}
}

/**
 * binder_selftest_alloc() - Test alloc and free of buffer pages.
 * @alloc: Pointer to alloc struct.
 *
 * Allocate BUFFER_NUM buffers to cover all page alignment cases,
 * then free them in all orders possible. Check that pages are
 * correctly allocated, put onto lru when buffers are freed, and
 * are freed when binder_alloc_free_page is called.
 */
void binder_selftest_alloc(struct binder_alloc *alloc)
{
	size_t end_offset[BUFFER_NUM];

	if (!binder_selftest_run)
		return;
	mutex_lock(&binder_selftest_lock);
	/* Re-check under the lock; also require a mapped vma to test against. */
	if (!binder_selftest_run || !alloc->vma)
		goto done;
	pr_info("STARTED\n");
	binder_selftest_alloc_offset(alloc, end_offset, 0);
	/* Run at most once per boot. */
	binder_selftest_run = false;
	if (binder_selftest_failures > 0)
		pr_info("%d tests FAILED\n", binder_selftest_failures);
	else
		pr_info("PASSED\n");

done:
	mutex_unlock(&binder_selftest_lock);
}