Commit ccd35fb9f4da856b105ea0f1e0cab3702e8ae6ba
1 parent
786a5e15b6
Exists in
master
and in
7 other branches
kernel: kmem_ptr_validate considered harmful
This is a nasty and error-prone API. It is no longer used; remove it. Signed-off-by: Nick Piggin <npiggin@kernel.dk>
Showing 5 changed files with 1 addition and 99 deletions Side-by-side Diff
include/linux/slab.h
... | ... | @@ -106,8 +106,6 @@ |
106 | 106 | void kmem_cache_free(struct kmem_cache *, void *); |
107 | 107 | unsigned int kmem_cache_size(struct kmem_cache *); |
108 | 108 | const char *kmem_cache_name(struct kmem_cache *); |
109 | -int kern_ptr_validate(const void *ptr, unsigned long size); | |
110 | -int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr); | |
111 | 109 | |
112 | 110 | /* |
113 | 111 | * Please use this macro to create slab caches. Simply specify the |
mm/slab.c
... | ... | @@ -2781,7 +2781,7 @@ |
2781 | 2781 | /* |
2782 | 2782 | * Map pages beginning at addr to the given cache and slab. This is required |
2783 | 2783 | * for the slab allocator to be able to lookup the cache and slab of a |
2784 | - * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging. | |
2784 | + * virtual address for kfree, ksize, and slab debugging. | |
2785 | 2785 | */ |
2786 | 2786 | static void slab_map_pages(struct kmem_cache *cache, struct slab *slab, |
2787 | 2787 | void *addr) |
... | ... | @@ -3659,36 +3659,6 @@ |
3659 | 3659 | } |
3660 | 3660 | EXPORT_SYMBOL(kmem_cache_alloc_notrace); |
3661 | 3661 | #endif |
3662 | - | |
3663 | -/** | |
3664 | - * kmem_ptr_validate - check if an untrusted pointer might be a slab entry. | |
3665 | - * @cachep: the cache we're checking against | |
3666 | - * @ptr: pointer to validate | |
3667 | - * | |
3668 | - * This verifies that the untrusted pointer looks sane; | |
3669 | - * it is _not_ a guarantee that the pointer is actually | |
3670 | - * part of the slab cache in question, but it at least | |
3671 | - * validates that the pointer can be dereferenced and | |
3672 | - * looks half-way sane. | |
3673 | - * | |
3674 | - * Currently only used for dentry validation. | |
3675 | - */ | |
3676 | -int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr) | |
3677 | -{ | |
3678 | - unsigned long size = cachep->buffer_size; | |
3679 | - struct page *page; | |
3680 | - | |
3681 | - if (unlikely(!kern_ptr_validate(ptr, size))) | |
3682 | - goto out; | |
3683 | - page = virt_to_page(ptr); | |
3684 | - if (unlikely(!PageSlab(page))) | |
3685 | - goto out; | |
3686 | - if (unlikely(page_get_cache(page) != cachep)) | |
3687 | - goto out; | |
3688 | - return 1; | |
3689 | -out: | |
3690 | - return 0; | |
3691 | -} | |
3692 | 3662 | |
3693 | 3663 | #ifdef CONFIG_NUMA |
3694 | 3664 | void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) |
mm/slob.c
mm/slub.c
... | ... | @@ -1917,17 +1917,6 @@ |
1917 | 1917 | } |
1918 | 1918 | EXPORT_SYMBOL(kmem_cache_free); |
1919 | 1919 | |
1920 | -/* Figure out on which slab page the object resides */ | |
1921 | -static struct page *get_object_page(const void *x) | |
1922 | -{ | |
1923 | - struct page *page = virt_to_head_page(x); | |
1924 | - | |
1925 | - if (!PageSlab(page)) | |
1926 | - return NULL; | |
1927 | - | |
1928 | - return page; | |
1929 | -} | |
1930 | - | |
1931 | 1920 | /* |
1932 | 1921 | * Object placement in a slab is made very easy because we always start at |
1933 | 1922 | * offset 0. If we tune the size of the object to the alignment then we can |
... | ... | @@ -2384,35 +2373,6 @@ |
2384 | 2373 | s->offset, flags); |
2385 | 2374 | return 0; |
2386 | 2375 | } |
2387 | - | |
2388 | -/* | |
2389 | - * Check if a given pointer is valid | |
2390 | - */ | |
2391 | -int kmem_ptr_validate(struct kmem_cache *s, const void *object) | |
2392 | -{ | |
2393 | - struct page *page; | |
2394 | - | |
2395 | - if (!kern_ptr_validate(object, s->size)) | |
2396 | - return 0; | |
2397 | - | |
2398 | - page = get_object_page(object); | |
2399 | - | |
2400 | - if (!page || s != page->slab) | |
2401 | - /* No slab or wrong slab */ | |
2402 | - return 0; | |
2403 | - | |
2404 | - if (!check_valid_pointer(s, page, object)) | |
2405 | - return 0; | |
2406 | - | |
2407 | - /* | |
2408 | - * We could also check if the object is on the slabs freelist. | |
2409 | - * But this would be too expensive and it seems that the main | |
2410 | - * purpose of kmem_ptr_valid() is to check if the object belongs | |
2411 | - * to a certain slab. | |
2412 | - */ | |
2413 | - return 1; | |
2414 | -} | |
2415 | -EXPORT_SYMBOL(kmem_ptr_validate); | |
2416 | 2376 | |
2417 | 2377 | /* |
2418 | 2378 | * Determine the size of a slab object |
mm/util.c
... | ... | @@ -186,27 +186,6 @@ |
186 | 186 | } |
187 | 187 | EXPORT_SYMBOL(kzfree); |
188 | 188 | |
189 | -int kern_ptr_validate(const void *ptr, unsigned long size) | |
190 | -{ | |
191 | - unsigned long addr = (unsigned long)ptr; | |
192 | - unsigned long min_addr = PAGE_OFFSET; | |
193 | - unsigned long align_mask = sizeof(void *) - 1; | |
194 | - | |
195 | - if (unlikely(addr < min_addr)) | |
196 | - goto out; | |
197 | - if (unlikely(addr > (unsigned long)high_memory - size)) | |
198 | - goto out; | |
199 | - if (unlikely(addr & align_mask)) | |
200 | - goto out; | |
201 | - if (unlikely(!kern_addr_valid(addr))) | |
202 | - goto out; | |
203 | - if (unlikely(!kern_addr_valid(addr + size - 1))) | |
204 | - goto out; | |
205 | - return 1; | |
206 | -out: | |
207 | - return 0; | |
208 | -} | |
209 | - | |
210 | 189 | /* |
211 | 190 | * strndup_user - duplicate an existing string from user space |
212 | 191 | * @s: The string to duplicate |