mm/kmemcheck.c
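
/*
 * Hooks used by kmemcheck, the kernel's detector of uses of uninitialized
 * memory: shadow pages are attached to tracked allocations and record the
 * initialization state of the corresponding data.
 */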
  #include <linux/gfp.h>
  #include <linux/mm_types.h>
  #include <linux/mm.h>
  #include <linux/slab.h>
  #include "slab.h"
  #include <linux/kmemcheck.h>
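
/*
 * Allocate shadow pages for the 2^order data pages starting at @page, link
 * each data page to its shadow, and hide the data pages from the MMU so
 * that every access faults and can be inspected by kmemcheck.
 */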
  void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
  {
  	struct page *shadow;
  	int pages;
  	int i;
  
  	pages = 1 << order;
  
  	/*
  	 * With kmemcheck enabled, we need to allocate a memory area for the
  	 * shadow bits as well.
  	 */
  	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
  	if (!shadow) {
  		if (printk_ratelimit())
  			pr_err("kmemcheck: failed to allocate shadow bitmap
  ");
  		return;
  	}
  
  	for(i = 0; i < pages; ++i)
  		page[i].shadow = page_address(&shadow[i]);
  
  	/*
  	 * Mark it as non-present for the MMU so that our accesses to
  	 * this memory will trigger a page fault and let us analyze
  	 * the memory accesses.
  	 */
  	kmemcheck_hide_pages(page, pages);
  }
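
/*
 * Undo kmemcheck_alloc_shadow(): make the data pages visible to the MMU
 * again, clear the page->shadow links and free the shadow pages.
 */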
  void kmemcheck_free_shadow(struct page *page, int order)
  {
  	struct page *shadow;
  	int pages;
  	int i;

	if (!kmemcheck_page_is_tracked(page))
  		return;

	pages = 1 << order;
  
  	kmemcheck_show_pages(page, pages);
  
  	shadow = virt_to_page(page[0].shadow);
  
  	for(i = 0; i < pages; ++i)
  		page[i].shadow = NULL;
  
  	__free_pages(shadow, order);
  }
  
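/*
 * Set the initial shadow state of a newly allocated slab object: objects
 * that are not being tracked are marked fully initialized, while ordinary
 * objects without a constructor are marked uninitialized so that a read
 * before the first write is reported.
 */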
  void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
  			  size_t size)
  {
  	if (unlikely(!object)) /* Skip object if allocation failed */
  		return;

	/*
  	 * Has already been memset(), which initializes the shadow for us
  	 * as well.
  	 */
  	if (gfpflags & __GFP_ZERO)
  		return;
  
  	/* No need to initialize the shadow of a non-tracked slab. */
  	if (s->flags & SLAB_NOTRACK)
  		return;
  
  	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
  		/*
  		 * Allow notracked objects to be allocated from
  		 * tracked caches. Note however that these objects
  		 * will still get page faults on access, they just
  		 * won't ever be flagged as uninitialized. If page
  		 * faults are not acceptable, the slab cache itself
  		 * should be marked NOTRACK.
  		 */
  		kmemcheck_mark_initialized(object, size);
  	} else if (!s->ctor) {
  		/*
  		 * New objects should be marked uninitialized before
		 * they're returned to the caller.
  		 */
  		kmemcheck_mark_uninitialized(object, size);
  	}
  }
  
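/*
 * Mark the object's memory as freed in the shadow so that subsequent
 * accesses to its contents are flagged.
 */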
  void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
  {
  	/* TODO: RCU freeing is unsupported for now; hide false positives. */
  	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
  		kmemcheck_mark_freed(object, size);
  }
  
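/*
 * Page allocator hook: attach shadow pages to a newly allocated page block
 * (unless it is highmem or explicitly untracked) and seed their state
 * according to __GFP_ZERO.
 */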
  void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
  			       gfp_t gfpflags)
  {
  	int pages;
  
  	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
  		return;
  
  	pages = 1 << order;
  
  	/*
  	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
  	 * can become uninitialized by copying uninitialized memory
  	 * into them.
  	 */
  
  	/* XXX: Can use zone->node for node? */
  	kmemcheck_alloc_shadow(page, order, gfpflags, -1);
  
  	if (gfpflags & __GFP_ZERO)
  		kmemcheck_mark_initialized_pages(page, pages);
  	else
  		kmemcheck_mark_uninitialized_pages(page, pages);
  }
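
/*
 * Illustrative sketch only, not part of mm/kmemcheck.c: roughly how an
 * allocator path could pair the hooks above. The wrapper names
 * prep_tracked_pages() and release_tracked_pages() are hypothetical; the
 * real call sites live in the page and slab allocators. Kept under #if 0
 * so it is never compiled.
 */
#if 0
static void prep_tracked_pages(struct page *page, unsigned int order,
			       gfp_t gfpflags)
{
	/* Sets up shadow pages and marks them (un)initialized as needed. */
	if (kmemcheck_enabled)
		kmemcheck_pagealloc_alloc(page, order, gfpflags);
}

static void release_tracked_pages(struct page *page, int order)
{
	/* A no-op for pages that were never tracked. */
	kmemcheck_free_shadow(page, order);
}
#endif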