Commit 99d7e48e8cb867f303439ad40e995e203841bd94
Committed by
Dave Airlie
1 parent
550e2d9270
Exists in
master
and in
7 other branches
drm: Add memory manager debug function
drm_mm_debug_table will print the memory manager state in a table, allowing a snapshot of the manager at a given point in time. Useful for debugging. Signed-off-by: Jerome Glisse <jglisse@redhat.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
Showing 2 changed files with 21 additions and 0 deletions Inline Diff
drivers/gpu/drm/drm_mm.c
1 | /************************************************************************** | 1 | /************************************************************************** |
2 | * | 2 | * |
3 | * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA. | 3 | * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA. |
4 | * All Rights Reserved. | 4 | * All Rights Reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the | 7 | * copy of this software and associated documentation files (the |
8 | * "Software"), to deal in the Software without restriction, including | 8 | * "Software"), to deal in the Software without restriction, including |
9 | * without limitation the rights to use, copy, modify, merge, publish, | 9 | * without limitation the rights to use, copy, modify, merge, publish, |
10 | * distribute, sub license, and/or sell copies of the Software, and to | 10 | * distribute, sub license, and/or sell copies of the Software, and to |
11 | * permit persons to whom the Software is furnished to do so, subject to | 11 | * permit persons to whom the Software is furnished to do so, subject to |
12 | * the following conditions: | 12 | * the following conditions: |
13 | * | 13 | * |
14 | * The above copyright notice and this permission notice (including the | 14 | * The above copyright notice and this permission notice (including the |
15 | * next paragraph) shall be included in all copies or substantial portions | 15 | * next paragraph) shall be included in all copies or substantial portions |
16 | * of the Software. | 16 | * of the Software. |
17 | * | 17 | * |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
25 | * | 25 | * |
26 | * | 26 | * |
27 | **************************************************************************/ | 27 | **************************************************************************/ |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * Generic simple memory manager implementation. Intended to be used as a base | 30 | * Generic simple memory manager implementation. Intended to be used as a base |
31 | * class implementation for more advanced memory managers. | 31 | * class implementation for more advanced memory managers. |
32 | * | 32 | * |
33 | * Note that the algorithm used is quite simple and there might be substantial | 33 | * Note that the algorithm used is quite simple and there might be substantial |
34 | * performance gains if a smarter free list is implemented. Currently it is just an | 34 | * performance gains if a smarter free list is implemented. Currently it is just an |
35 | * unordered stack of free regions. This could easily be improved if an RB-tree | 35 | * unordered stack of free regions. This could easily be improved if an RB-tree |
36 | * is used instead. At least if we expect heavy fragmentation. | 36 | * is used instead. At least if we expect heavy fragmentation. |
37 | * | 37 | * |
38 | * Aligned allocations can also see improvement. | 38 | * Aligned allocations can also see improvement. |
39 | * | 39 | * |
40 | * Authors: | 40 | * Authors: |
41 | * Thomas Hellström <thomas-at-tungstengraphics-dot-com> | 41 | * Thomas Hellström <thomas-at-tungstengraphics-dot-com> |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include "drmP.h" | 44 | #include "drmP.h" |
45 | #include "drm_mm.h" | 45 | #include "drm_mm.h" |
46 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
47 | #include <linux/seq_file.h> | 47 | #include <linux/seq_file.h> |
48 | 48 | ||
49 | #define MM_UNUSED_TARGET 4 | 49 | #define MM_UNUSED_TARGET 4 |
50 | 50 | ||
51 | unsigned long drm_mm_tail_space(struct drm_mm *mm) | 51 | unsigned long drm_mm_tail_space(struct drm_mm *mm) |
52 | { | 52 | { |
53 | struct list_head *tail_node; | 53 | struct list_head *tail_node; |
54 | struct drm_mm_node *entry; | 54 | struct drm_mm_node *entry; |
55 | 55 | ||
56 | tail_node = mm->ml_entry.prev; | 56 | tail_node = mm->ml_entry.prev; |
57 | entry = list_entry(tail_node, struct drm_mm_node, ml_entry); | 57 | entry = list_entry(tail_node, struct drm_mm_node, ml_entry); |
58 | if (!entry->free) | 58 | if (!entry->free) |
59 | return 0; | 59 | return 0; |
60 | 60 | ||
61 | return entry->size; | 61 | return entry->size; |
62 | } | 62 | } |
63 | 63 | ||
64 | int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size) | 64 | int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size) |
65 | { | 65 | { |
66 | struct list_head *tail_node; | 66 | struct list_head *tail_node; |
67 | struct drm_mm_node *entry; | 67 | struct drm_mm_node *entry; |
68 | 68 | ||
69 | tail_node = mm->ml_entry.prev; | 69 | tail_node = mm->ml_entry.prev; |
70 | entry = list_entry(tail_node, struct drm_mm_node, ml_entry); | 70 | entry = list_entry(tail_node, struct drm_mm_node, ml_entry); |
71 | if (!entry->free) | 71 | if (!entry->free) |
72 | return -ENOMEM; | 72 | return -ENOMEM; |
73 | 73 | ||
74 | if (entry->size <= size) | 74 | if (entry->size <= size) |
75 | return -ENOMEM; | 75 | return -ENOMEM; |
76 | 76 | ||
77 | entry->size -= size; | 77 | entry->size -= size; |
78 | return 0; | 78 | return 0; |
79 | } | 79 | } |
80 | 80 | ||
81 | static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) | 81 | static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) |
82 | { | 82 | { |
83 | struct drm_mm_node *child; | 83 | struct drm_mm_node *child; |
84 | 84 | ||
85 | if (atomic) | 85 | if (atomic) |
86 | child = kmalloc(sizeof(*child), GFP_ATOMIC); | 86 | child = kmalloc(sizeof(*child), GFP_ATOMIC); |
87 | else | 87 | else |
88 | child = kmalloc(sizeof(*child), GFP_KERNEL); | 88 | child = kmalloc(sizeof(*child), GFP_KERNEL); |
89 | 89 | ||
90 | if (unlikely(child == NULL)) { | 90 | if (unlikely(child == NULL)) { |
91 | spin_lock(&mm->unused_lock); | 91 | spin_lock(&mm->unused_lock); |
92 | if (list_empty(&mm->unused_nodes)) | 92 | if (list_empty(&mm->unused_nodes)) |
93 | child = NULL; | 93 | child = NULL; |
94 | else { | 94 | else { |
95 | child = | 95 | child = |
96 | list_entry(mm->unused_nodes.next, | 96 | list_entry(mm->unused_nodes.next, |
97 | struct drm_mm_node, fl_entry); | 97 | struct drm_mm_node, fl_entry); |
98 | list_del(&child->fl_entry); | 98 | list_del(&child->fl_entry); |
99 | --mm->num_unused; | 99 | --mm->num_unused; |
100 | } | 100 | } |
101 | spin_unlock(&mm->unused_lock); | 101 | spin_unlock(&mm->unused_lock); |
102 | } | 102 | } |
103 | return child; | 103 | return child; |
104 | } | 104 | } |
105 | 105 | ||
/* drm_mm_pre_get() - pre allocate drm_mm_node structure
 * drm_mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	/* Top up the reserve pool to MM_UNUSED_TARGET nodes so that a
	 * later drm_mm_kmalloc() in atomic context can fall back on it.
	 * The lock is dropped around kmalloc() because GFP_KERNEL may
	 * sleep; num_unused is rechecked each iteration after retaking
	 * the lock. */
	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			/* A pool of at least 2 nodes is treated as good
			 * enough to report success. */
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
133 | 133 | ||
134 | static int drm_mm_create_tail_node(struct drm_mm *mm, | 134 | static int drm_mm_create_tail_node(struct drm_mm *mm, |
135 | unsigned long start, | 135 | unsigned long start, |
136 | unsigned long size, int atomic) | 136 | unsigned long size, int atomic) |
137 | { | 137 | { |
138 | struct drm_mm_node *child; | 138 | struct drm_mm_node *child; |
139 | 139 | ||
140 | child = drm_mm_kmalloc(mm, atomic); | 140 | child = drm_mm_kmalloc(mm, atomic); |
141 | if (unlikely(child == NULL)) | 141 | if (unlikely(child == NULL)) |
142 | return -ENOMEM; | 142 | return -ENOMEM; |
143 | 143 | ||
144 | child->free = 1; | 144 | child->free = 1; |
145 | child->size = size; | 145 | child->size = size; |
146 | child->start = start; | 146 | child->start = start; |
147 | child->mm = mm; | 147 | child->mm = mm; |
148 | 148 | ||
149 | list_add_tail(&child->ml_entry, &mm->ml_entry); | 149 | list_add_tail(&child->ml_entry, &mm->ml_entry); |
150 | list_add_tail(&child->fl_entry, &mm->fl_entry); | 150 | list_add_tail(&child->fl_entry, &mm->fl_entry); |
151 | 151 | ||
152 | return 0; | 152 | return 0; |
153 | } | 153 | } |
154 | 154 | ||
155 | int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic) | 155 | int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic) |
156 | { | 156 | { |
157 | struct list_head *tail_node; | 157 | struct list_head *tail_node; |
158 | struct drm_mm_node *entry; | 158 | struct drm_mm_node *entry; |
159 | 159 | ||
160 | tail_node = mm->ml_entry.prev; | 160 | tail_node = mm->ml_entry.prev; |
161 | entry = list_entry(tail_node, struct drm_mm_node, ml_entry); | 161 | entry = list_entry(tail_node, struct drm_mm_node, ml_entry); |
162 | if (!entry->free) { | 162 | if (!entry->free) { |
163 | return drm_mm_create_tail_node(mm, entry->start + entry->size, | 163 | return drm_mm_create_tail_node(mm, entry->start + entry->size, |
164 | size, atomic); | 164 | size, atomic); |
165 | } | 165 | } |
166 | entry->size += size; | 166 | entry->size += size; |
167 | return 0; | 167 | return 0; |
168 | } | 168 | } |
169 | 169 | ||
170 | static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, | 170 | static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, |
171 | unsigned long size, | 171 | unsigned long size, |
172 | int atomic) | 172 | int atomic) |
173 | { | 173 | { |
174 | struct drm_mm_node *child; | 174 | struct drm_mm_node *child; |
175 | 175 | ||
176 | child = drm_mm_kmalloc(parent->mm, atomic); | 176 | child = drm_mm_kmalloc(parent->mm, atomic); |
177 | if (unlikely(child == NULL)) | 177 | if (unlikely(child == NULL)) |
178 | return NULL; | 178 | return NULL; |
179 | 179 | ||
180 | INIT_LIST_HEAD(&child->fl_entry); | 180 | INIT_LIST_HEAD(&child->fl_entry); |
181 | 181 | ||
182 | child->free = 0; | 182 | child->free = 0; |
183 | child->size = size; | 183 | child->size = size; |
184 | child->start = parent->start; | 184 | child->start = parent->start; |
185 | child->mm = parent->mm; | 185 | child->mm = parent->mm; |
186 | 186 | ||
187 | list_add_tail(&child->ml_entry, &parent->ml_entry); | 187 | list_add_tail(&child->ml_entry, &parent->ml_entry); |
188 | INIT_LIST_HEAD(&child->fl_entry); | 188 | INIT_LIST_HEAD(&child->fl_entry); |
189 | 189 | ||
190 | parent->size -= size; | 190 | parent->size -= size; |
191 | parent->start += size; | 191 | parent->start += size; |
192 | return child; | 192 | return child; |
193 | } | 193 | } |
194 | 194 | ||
195 | 195 | ||
196 | struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, | 196 | struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, |
197 | unsigned long size, | 197 | unsigned long size, |
198 | unsigned alignment, | 198 | unsigned alignment, |
199 | int atomic) | 199 | int atomic) |
200 | { | 200 | { |
201 | 201 | ||
202 | struct drm_mm_node *align_splitoff = NULL; | 202 | struct drm_mm_node *align_splitoff = NULL; |
203 | unsigned tmp = 0; | 203 | unsigned tmp = 0; |
204 | 204 | ||
205 | if (alignment) | 205 | if (alignment) |
206 | tmp = node->start % alignment; | 206 | tmp = node->start % alignment; |
207 | 207 | ||
208 | if (tmp) { | 208 | if (tmp) { |
209 | align_splitoff = | 209 | align_splitoff = |
210 | drm_mm_split_at_start(node, alignment - tmp, atomic); | 210 | drm_mm_split_at_start(node, alignment - tmp, atomic); |
211 | if (unlikely(align_splitoff == NULL)) | 211 | if (unlikely(align_splitoff == NULL)) |
212 | return NULL; | 212 | return NULL; |
213 | } | 213 | } |
214 | 214 | ||
215 | if (node->size == size) { | 215 | if (node->size == size) { |
216 | list_del_init(&node->fl_entry); | 216 | list_del_init(&node->fl_entry); |
217 | node->free = 0; | 217 | node->free = 0; |
218 | } else { | 218 | } else { |
219 | node = drm_mm_split_at_start(node, size, atomic); | 219 | node = drm_mm_split_at_start(node, size, atomic); |
220 | } | 220 | } |
221 | 221 | ||
222 | if (align_splitoff) | 222 | if (align_splitoff) |
223 | drm_mm_put_block(align_splitoff); | 223 | drm_mm_put_block(align_splitoff); |
224 | 224 | ||
225 | return node; | 225 | return node; |
226 | } | 226 | } |
227 | EXPORT_SYMBOL(drm_mm_get_block_generic); | 227 | EXPORT_SYMBOL(drm_mm_get_block_generic); |
228 | 228 | ||
229 | struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node, | 229 | struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node, |
230 | unsigned long size, | 230 | unsigned long size, |
231 | unsigned alignment, | 231 | unsigned alignment, |
232 | unsigned long start, | 232 | unsigned long start, |
233 | unsigned long end, | 233 | unsigned long end, |
234 | int atomic) | 234 | int atomic) |
235 | { | 235 | { |
236 | struct drm_mm_node *align_splitoff = NULL; | 236 | struct drm_mm_node *align_splitoff = NULL; |
237 | unsigned tmp = 0; | 237 | unsigned tmp = 0; |
238 | unsigned wasted = 0; | 238 | unsigned wasted = 0; |
239 | 239 | ||
240 | if (node->start < start) | 240 | if (node->start < start) |
241 | wasted += start - node->start; | 241 | wasted += start - node->start; |
242 | if (alignment) | 242 | if (alignment) |
243 | tmp = ((node->start + wasted) % alignment); | 243 | tmp = ((node->start + wasted) % alignment); |
244 | 244 | ||
245 | if (tmp) | 245 | if (tmp) |
246 | wasted += alignment - tmp; | 246 | wasted += alignment - tmp; |
247 | if (wasted) { | 247 | if (wasted) { |
248 | align_splitoff = drm_mm_split_at_start(node, wasted, atomic); | 248 | align_splitoff = drm_mm_split_at_start(node, wasted, atomic); |
249 | if (unlikely(align_splitoff == NULL)) | 249 | if (unlikely(align_splitoff == NULL)) |
250 | return NULL; | 250 | return NULL; |
251 | } | 251 | } |
252 | 252 | ||
253 | if (node->size == size) { | 253 | if (node->size == size) { |
254 | list_del_init(&node->fl_entry); | 254 | list_del_init(&node->fl_entry); |
255 | node->free = 0; | 255 | node->free = 0; |
256 | } else { | 256 | } else { |
257 | node = drm_mm_split_at_start(node, size, atomic); | 257 | node = drm_mm_split_at_start(node, size, atomic); |
258 | } | 258 | } |
259 | 259 | ||
260 | if (align_splitoff) | 260 | if (align_splitoff) |
261 | drm_mm_put_block(align_splitoff); | 261 | drm_mm_put_block(align_splitoff); |
262 | 262 | ||
263 | return node; | 263 | return node; |
264 | } | 264 | } |
265 | EXPORT_SYMBOL(drm_mm_get_block_range_generic); | 265 | EXPORT_SYMBOL(drm_mm_get_block_range_generic); |
266 | 266 | ||
/*
 * Put a block. Merge with the previous and / or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{

	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	/* Set once cur's range has been absorbed by a neighbour. */
	int merged = 0;

	/* Try to fold cur into a free predecessor (if cur is not the
	 * first block in the manager). */
	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	/* Then look at the successor. */
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				/* prev, cur and next all collapse into
				 * prev; next's node structure is recycled
				 * into the reserve pool (or freed). */
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				/* Grow next downwards to cover cur. */
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	/* Unmerged: cur itself becomes a free block.  Merged: cur's
	 * range now belongs to a neighbour, so recycle (or free) cur's
	 * node structure. */
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}

EXPORT_SYMBOL(drm_mm_put_block);
330 | 330 | ||
331 | struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, | 331 | struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, |
332 | unsigned long size, | 332 | unsigned long size, |
333 | unsigned alignment, int best_match) | 333 | unsigned alignment, int best_match) |
334 | { | 334 | { |
335 | struct list_head *list; | 335 | struct list_head *list; |
336 | const struct list_head *free_stack = &mm->fl_entry; | 336 | const struct list_head *free_stack = &mm->fl_entry; |
337 | struct drm_mm_node *entry; | 337 | struct drm_mm_node *entry; |
338 | struct drm_mm_node *best; | 338 | struct drm_mm_node *best; |
339 | unsigned long best_size; | 339 | unsigned long best_size; |
340 | unsigned wasted; | 340 | unsigned wasted; |
341 | 341 | ||
342 | best = NULL; | 342 | best = NULL; |
343 | best_size = ~0UL; | 343 | best_size = ~0UL; |
344 | 344 | ||
345 | list_for_each(list, free_stack) { | 345 | list_for_each(list, free_stack) { |
346 | entry = list_entry(list, struct drm_mm_node, fl_entry); | 346 | entry = list_entry(list, struct drm_mm_node, fl_entry); |
347 | wasted = 0; | 347 | wasted = 0; |
348 | 348 | ||
349 | if (entry->size < size) | 349 | if (entry->size < size) |
350 | continue; | 350 | continue; |
351 | 351 | ||
352 | if (alignment) { | 352 | if (alignment) { |
353 | register unsigned tmp = entry->start % alignment; | 353 | register unsigned tmp = entry->start % alignment; |
354 | if (tmp) | 354 | if (tmp) |
355 | wasted += alignment - tmp; | 355 | wasted += alignment - tmp; |
356 | } | 356 | } |
357 | 357 | ||
358 | if (entry->size >= size + wasted) { | 358 | if (entry->size >= size + wasted) { |
359 | if (!best_match) | 359 | if (!best_match) |
360 | return entry; | 360 | return entry; |
361 | if (size < best_size) { | 361 | if (size < best_size) { |
362 | best = entry; | 362 | best = entry; |
363 | best_size = entry->size; | 363 | best_size = entry->size; |
364 | } | 364 | } |
365 | } | 365 | } |
366 | } | 366 | } |
367 | 367 | ||
368 | return best; | 368 | return best; |
369 | } | 369 | } |
370 | EXPORT_SYMBOL(drm_mm_search_free); | 370 | EXPORT_SYMBOL(drm_mm_search_free); |
371 | 371 | ||
372 | struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, | 372 | struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, |
373 | unsigned long size, | 373 | unsigned long size, |
374 | unsigned alignment, | 374 | unsigned alignment, |
375 | unsigned long start, | 375 | unsigned long start, |
376 | unsigned long end, | 376 | unsigned long end, |
377 | int best_match) | 377 | int best_match) |
378 | { | 378 | { |
379 | struct list_head *list; | 379 | struct list_head *list; |
380 | const struct list_head *free_stack = &mm->fl_entry; | 380 | const struct list_head *free_stack = &mm->fl_entry; |
381 | struct drm_mm_node *entry; | 381 | struct drm_mm_node *entry; |
382 | struct drm_mm_node *best; | 382 | struct drm_mm_node *best; |
383 | unsigned long best_size; | 383 | unsigned long best_size; |
384 | unsigned wasted; | 384 | unsigned wasted; |
385 | 385 | ||
386 | best = NULL; | 386 | best = NULL; |
387 | best_size = ~0UL; | 387 | best_size = ~0UL; |
388 | 388 | ||
389 | list_for_each(list, free_stack) { | 389 | list_for_each(list, free_stack) { |
390 | entry = list_entry(list, struct drm_mm_node, fl_entry); | 390 | entry = list_entry(list, struct drm_mm_node, fl_entry); |
391 | wasted = 0; | 391 | wasted = 0; |
392 | 392 | ||
393 | if (entry->size < size) | 393 | if (entry->size < size) |
394 | continue; | 394 | continue; |
395 | 395 | ||
396 | if (entry->start > end || (entry->start+entry->size) < start) | 396 | if (entry->start > end || (entry->start+entry->size) < start) |
397 | continue; | 397 | continue; |
398 | 398 | ||
399 | if (entry->start < start) | 399 | if (entry->start < start) |
400 | wasted += start - entry->start; | 400 | wasted += start - entry->start; |
401 | 401 | ||
402 | if (alignment) { | 402 | if (alignment) { |
403 | register unsigned tmp = (entry->start + wasted) % alignment; | 403 | register unsigned tmp = (entry->start + wasted) % alignment; |
404 | if (tmp) | 404 | if (tmp) |
405 | wasted += alignment - tmp; | 405 | wasted += alignment - tmp; |
406 | } | 406 | } |
407 | 407 | ||
408 | if (entry->size >= size + wasted) { | 408 | if (entry->size >= size + wasted) { |
409 | if (!best_match) | 409 | if (!best_match) |
410 | return entry; | 410 | return entry; |
411 | if (size < best_size) { | 411 | if (size < best_size) { |
412 | best = entry; | 412 | best = entry; |
413 | best_size = entry->size; | 413 | best_size = entry->size; |
414 | } | 414 | } |
415 | } | 415 | } |
416 | } | 416 | } |
417 | 417 | ||
418 | return best; | 418 | return best; |
419 | } | 419 | } |
420 | EXPORT_SYMBOL(drm_mm_search_free_in_range); | 420 | EXPORT_SYMBOL(drm_mm_search_free_in_range); |
421 | 421 | ||
422 | int drm_mm_clean(struct drm_mm * mm) | 422 | int drm_mm_clean(struct drm_mm * mm) |
423 | { | 423 | { |
424 | struct list_head *head = &mm->ml_entry; | 424 | struct list_head *head = &mm->ml_entry; |
425 | 425 | ||
426 | return (head->next->next == head); | 426 | return (head->next->next == head); |
427 | } | 427 | } |
428 | EXPORT_SYMBOL(drm_mm_clean); | 428 | EXPORT_SYMBOL(drm_mm_clean); |
429 | 429 | ||
/*
 * Initialise @mm to manage the range [start, start + size) as a single
 * free block.  Returns 0 on success or -ENOMEM if the initial node
 * cannot be allocated.
 */
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
441 | 441 | ||
/*
 * Tear down a memory manager.  Only proceeds when the manager is clean
 * (a single free block); otherwise logs an error and returns, leaving
 * takedown to be retried later.
 */
void drm_mm_takedown(struct drm_mm * mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	/* A clean manager consists of exactly one node, which is both
	 * the only block and the only free-stack entry; anything else
	 * means allocations are still outstanding. */
	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	/* Release the single remaining block. */
	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	/* Drain the pre-allocated reserve pool. */
	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	/* The pool count must now be exactly zero. */
	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
471 | 471 | ||
472 | void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) | ||
473 | { | ||
474 | struct drm_mm_node *entry; | ||
475 | int total_used = 0, total_free = 0, total = 0; | ||
476 | |||
477 | list_for_each_entry(entry, &mm->ml_entry, ml_entry) { | ||
478 | printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n", | ||
479 | prefix, entry->start, entry->start + entry->size, | ||
480 | entry->size, entry->free ? "free" : "used"); | ||
481 | total += entry->size; | ||
482 | if (entry->free) | ||
483 | total_free += entry->size; | ||
484 | else | ||
485 | total_used += entry->size; | ||
486 | } | ||
487 | printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total, | ||
488 | total_used, total_free); | ||
489 | } | ||
490 | EXPORT_SYMBOL(drm_mm_debug_table); | ||
491 | |||
#if defined(CONFIG_DEBUG_FS)
/*
 * drm_mm_dump_table() - emit the manager's block list into a seq_file
 * @m: seq_file (debugfs) to write to
 * @mm: memory manager to dump
 *
 * One line per block (range, size, free/used) plus a totals summary.
 * Always returns 0.
 *
 * Fix: totals are unsigned long (entry->size is unsigned long; int
 * accumulators could truncate/overflow) and the summary format now
 * uses %lu to match.
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif
491 | 511 |
include/drm/drm_mm.h
1 | /************************************************************************** | 1 | /************************************************************************** |
2 | * | 2 | * |
3 | * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA. | 3 | * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA. |
4 | * All Rights Reserved. | 4 | * All Rights Reserved. |
5 | * | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the | 7 | * copy of this software and associated documentation files (the |
8 | * "Software"), to deal in the Software without restriction, including | 8 | * "Software"), to deal in the Software without restriction, including |
9 | * without limitation the rights to use, copy, modify, merge, publish, | 9 | * without limitation the rights to use, copy, modify, merge, publish, |
10 | * distribute, sub license, and/or sell copies of the Software, and to | 10 | * distribute, sub license, and/or sell copies of the Software, and to |
11 | * permit persons to whom the Software is furnished to do so, subject to | 11 | * permit persons to whom the Software is furnished to do so, subject to |
12 | * the following conditions: | 12 | * the following conditions: |
13 | * | 13 | * |
14 | * The above copyright notice and this permission notice (including the | 14 | * The above copyright notice and this permission notice (including the |
15 | * next paragraph) shall be included in all copies or substantial portions | 15 | * next paragraph) shall be included in all copies or substantial portions |
16 | * of the Software. | 16 | * of the Software. |
17 | * | 17 | * |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
25 | * | 25 | * |
26 | * | 26 | * |
27 | **************************************************************************/ | 27 | **************************************************************************/ |
28 | /* | 28 | /* |
29 | * Authors: | 29 | * Authors: |
30 | * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> | 30 | * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #ifndef _DRM_MM_H_ | 33 | #ifndef _DRM_MM_H_ |
34 | #define _DRM_MM_H_ | 34 | #define _DRM_MM_H_ |
35 | 35 | ||
36 | /* | 36 | /* |
37 | * Generic range manager structs | 37 | * Generic range manager structs |
38 | */ | 38 | */ |
39 | #include <linux/list.h> | 39 | #include <linux/list.h> |
40 | #ifdef CONFIG_DEBUG_FS | 40 | #ifdef CONFIG_DEBUG_FS |
41 | #include <linux/seq_file.h> | 41 | #include <linux/seq_file.h> |
42 | #endif | 42 | #endif |
43 | 43 | ||
/*
 * A single block of the managed range. Nodes live on mm->ml_entry in
 * address order; free nodes are additionally linked on mm->fl_entry.
 */
struct drm_mm_node {
	struct list_head fl_entry;	/* link in the free list (mm->fl_entry) */
	struct list_head ml_entry;	/* link in the ordered list of all blocks */
	int free;			/* non-zero when the block is unallocated */
	unsigned long start;		/* start offset of this block */
	unsigned long size;		/* block size, same units as start */
	struct drm_mm *mm;		/* owning manager (see drm_get_mm()) */
	void *private;			/* opaque cookie for the node's user */
};
53 | 53 | ||
/*
 * Range manager instance covering one [start, start + size) span
 * (see drm_mm_init()).
 */
struct drm_mm {
	struct list_head fl_entry;	/* free blocks */
	struct list_head ml_entry;	/* all blocks, in address order */
	struct list_head unused_nodes;	/* preallocated nodes (drm_mm_pre_get()) */
	int num_unused;			/* count of nodes on unused_nodes */
	spinlock_t unused_lock;		/* protects unused_nodes / num_unused */
};
61 | 61 | ||
/*
 * Basic range manager support (drm_mm.c)
 */
/*
 * Allocate @size units (aligned to @alignment) out of the free node
 * @node. @atomic != 0 selects a path presumably safe in atomic context
 * (backed by the mm's preallocated node pool) -- see drm_mm.c.
 */
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
						    unsigned long size,
						    unsigned alignment,
						    int atomic);
/*
 * As drm_mm_get_block_generic(), but the allocation is additionally
 * constrained to the [@start, @end) range.
 */
extern struct drm_mm_node *drm_mm_get_block_range_generic(
						struct drm_mm_node *node,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int atomic);
/*
 * Non-atomic allocation: carve @size units (aligned to @alignment)
 * out of the free node @parent.
 */
static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
						   unsigned long size,
						   unsigned alignment)
{
	struct drm_mm_node *block;

	block = drm_mm_get_block_generic(parent, size, alignment, 0);
	return block;
}
/*
 * Atomic-context variant of drm_mm_get_block(): same allocation, but
 * requests the atomic path of the generic allocator.
 */
static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
							  unsigned long size,
							  unsigned alignment)
{
	struct drm_mm_node *block;

	block = drm_mm_get_block_generic(parent, size, alignment, 1);
	return block;
}
/*
 * Non-atomic allocation constrained to [@start, @end): carve @size
 * units (aligned to @alignment) out of the free node @parent.
 */
static inline struct drm_mm_node *drm_mm_get_block_range(
						struct drm_mm_node *parent,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end)
{
	struct drm_mm_node *block;

	block = drm_mm_get_block_range_generic(parent, size, alignment,
					       start, end, 0);
	return block;
}
/*
 * Atomic-context variant of drm_mm_get_block_range(): same range
 * constraint, atomic allocation path.
 */
static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
						struct drm_mm_node *parent,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end)
{
	struct drm_mm_node *block;

	block = drm_mm_get_block_range_generic(parent, size, alignment,
					       start, end, 1);
	return block;
}
/* Release the block @cur back to the manager. */
extern void drm_mm_put_block(struct drm_mm_node *cur);
/*
 * Find a free node able to hold @size units at @alignment;
 * @best_match presumably prefers the tightest fit -- see drm_mm.c.
 */
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
					      unsigned long size,
					      unsigned alignment,
					      int best_match);
/* As drm_mm_search_free(), restricted to the [@start, @end) range. */
extern struct drm_mm_node *drm_mm_search_free_in_range(
						const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match);
/* Initialize @mm to manage the range [@start, @start + @size). */
extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
		       unsigned long size);
/* Tear down @mm; counterpart of drm_mm_init(). */
extern void drm_mm_takedown(struct drm_mm *mm);
/* Non-zero when the manager holds no allocations -- TODO confirm polarity. */
extern int drm_mm_clean(struct drm_mm *mm);
/* Query/shrink/grow the free space at the tail of the managed range. */
extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
					 unsigned long size);
extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
				    unsigned long size, int atomic);
/* Preallocate nodes onto mm->unused_nodes for later atomic allocation. */
extern int drm_mm_pre_get(struct drm_mm *mm);
130 | 130 | ||
131 | static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) | 131 | static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) |
132 | { | 132 | { |
133 | return block->mm; | 133 | return block->mm; |
134 | } | 134 | } |
135 | 135 | ||
/*
 * Print a snapshot of the manager state (one line per block) to the
 * kernel log, each line tagged with @prefix. Debugging aid.
 */
extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
/* Dump the manager state into seq_file @m (debugfs helper). */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif
139 | 140 | ||
140 | #endif | 141 | #endif |
141 | 142 |