Commit 3b74d18e54e20fc1d398eb391bea5b9aed22aca5

Authored by akpm@linux-foundation.org
Committed by Tony Luck
1 parent a07ee86205

[IA64] rename partial_page

Jens has added a partial_page thing in splice which conflicts with the ia64
one.  Rename the ia64 one out of the way.  (ia64 chose poorly.)

Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
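
For context, the splice-side structure that now owns the generic name looks
roughly like this (from fs/splice.c of this era; reproduced approximately
here for reference only, not part of this patch):

/* fs/splice.c (approximate): splice's bookkeeping for the valid
 * region of a page in a pipe buffer. */
struct partial_page {
	unsigned int offset;	/* start of valid data within the page */
	unsigned int len;	/* number of valid bytes */
};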

Showing 6 changed files with 62 additions and 59 deletions

arch/ia64/ia32/ia32_support.c
@@ -249,11 +249,11 @@
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
 	{
-		extern struct kmem_cache *partial_page_cachep;
+		extern struct kmem_cache *ia64_partial_page_cachep;
 
-		partial_page_cachep = kmem_cache_create("partial_page_cache",
-					sizeof(struct partial_page),
-					0, SLAB_PANIC, NULL);
+		ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache",
+					sizeof(struct ia64_partial_page),
+					0, SLAB_PANIC, NULL);
 	}
 #endif
 	return 0;
arch/ia64/ia32/ia32priv.h
@@ -25,8 +25,8 @@
  * partially mapped pages provide precise accounting of which 4k sub pages
  * are mapped and which ones are not, thereby improving IA-32 compatibility.
  */
-struct partial_page {
-	struct partial_page	*next; /* linked list, sorted by address */
+struct ia64_partial_page {
+	struct ia64_partial_page *next; /* linked list, sorted by address */
 	struct rb_node		pp_rb;
 	/* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64
 	 * should suffice.*/
@@ -34,17 +34,17 @@
 	unsigned int		base;
 };
 
-struct partial_page_list {
-	struct partial_page *pp_head;	/* list head, points to the lowest
+struct ia64_partial_page_list {
+	struct ia64_partial_page *pp_head; /* list head, points to the lowest
 					 * addressed partial page */
 	struct rb_root		ppl_rb;
-	struct partial_page *pp_hint;	/* pp_hint->next is the last
+	struct ia64_partial_page *pp_hint; /* pp_hint->next is the last
 					 * accessed partial page */
 	atomic_t		pp_count; /* reference count */
 };
 
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-struct partial_page_list* ia32_init_pp_list (void);
+struct ia64_partial_page_list* ia32_init_pp_list (void);
 #else
 # define ia32_init_pp_list() 0
 #endif
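
A minimal sketch of how the renamed structure is used, assuming the
unsigned long bitmap member that the hunks above elide and a hypothetical
helper name; each ia64_partial_page records, one bit per IA-32 4K page,
which sub-pages of a single larger ia64 page are mapped:

#include <linux/bitops.h>

/* Hypothetical helper: mark 4K sub-pages [start_bit, end_bit) of the
 * ia64 page at pp->base as mapped, mirroring what __ia32_set_pp()
 * does in sys_ia32.c below. */
static void pp_set_range(struct ia64_partial_page *pp,
			 unsigned int start_bit, unsigned int end_bit)
{
	unsigned int i;

	for (i = start_bit; i < end_bit; i++)
		set_bit(i, &pp->bitmap);
}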
arch/ia64/ia32/sys_ia32.c
@@ -253,17 +253,17 @@
 	return ret;
 }
 
-/* SLAB cache for partial_page structures */
-struct kmem_cache *partial_page_cachep;
+/* SLAB cache for ia64_partial_page structures */
+struct kmem_cache *ia64_partial_page_cachep;
 
 /*
- * init partial_page_list.
+ * init ia64_partial_page_list.
  * return 0 means kmalloc fail.
  */
-struct partial_page_list*
+struct ia64_partial_page_list*
 ia32_init_pp_list(void)
 {
-	struct partial_page_list *p;
+	struct ia64_partial_page_list *p;
 
 	if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
 		return p;
@@ -280,12 +280,12 @@
  * Else, return 0 and provide @pprev, @rb_link, @rb_parent to
  * be used by later __ia32_insert_pp().
  */
-static struct partial_page *
-__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
-	struct partial_page **pprev, struct rb_node ***rb_link,
+static struct ia64_partial_page *
+__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
+	struct ia64_partial_page **pprev, struct rb_node ***rb_link,
 	struct rb_node **rb_parent)
 {
-	struct partial_page *pp;
+	struct ia64_partial_page *pp;
 	struct rb_node **__rb_link, *__rb_parent, *rb_prev;
 
 	pp = ppl->pp_hint;
@@ -297,7 +297,7 @@
 
 	while (*__rb_link) {
 		__rb_parent = *__rb_link;
-		pp = rb_entry(__rb_parent, struct partial_page, pp_rb);
+		pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);
 
 		if (pp->base == start) {
 			ppl->pp_hint = pp;
@@ -314,7 +314,7 @@
 	*rb_parent = __rb_parent;
 	*pprev = NULL;
 	if (rb_prev)
-		*pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
+		*pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
 	return NULL;
 }
 
@@ -322,9 +322,9 @@
  * insert @pp into @ppl.
  */
 static void
-__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev, struct rb_node **rb_link,
-	struct rb_node *rb_parent)
+__ia32_insert_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev,
+	struct rb_node **rb_link, struct rb_node *rb_parent)
 {
 	/* link list */
 	if (prev) {
@@ -334,7 +334,7 @@
 		ppl->pp_head = pp;
 		if (rb_parent)
 			pp->next = rb_entry(rb_parent,
-				struct partial_page, pp_rb);
+				struct ia64_partial_page, pp_rb);
 		else
 			pp->next = NULL;
 	}
@@ -350,8 +350,8 @@
  * delete @pp from partial page list @ppl.
  */
 static void
-__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
-	struct partial_page *prev)
+__ia32_delete_pp(struct ia64_partial_page_list *ppl,
+	struct ia64_partial_page *pp, struct ia64_partial_page *prev)
 {
 	if (prev) {
 		prev->next = pp->next;
@@ -363,15 +363,15 @@
 		ppl->pp_hint = pp->next;
 	}
 	rb_erase(&pp->pp_rb, &ppl->ppl_rb);
-	kmem_cache_free(partial_page_cachep, pp);
+	kmem_cache_free(ia64_partial_page_cachep, pp);
 }
 
-static struct partial_page *
-__pp_prev(struct partial_page *pp)
+static struct ia64_partial_page *
+__pp_prev(struct ia64_partial_page *pp)
 {
 	struct rb_node *prev = rb_prev(&pp->pp_rb);
 	if (prev)
-		return rb_entry(prev, struct partial_page, pp_rb);
+		return rb_entry(prev, struct ia64_partial_page, pp_rb);
 	else
 		return NULL;
 }
@@ -383,7 +383,7 @@
 static void
 __ia32_delete_pp_range(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node **rb_link, *rb_parent;
 
 	if (start >= end)
@@ -401,7 +401,7 @@
 	}
 
 	while (pp && pp->base < end) {
-		struct partial_page *tmp = pp->next;
+		struct ia64_partial_page *tmp = pp->next;
 		__ia32_delete_pp(current->thread.ppl, pp, prev);
 		pp = tmp;
 	}
@@ -414,7 +414,7 @@
 static int
 __ia32_set_pp(unsigned int start, unsigned int end, int flags)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, i;
 
@@ -450,8 +450,8 @@
 		return 0;
 	}
 
-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	pp->base = pstart;
@@ -504,7 +504,7 @@
 static int
 __ia32_unset_pp(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, i;
 	struct vm_area_struct *vma;
@@ -532,8 +532,8 @@
 		return -ENOMEM;
 	}
 
-	/* new a partial_page */
-	pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+	/* new a ia64_partial_page */
+	pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 	if (!pp)
 		return -ENOMEM;
 	pp->base = pstart;
@@ -605,7 +605,7 @@
 static int
 __ia32_compare_pp(unsigned int start, unsigned int end)
 {
-	struct partial_page *pp, *prev;
+	struct ia64_partial_page *pp, *prev;
 	struct rb_node ** rb_link, *rb_parent;
 	unsigned int pstart, start_bit, end_bit, size;
 	unsigned int first_bit, next_zero_bit;	/* the first range in bitmap */
@@ -682,13 +682,13 @@
 }
 
 static void
-__ia32_drop_pp_list(struct partial_page_list *ppl)
+__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
 {
-	struct partial_page *pp = ppl->pp_head;
+	struct ia64_partial_page *pp = ppl->pp_head;
 
 	while (pp) {
-		struct partial_page *next = pp->next;
-		kmem_cache_free(partial_page_cachep, pp);
+		struct ia64_partial_page *next = pp->next;
+		kmem_cache_free(ia64_partial_page_cachep, pp);
 		pp = next;
 	}
 
@@ -696,9 +696,9 @@
 }
 
 void
-ia32_drop_partial_page_list(struct task_struct *task)
+ia32_drop_ia64_partial_page_list(struct task_struct *task)
 {
-	struct partial_page_list* ppl = task->thread.ppl;
+	struct ia64_partial_page_list* ppl = task->thread.ppl;
 
 	if (ppl && atomic_dec_and_test(&ppl->pp_count))
 		__ia32_drop_pp_list(ppl);
@@ -708,9 +708,9 @@
  * Copy current->thread.ppl to ppl (already initialized).
  */
 static int
-__ia32_copy_pp_list(struct partial_page_list *ppl)
+__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
 {
-	struct partial_page *pp, *tmp, *prev;
+	struct ia64_partial_page *pp, *tmp, *prev;
 	struct rb_node **rb_link, *rb_parent;
 
 	ppl->pp_head = NULL;
@@ -721,7 +721,7 @@
 	prev = NULL;
 
 	for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
-		tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
+		tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
 		if (!tmp)
 			return -ENOMEM;
 		*tmp = *pp;
@@ -734,7 +734,8 @@
 }
 
 int
-ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
+ia32_copy_ia64_partial_page_list(struct task_struct *p,
+	unsigned long clone_flags)
 {
 	int retval = 0;
 
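
Every rb_entry() call renamed above is container_of() in disguise: given
the rb_node embedded in an ia64_partial_page, it recovers the enclosing
structure. A sketch of an in-order walk over one list's tree (hypothetical
helper; assumes the bitmap member noted earlier):

#include <linux/rbtree.h>
#include <linux/bitops.h>

/* Hypothetical helper: count mapped 4K sub-pages across a whole
 * list, visiting partial pages in ascending address order. */
static unsigned int pp_count_mapped(struct ia64_partial_page_list *ppl)
{
	struct rb_node *node;
	unsigned int count = 0;

	for (node = rb_first(&ppl->ppl_rb); node; node = rb_next(node)) {
		struct ia64_partial_page *pp =
			rb_entry(node, struct ia64_partial_page, pp_rb);

		count += hweight_long(pp->bitmap);
	}
	return count;
}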
arch/ia64/kernel/process.c
@@ -499,7 +499,8 @@
 
 		/* Copy partially mapped page list */
 		if (!retval)
-			retval = ia32_copy_partial_page_list(p, clone_flags);
+			retval = ia32_copy_ia64_partial_page_list(p,
+								clone_flags);
 	}
 #endif
 
@@ -728,7 +729,7 @@
 	ia64_drop_fpu(current);
 #ifdef CONFIG_IA32_SUPPORT
 	if (IS_IA32_PROCESS(task_pt_regs(current))) {
-		ia32_drop_partial_page_list(current);
+		ia32_drop_ia64_partial_page_list(current);
 		current->thread.task_size = IA32_PAGE_OFFSET;
 		set_fs(USER_DS);
 	}
@@ -754,7 +755,7 @@
 		pfm_release_debug_registers(current);
 #endif
 	if (IS_IA32_PROCESS(task_pt_regs(current)))
-		ia32_drop_partial_page_list(current);
+		ia32_drop_ia64_partial_page_list(current);
 }
 
 unsigned long
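
The copy/drop pair renamed here implements a share-or-duplicate policy
keyed on CLONE_VM: threads share one reference-counted list, while full
forks deep-copy it. A sketch of the likely shape (the real body lives in
sys_ia32.c and is only partially visible in the hunks above):

/* Sketch: share the list across CLONE_VM tasks via pp_count,
 * otherwise build an independent copy for the child. */
int ia32_copy_ia64_partial_page_list(struct task_struct *p,
				     unsigned long clone_flags)
{
	int retval = 0;

	if (clone_flags & CLONE_VM) {
		/* share: child references the parent's list */
		atomic_inc(&current->thread.ppl->pp_count);
		p->thread.ppl = current->thread.ppl;
	} else {
		/* duplicate: fresh list, then copy every node */
		p->thread.ppl = ia32_init_pp_list();
		if (p->thread.ppl == NULL)
			return -ENOMEM;
		retval = __ia32_copy_pp_list(p->thread.ppl);
	}
	return retval;
}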
include/asm-ia64/ia32.h
@@ -27,11 +27,12 @@
 extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
 			      sigset_t *set, struct pt_regs *regs);
 #if PAGE_SHIFT > IA32_PAGE_SHIFT
-extern int ia32_copy_partial_page_list (struct task_struct *, unsigned long);
-extern void ia32_drop_partial_page_list (struct task_struct *);
+extern int ia32_copy_ia64_partial_page_list(struct task_struct *,
+					    unsigned long);
+extern void ia32_drop_ia64_partial_page_list(struct task_struct *);
 #else
-# define ia32_copy_partial_page_list(a1, a2)	0
-# define ia32_drop_partial_page_list(a1)	do { ; } while (0)
+# define ia32_copy_ia64_partial_page_list(a1, a2)	0
+# define ia32_drop_ia64_partial_page_list(a1)	do { ; } while (0)
 #endif
 
 #endif /* !__ASSEMBLY__ */
include/asm-ia64/processor.h
@@ -220,7 +220,7 @@
 
 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
 
-struct partial_page_list;
+struct ia64_partial_page_list;
 #endif
 
 struct thread_struct {
@@ -242,7 +242,7 @@
 	__u64 fdr;			/* IA32 fp except. data reg */
 	__u64 old_k1;			/* old value of ar.k1 */
 	__u64 old_iob;			/* old IOBase value */
-	struct partial_page_list *ppl;	/* partial page list for 4K page size issue */
+	struct ia64_partial_page_list *ppl; /* partial page list for 4K page size issue */
 	/* cached TLS descriptors. */
 	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
 