Commit 3d2d827f5ca5e32816194119d5c980c7e04474a6
Committed by: Linus Torvalds
1 parent: 425fbf047c
mm: move use_mm/unuse_mm from aio.c to mm/
Anyone who wants to do copy to/from user from a kernel thread needs use_mm (like what fs/aio has). Move that into mm/, to make reusing and exporting easier down the line, and make aio use it. The next intended user, besides aio, will be vhost-net.

Acked-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
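For context, the pattern this enables looks roughly like the sketch below. It is illustrative only, not code from this commit: the function name is hypothetical, and it assumes the caller already holds an mm_users reference on the issuer's mm (for example via get_task_mm()) so the address space stays alive while the kernel thread borrows it. The set_fs(USER_DS) handling mirrors what fs/aio.c does around its own use_mm() calls in this era.

/* Illustrative sketch, not part of this commit: a kernel thread
 * temporarily adopts an I/O issuer's address space so that
 * copy_from_user()/copy_to_user() resolve against that task's mappings.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

static int worker_copy_from_issuer(struct mm_struct *mm, void *dst,
                                   const void __user *src, size_t len)
{
        mm_segment_t oldfs = get_fs();
        int ret;

        set_fs(USER_DS);        /* allow user-pointer access checks */
        use_mm(mm);             /* switch this kernel thread onto 'mm' */
        ret = copy_from_user(dst, src, len) ? -EFAULT : 0;
        unuse_mm(mm);           /* detach; 'mm' remains the lazy active_mm */
        set_fs(oldfs);
        return ret;
}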
Showing 4 changed files with 66 additions and 47 deletions
fs/aio.c
@@ -24,6 +24,7 @@
 #include <linux/file.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
+#include <linux/mmu_context.h>
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <linux/aio.h>
@@ -34,7 +35,6 @@
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
-#include <asm/mmu_context.h>
 
 #if DEBUG > 1
 #define dprintk		printk
@@ -592,51 +592,6 @@
 
 	rcu_read_unlock();
 	return ret;
-}
-
-/*
- * use_mm
- *	Makes the calling kernel thread take on the specified
- *	mm context.
- *	Called by the retry thread execute retries within the
- *	iocb issuer's mm context, so that copy_from/to_user
- *	operations work seamlessly for aio.
- *	(Note: this routine is intended to be called only
- *	from a kernel thread context)
- */
-static void use_mm(struct mm_struct *mm)
-{
-	struct mm_struct *active_mm;
-	struct task_struct *tsk = current;
-
-	task_lock(tsk);
-	active_mm = tsk->active_mm;
-	atomic_inc(&mm->mm_count);
-	tsk->mm = mm;
-	tsk->active_mm = mm;
-	switch_mm(active_mm, mm, tsk);
-	task_unlock(tsk);
-
-	mmdrop(active_mm);
-}
-
-/*
- * unuse_mm
- *	Reverses the effect of use_mm, i.e. releases the
- *	specified mm context which was earlier taken on
- *	by the calling kernel thread
- *	(Note: this routine is intended to be called only
- *	from a kernel thread context)
- */
-static void unuse_mm(struct mm_struct *mm)
-{
-	struct task_struct *tsk = current;
-
-	task_lock(tsk);
-	tsk->mm = NULL;
-	/* active_mm is still 'mm' */
-	enter_lazy_tlb(mm, tsk);
-	task_unlock(tsk);
 }
 
 /*
include/linux/mmu_context.h
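The hunk for this new header is not reproduced in this view. Given that fs/aio.c now includes <linux/mmu_context.h> and the definitions below lose their static qualifier, it presumably just declares the two helpers. A minimal sketch of what such a header would contain, offered as an assumption rather than the verbatim file:

/* Assumed contents of include/linux/mmu_context.h -- a reconstruction,
 * not the actual hunk from this commit. */
#ifndef _LINUX_MMU_CONTEXT_H
#define _LINUX_MMU_CONTEXT_H

struct mm_struct;

void use_mm(struct mm_struct *mm);
void unuse_mm(struct mm_struct *mm);

#endif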
mm/Makefile
@@ -11,7 +11,7 @@
 			   maccess.o page_alloc.o page-writeback.o \
 			   readahead.o swap.o truncate.o vmscan.o shmem.o \
 			   prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
-			   page_isolation.o mm_init.o $(mmu-y)
+			   page_isolation.o mm_init.o mmu_context.o $(mmu-y)
 obj-y += init-mm.o
 
 obj-$(CONFIG_PROC_PAGE_MONITOR)	+= pagewalk.o
mm/mmu_context.c
@@ -0,0 +1,55 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ *
+ * See ../COPYING for licensing terms.
+ */
+
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
+#include <linux/sched.h>
+
+#include <asm/mmu_context.h>
+
+/*
+ * use_mm
+ *	Makes the calling kernel thread take on the specified
+ *	mm context.
+ *	Called by the retry thread execute retries within the
+ *	iocb issuer's mm context, so that copy_from/to_user
+ *	operations work seamlessly for aio.
+ *	(Note: this routine is intended to be called only
+ *	from a kernel thread context)
+ */
+void use_mm(struct mm_struct *mm)
+{
+	struct mm_struct *active_mm;
+	struct task_struct *tsk = current;
+
+	task_lock(tsk);
+	active_mm = tsk->active_mm;
+	atomic_inc(&mm->mm_count);
+	tsk->mm = mm;
+	tsk->active_mm = mm;
+	switch_mm(active_mm, mm, tsk);
+	task_unlock(tsk);
+
+	mmdrop(active_mm);
+}
+
+/*
+ * unuse_mm
+ *	Reverses the effect of use_mm, i.e. releases the
+ *	specified mm context which was earlier taken on
+ *	by the calling kernel thread
+ *	(Note: this routine is intended to be called only
+ *	from a kernel thread context)
+ */
+void unuse_mm(struct mm_struct *mm)
+{
+	struct task_struct *tsk = current;
+
+	task_lock(tsk);
+	tsk->mm = NULL;
+	/* active_mm is still 'mm' */
+	enter_lazy_tlb(mm, tsk);
+	task_unlock(tsk);
+}
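Note that this commit only moves the helpers into mm/ and drops their static qualifier; making them callable from modules (the "exporting easier down the line" mentioned in the commit message, e.g. for a modular vhost-net) would additionally require exporting the symbols. A minimal sketch of such a follow-up, as an assumption rather than part of this diff:

/* Hypothetical follow-up, not in this commit: export the helpers so
 * that modular users such as vhost-net can call them. */
EXPORT_SYMBOL_GPL(use_mm);
EXPORT_SYMBOL_GPL(unuse_mm);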