Commit 846a16bf0fc80dc95a414ffce465e3cbf9680247

Authored by Lee Schermerhorn
Committed by Linus Torvalds
Parent: f0be3d32b0

mempolicy: rename mpol_copy to mpol_dup

This patch renames mpol_copy() to mpol_dup() because, well, that's what it
does.  Like strdup() for strings, mpol_dup() takes a pointer to an existing
mempolicy, allocates a new one, and copies the contents.

In a later patch, I want to use the name mpol_copy() to copy the contents
from one mempolicy to another, as strcpy() does for strings.
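
As a rough sketch of the resulting division of labor (only mpol_dup() is
introduced here; the mpol_copy() shape below is a guess, since that patch
comes later):

	/* strdup()-like: allocate a new mempolicy, copy *pol into it, and
	 * return it with its reference count initialized to 1.  On
	 * allocation failure an ERR_PTR() value is returned, which callers
	 * check with IS_ERR(), as the diff below shows. */
	struct mempolicy *mpol_dup(struct mempolicy *pol);

	/* strcpy()-like (hypothetical signature): copy the contents of one
	 * existing mempolicy into another that the caller already owns. */
	void mpol_copy(struct mempolicy *dst, struct mempolicy *src);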

Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

5 changed files with 16 additions and 16 deletions

include/linux/mempolicy.h
@@ -73,10 +73,10 @@
  * Mempolicy objects are reference counted. A mempolicy will be freed when
  * mpol_put() decrements the reference count to zero.
  *
- * Copying policy objects:
- * mpol_copy() allocates a new mempolicy and copies the specified mempolicy
+ * Duplicating policy objects:
+ * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
  * to the new storage. The reference count of the new object is initialized
- * to 1, representing the caller of mpol_copy().
+ * to 1, representing the caller of mpol_dup().
  */
 struct mempolicy {
 	atomic_t refcnt;
 
@@ -105,11 +105,11 @@
 	__mpol_put(pol);
 }
 
-extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
-static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
+extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
+static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
 {
 	if (pol)
-		pol = __mpol_copy(pol);
+		pol = __mpol_dup(pol);
 	return pol;
 }
 
@@ -198,7 +198,7 @@
 {
 }
 
-static inline struct mempolicy *mpol_copy(struct mempolicy *old)
+static inline struct mempolicy *mpol_dup(struct mempolicy *old)
 {
 	return NULL;
 }
kernel/cpuset.c
@@ -941,7 +941,7 @@
 	cs->mems_generation = cpuset_mems_generation++;
 	mutex_unlock(&callback_mutex);
 
-	cpuset_being_rebound = cs;		/* causes mpol_copy() rebind */
+	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
 
 	fudge = 10;				/* spare mmarray[] slots */
 	fudge += cpus_weight(cs->cpus_allowed);	/* imagine one fork-bomb/cpu */
@@ -992,7 +992,7 @@
 	 * rebind the vma mempolicies of each mm in mmarray[] to their
 	 * new cpuset, and release that mm. The mpol_rebind_mm()
 	 * call takes mmap_sem, which we couldn't take while holding
-	 * tasklist_lock. Forks can happen again now - the mpol_copy()
+	 * tasklist_lock. Forks can happen again now - the mpol_dup()
 	 * cpuset_being_rebound check will catch such forks, and rebind
 	 * their vma mempolicies too. Because we still hold the global
 	 * cgroup_mutex, we know that no other rebind effort will
kernel/fork.c
@@ -279,7 +279,7 @@
 		if (!tmp)
 			goto fail_nomem;
 		*tmp = *mpnt;
-		pol = mpol_copy(vma_policy(mpnt));
+		pol = mpol_dup(vma_policy(mpnt));
 		retval = PTR_ERR(pol);
 		if (IS_ERR(pol))
 			goto fail_nomem_policy;
@@ -1116,7 +1116,7 @@
 	p->audit_context = NULL;
 	cgroup_fork(p);
 #ifdef CONFIG_NUMA
-	p->mempolicy = mpol_copy(p->mempolicy);
+	p->mempolicy = mpol_dup(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
mm/mempolicy.c
@@ -1566,15 +1566,15 @@
 EXPORT_SYMBOL(alloc_pages_current);
 
 /*
- * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
+ * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
  * with the mems_allowed returned by cpuset_mems_allowed(). This
  * keeps mempolicies cpuset relative after its cpuset moves. See
  * further kernel/cpuset.c update_nodemask().
  */
 
-/* Slow path of a mempolicy copy */
-struct mempolicy *__mpol_copy(struct mempolicy *old)
+/* Slow path of a mempolicy duplicate */
+struct mempolicy *__mpol_dup(struct mempolicy *old)
 {
 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 
mm/mmap.c
@@ -1810,7 +1810,7 @@
 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
 	}
 
-	pol = mpol_copy(vma_policy(vma));
+	pol = mpol_dup(vma_policy(vma));
 	if (IS_ERR(pol)) {
 		kmem_cache_free(vm_area_cachep, new);
 		return PTR_ERR(pol);
@@ -2126,7 +2126,7 @@
 	new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (new_vma) {
 		*new_vma = *vma;
-		pol = mpol_copy(vma_policy(vma));
+		pol = mpol_dup(vma_policy(vma));
 		if (IS_ERR(pol)) {
 			kmem_cache_free(vm_area_cachep, new_vma);
 			return NULL;
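
For reference, a minimal sketch of what the renamed slow path does,
reconstructed from the comments above rather than copied from the kernel
source (the cpuset check is written out schematically, using the
expression from the mm/mempolicy.c comment, and policy-specific details
of the copy are omitted):

	struct mempolicy *__mpol_dup(struct mempolicy *old)
	{
		struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

		if (!new)
			return ERR_PTR(-ENOMEM);

		/* Per the mm/mempolicy.c comment: if our cpuset is being
		 * rebound, rebind the policy being duplicated to the
		 * cpuset's current mems_allowed so the copy stays
		 * cpuset-relative. */
		if (current->cpuset == cpuset_being_rebound) {
			nodemask_t mems = cpuset_mems_allowed(current);
			mpol_rebind_policy(old, &mems);
		}

		*new = *old;
		atomic_set(&new->refcnt, 1);	/* the caller's reference */
		return new;
	}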