Commit e61734c55c24cdf11b07e52a74aec4dc4a7f4bd0

Authored by Tejun Heo
1 parent 6f30558f37

cgroup: remove cgroup->name

cgroup->name handling became quite complicated over time, involving a
dedicated struct cgroup_name for RCU protection.  Now that cgroup is
on kernfs, we can drop all of it and simply use kernfs_name/path() and
friends.  Replace cgroup->name and all related code with kernfs
name/path constructs.

* Reimplement cgroup_name() and cgroup_path() as thin wrappers on top
  of their kernfs counterparts, which involves semantic changes:
  cgroup_name() now copies the name into a caller-provided buffer, and
  cgroup_path() returns a pointer into the buffer (NULL if it's too
  small) instead of 0 / -ENAMETOOLONG.  pr_cont_cgroup_name() and
  pr_cont_cgroup_path() are added; see the sketch after this list for
  the new calling convention.

* cgroup->name handling dropped from cgroup_rename().

* All users of cgroup_name/path() are updated to the new semantics.
  Users which were formatting the string just to printk it are
  converted to use pr_cont_cgroup_name/path() instead, which
  simplifies things quite a bit.  As cgroup_name() no longer requires
  the RCU read lock around it, RCU locking which protected only
  cgroup_name() is removed.
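
For illustration, a minimal caller-side sketch of the new cgroup_path()
convention (not part of this patch; assumes a valid @cgrp and a context
which can sleep, and mirrors the converted callers in the diff below):

	char *buf, *path;

	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* returns a pointer into @buf on success, NULL if @buf is too small */
	path = cgroup_path(cgrp, buf, PATH_MAX);
	if (!path) {
		kfree(buf);
		return -ENAMETOOLONG;
	}

	/* the path is composed from the end of @buf, so print @path, not @buf */
	pr_info("cgroup path: %s\n", path);
	kfree(buf);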

v2: Comment above oom_info_lock updated as suggested by Michal.

v3: dummy_top doesn't have a kn associated with it, and
    pr_cont_cgroup_name/path() ended up calling the matching kernfs
    functions with a NULL kn, leading to an oops.  Test for NULL kn
    and print "/" if so.  This issue was reported by Fengguang Wu.

v4: Rebased on top of 0ab02ca8f887 ("cgroup: protect modifications to
    cgroup_idr with cgroup_mutex").

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Li Zefan <lizefan@huawei.com>
Cc: Fengguang Wu <fengguang.wu@intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>

Showing 7 changed files with 110 additions and 210 deletions

block/blk-cgroup.h
... ... @@ -241,12 +241,16 @@
241 241 */
242 242 static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
243 243 {
244   - int ret;
  244 + char *p;
245 245  
246   - ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
247   - if (ret)
  246 + p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
  247 + if (!p) {
248 248 strncpy(buf, "<unavailable>", buflen);
249   - return ret;
  249 + return -ENAMETOOLONG;
  250 + }
  251 +
  252 + memmove(buf, p, buf + buflen - p);
  253 + return 0;
250 254 }
251 255  
252 256 /**
fs/kernfs/dir.c
... ... @@ -112,6 +112,7 @@
112 112 spin_unlock_irqrestore(&kernfs_rename_lock, flags);
113 113 return p;
114 114 }
  115 +EXPORT_SYMBOL_GPL(kernfs_path);
115 116  
116 117 /**
117 118 * pr_cont_kernfs_name - pr_cont name of a kernfs_node
include/linux/cgroup.h
... ... @@ -138,11 +138,6 @@
138 138 CGRP_SANE_BEHAVIOR,
139 139 };
140 140  
141   -struct cgroup_name {
142   - struct rcu_head rcu_head;
143   - char name[];
144   -};
145   -
146 141 struct cgroup {
147 142 unsigned long flags; /* "unsigned long" so bitops work */
148 143  
... ... @@ -179,19 +174,6 @@
179 174 */
180 175 u64 serial_nr;
181 176  
182   - /*
183   - * This is a copy of dentry->d_name, and it's needed because
184   - * we can't use dentry->d_name in cgroup_path().
185   - *
186   - * You must acquire rcu_read_lock() to access cgrp->name, and
187   - * the only place that can change it is rename(), which is
188   - * protected by parent dir's i_mutex.
189   - *
190   - * Normally you should use cgroup_name() wrapper rather than
191   - * access it directly.
192   - */
193   - struct cgroup_name __rcu *name;
194   -
195 177 /* Private pointers for each registered subsystem */
196 178 struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
197 179  
... ... @@ -479,12 +461,6 @@
479 461 return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR;
480 462 }
481 463  
482   -/* Caller should hold rcu_read_lock() */
483   -static inline const char *cgroup_name(const struct cgroup *cgrp)
484   -{
485   - return rcu_dereference(cgrp->name)->name;
486   -}
487   -
488 464 /* returns ino associated with a cgroup, 0 indicates unmounted root */
489 465 static inline ino_t cgroup_ino(struct cgroup *cgrp)
490 466 {
... ... @@ -503,13 +479,46 @@
503 479  
504 480 struct cgroup_subsys_state *seq_css(struct seq_file *seq);
505 481  
  482 +/*
  483 + * Name / path handling functions. All are thin wrappers around the kernfs
  484 + * counterparts and can be called under any context.
  485 + */
  486 +
  487 +static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
  488 +{
  489 + return kernfs_name(cgrp->kn, buf, buflen);
  490 +}
  491 +
  492 +static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
  493 + size_t buflen)
  494 +{
  495 + return kernfs_path(cgrp->kn, buf, buflen);
  496 +}
  497 +
  498 +static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
  499 +{
  500 + /* dummy_top doesn't have a kn associated */
  501 + if (cgrp->kn)
  502 + pr_cont_kernfs_name(cgrp->kn);
  503 + else
  504 + pr_cont("/");
  505 +}
  506 +
  507 +static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
  508 +{
  509 + /* dummy_top doesn't have a kn associated */
  510 + if (cgrp->kn)
  511 + pr_cont_kernfs_path(cgrp->kn);
  512 + else
  513 + pr_cont("/");
  514 +}
  515 +
  516 +char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
  517 +
506 518 int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
507 519 int cgroup_rm_cftypes(struct cftype *cfts);
508 520  
509 521 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
510   -
511   -int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
512   -int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
513 522  
514 523 int cgroup_task_count(const struct cgroup *cgrp);
515 524  
kernel/cgroup.c
... ... @@ -145,8 +145,6 @@
145 145 /* hierarchy ID allocation and mapping, protected by cgroup_mutex */
146 146 static DEFINE_IDR(cgroup_hierarchy_idr);
147 147  
148   -static struct cgroup_name root_cgroup_name = { .name = "/" };
149   -
150 148 /*
151 149 * Assign a monotonically increasing serial number to cgroups. It
152 150 * guarantees cgroups with bigger numbers are newer than those with smaller
... ... @@ -888,17 +886,6 @@
888 886 static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
889 887 static const struct file_operations proc_cgroupstats_operations;
890 888  
891   -static struct cgroup_name *cgroup_alloc_name(const char *name_str)
892   -{
893   - struct cgroup_name *name;
894   -
895   - name = kmalloc(sizeof(*name) + strlen(name_str) + 1, GFP_KERNEL);
896   - if (!name)
897   - return NULL;
898   - strcpy(name->name, name_str);
899   - return name;
900   -}
901   -
902 889 static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
903 890 char *buf)
904 891 {
... ... @@ -958,8 +945,6 @@
958 945 cgroup_pidlist_destroy_all(cgrp);
959 946  
960 947 kernfs_put(cgrp->kn);
961   -
962   - kfree(rcu_dereference_raw(cgrp->name));
963 948 kfree(cgrp);
964 949 }
965 950  
... ... @@ -1377,7 +1362,6 @@
1377 1362 INIT_LIST_HEAD(&root->root_list);
1378 1363 root->number_of_cgroups = 1;
1379 1364 cgrp->root = root;
1380   - RCU_INIT_POINTER(cgrp->name, &root_cgroup_name);
1381 1365 init_cgroup_housekeeping(cgrp);
1382 1366 idr_init(&root->cgroup_idr);
1383 1367 }
... ... @@ -1598,57 +1582,6 @@
1598 1582 static struct kobject *cgroup_kobj;
1599 1583  
1600 1584 /**
1601   - * cgroup_path - generate the path of a cgroup
1602   - * @cgrp: the cgroup in question
1603   - * @buf: the buffer to write the path into
1604   - * @buflen: the length of the buffer
1605   - *
1606   - * Writes path of cgroup into buf. Returns 0 on success, -errno on error.
1607   - *
1608   - * We can't generate cgroup path using dentry->d_name, as accessing
1609   - * dentry->name must be protected by irq-unsafe dentry->d_lock or parent
1610   - * inode's i_mutex, while on the other hand cgroup_path() can be called
1611   - * with some irq-safe spinlocks held.
1612   - */
1613   -int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
1614   -{
1615   - int ret = -ENAMETOOLONG;
1616   - char *start;
1617   -
1618   - if (!cgrp->parent) {
1619   - if (strlcpy(buf, "/", buflen) >= buflen)
1620   - return -ENAMETOOLONG;
1621   - return 0;
1622   - }
1623   -
1624   - start = buf + buflen - 1;
1625   - *start = '\0';
1626   -
1627   - rcu_read_lock();
1628   - do {
1629   - const char *name = cgroup_name(cgrp);
1630   - int len;
1631   -
1632   - len = strlen(name);
1633   - if ((start -= len) < buf)
1634   - goto out;
1635   - memcpy(start, name, len);
1636   -
1637   - if (--start < buf)
1638   - goto out;
1639   - *start = '/';
1640   -
1641   - cgrp = cgrp->parent;
1642   - } while (cgrp->parent);
1643   - ret = 0;
1644   - memmove(buf, start, buf + buflen - start);
1645   -out:
1646   - rcu_read_unlock();
1647   - return ret;
1648   -}
1649   -EXPORT_SYMBOL_GPL(cgroup_path);
1650   -
1651   -/**
1652 1585 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
1653 1586 * @task: target task
1654 1587 * @buf: the buffer to write the path into
... ... @@ -1659,31 +1592,30 @@
1659 1592 * function grabs cgroup_mutex and shouldn't be used inside locks used by
1660 1593 * cgroup controller callbacks.
1661 1594 *
1662   - * Returns 0 on success, fails with -%ENAMETOOLONG if @buflen is too short.
  1595 + * Return value is the same as kernfs_path().
1663 1596 */
1664   -int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
  1597 +char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
1665 1598 {
1666 1599 struct cgroupfs_root *root;
1667 1600 struct cgroup *cgrp;
1668   - int hierarchy_id = 1, ret = 0;
  1601 + int hierarchy_id = 1;
  1602 + char *path = NULL;
1669 1603  
1670   - if (buflen < 2)
1671   - return -ENAMETOOLONG;
1672   -
1673 1604 mutex_lock(&cgroup_mutex);
1674 1605  
1675 1606 root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
1676 1607  
1677 1608 if (root) {
1678 1609 cgrp = task_cgroup_from_root(task, root);
1679   - ret = cgroup_path(cgrp, buf, buflen);
  1610 + path = cgroup_path(cgrp, buf, buflen);
1680 1611 } else {
1681 1612 /* if no hierarchy exists, everyone is in "/" */
1682   - memcpy(buf, "/", 2);
  1613 + if (strlcpy(buf, "/", buflen) < buflen)
  1614 + path = buf;
1683 1615 }
1684 1616  
1685 1617 mutex_unlock(&cgroup_mutex);
1686   - return ret;
  1618 + return path;
1687 1619 }
1688 1620 EXPORT_SYMBOL_GPL(task_cgroup_path);
1689 1621  
... ... @@ -2211,7 +2143,6 @@
2211 2143 const char *new_name_str)
2212 2144 {
2213 2145 struct cgroup *cgrp = kn->priv;
2214   - struct cgroup_name *name, *old_name;
2215 2146 int ret;
2216 2147  
2217 2148 if (kernfs_type(kn) != KERNFS_DIR)
... ... @@ -2226,25 +2157,13 @@
2226 2157 if (cgroup_sane_behavior(cgrp))
2227 2158 return -EPERM;
2228 2159  
2229   - name = cgroup_alloc_name(new_name_str);
2230   - if (!name)
2231   - return -ENOMEM;
2232   -
2233 2160 mutex_lock(&cgroup_tree_mutex);
2234 2161 mutex_lock(&cgroup_mutex);
2235 2162  
2236 2163 ret = kernfs_rename(kn, new_parent, new_name_str);
2237   - if (!ret) {
2238   - old_name = rcu_dereference_protected(cgrp->name, true);
2239   - rcu_assign_pointer(cgrp->name, name);
2240   - } else {
2241   - old_name = name;
2242   - }
2243 2164  
2244 2165 mutex_unlock(&cgroup_mutex);
2245 2166 mutex_unlock(&cgroup_tree_mutex);
2246   -
2247   - kfree_rcu(old_name, rcu_head);
2248 2167 return ret;
2249 2168 }
2250 2169  
... ... @@ -3719,14 +3638,13 @@
3719 3638 /**
3720 3639 * cgroup_create - create a cgroup
3721 3640 * @parent: cgroup that will be parent of the new cgroup
3722   - * @name_str: name of the new cgroup
  3641 + * @name: name of the new cgroup
3723 3642 * @mode: mode to set on new cgroup
3724 3643 */
3725   -static long cgroup_create(struct cgroup *parent, const char *name_str,
  3644 +static long cgroup_create(struct cgroup *parent, const char *name,
3726 3645 umode_t mode)
3727 3646 {
3728 3647 struct cgroup *cgrp;
3729   - struct cgroup_name *name;
3730 3648 struct cgroupfs_root *root = parent->root;
3731 3649 int ssid, err;
3732 3650 struct cgroup_subsys *ss;
... ... @@ -3737,13 +3655,6 @@
3737 3655 if (!cgrp)
3738 3656 return -ENOMEM;
3739 3657  
3740   - name = cgroup_alloc_name(name_str);
3741   - if (!name) {
3742   - err = -ENOMEM;
3743   - goto err_free_cgrp;
3744   - }
3745   - rcu_assign_pointer(cgrp->name, name);
3746   -
3747 3658 mutex_lock(&cgroup_tree_mutex);
3748 3659  
3749 3660 /*
... ... @@ -3781,7 +3692,7 @@
3781 3692 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
3782 3693  
3783 3694 /* create the directory */
3784   - kn = kernfs_create_dir(parent->kn, name->name, mode, cgrp);
  3695 + kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
3785 3696 if (IS_ERR(kn)) {
3786 3697 err = PTR_ERR(kn);
3787 3698 goto err_free_id;
... ... @@ -3839,8 +3750,6 @@
3839 3750 mutex_unlock(&cgroup_mutex);
3840 3751 err_unlock_tree:
3841 3752 mutex_unlock(&cgroup_tree_mutex);
3842   - kfree(rcu_dereference_raw(cgrp->name));
3843   -err_free_cgrp:
3844 3753 kfree(cgrp);
3845 3754 return err;
3846 3755  
... ... @@ -4304,12 +4213,12 @@
4304 4213 {
4305 4214 struct pid *pid;
4306 4215 struct task_struct *tsk;
4307   - char *buf;
  4216 + char *buf, *path;
4308 4217 int retval;
4309 4218 struct cgroupfs_root *root;
4310 4219  
4311 4220 retval = -ENOMEM;
4312   - buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
  4221 + buf = kmalloc(PATH_MAX, GFP_KERNEL);
4313 4222 if (!buf)
4314 4223 goto out;
4315 4224  
... ... @@ -4337,10 +4246,12 @@
4337 4246 root->name);
4338 4247 seq_putc(m, ':');
4339 4248 cgrp = task_cgroup_from_root(tsk, root);
4340   - retval = cgroup_path(cgrp, buf, PAGE_SIZE);
4341   - if (retval < 0)
  4249 + path = cgroup_path(cgrp, buf, PATH_MAX);
  4250 + if (!path) {
  4251 + retval = -ENAMETOOLONG;
4342 4252 goto out_unlock;
4343   - seq_puts(m, buf);
  4253 + }
  4254 + seq_puts(m, path);
4344 4255 seq_putc(m, '\n');
4345 4256 }
4346 4257  
... ... @@ -4588,16 +4499,17 @@
4588 4499 while (!list_empty(&release_list)) {
4589 4500 char *argv[3], *envp[3];
4590 4501 int i;
4591   - char *pathbuf = NULL, *agentbuf = NULL;
  4502 + char *pathbuf = NULL, *agentbuf = NULL, *path;
4592 4503 struct cgroup *cgrp = list_entry(release_list.next,
4593 4504 struct cgroup,
4594 4505 release_list);
4595 4506 list_del_init(&cgrp->release_list);
4596 4507 raw_spin_unlock(&release_list_lock);
4597   - pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
  4508 + pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
4598 4509 if (!pathbuf)
4599 4510 goto continue_free;
4600   - if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
  4511 + path = cgroup_path(cgrp, pathbuf, PATH_MAX);
  4512 + if (!path)
4601 4513 goto continue_free;
4602 4514 agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
4603 4515 if (!agentbuf)
... ... @@ -4605,7 +4517,7 @@
4605 4517  
4606 4518 i = 0;
4607 4519 argv[i++] = agentbuf;
4608   - argv[i++] = pathbuf;
  4520 + argv[i++] = path;
4609 4521 argv[i] = NULL;
4610 4522  
4611 4523 i = 0;
... ... @@ -4755,7 +4667,12 @@
4755 4667 {
4756 4668 struct cgrp_cset_link *link;
4757 4669 struct css_set *cset;
  4670 + char *name_buf;
4758 4671  
  4672 + name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
  4673 + if (!name_buf)
  4674 + return -ENOMEM;
  4675 +
4759 4676 read_lock(&css_set_lock);
4760 4677 rcu_read_lock();
4761 4678 cset = rcu_dereference(current->cgroups);
... ... @@ -4763,14 +4680,17 @@
4763 4680 struct cgroup *c = link->cgrp;
4764 4681 const char *name = "?";
4765 4682  
4766   - if (c != cgroup_dummy_top)
4767   - name = cgroup_name(c);
  4683 + if (c != cgroup_dummy_top) {
  4684 + cgroup_name(c, name_buf, NAME_MAX + 1);
  4685 + name = name_buf;
  4686 + }
4768 4687  
4769 4688 seq_printf(seq, "Root %d group %s\n",
4770 4689 c->root->hierarchy_id, name);
4771 4690 }
4772 4691 rcu_read_unlock();
4773 4692 read_unlock(&css_set_lock);
  4693 + kfree(name_buf);
4774 4694 return 0;
4775 4695 }
4776 4696  
kernel/cpuset.c
... ... @@ -2088,10 +2088,9 @@
2088 2088 parent = parent_cs(parent);
2089 2089  
2090 2090 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
2091   - rcu_read_lock();
2092   - printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset %s\n",
2093   - cgroup_name(cs->css.cgroup));
2094   - rcu_read_unlock();
  2091 + printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset ");
  2092 + pr_cont_cgroup_name(cs->css.cgroup);
  2093 + pr_cont("\n");
2095 2094 }
2096 2095 }
2097 2096  
... ... @@ -2619,19 +2618,17 @@
2619 2618 /* Statically allocated to prevent using excess stack. */
2620 2619 static char cpuset_nodelist[CPUSET_NODELIST_LEN];
2621 2620 static DEFINE_SPINLOCK(cpuset_buffer_lock);
2622   -
2623 2621 struct cgroup *cgrp = task_cs(tsk)->css.cgroup;
2624 2622  
2625   - rcu_read_lock();
2626 2623 spin_lock(&cpuset_buffer_lock);
2627 2624  
2628 2625 nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2629 2626 tsk->mems_allowed);
2630   - printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
2631   - tsk->comm, cgroup_name(cgrp), cpuset_nodelist);
  2627 + printk(KERN_INFO "%s cpuset=", tsk->comm);
  2628 + pr_cont_cgroup_name(cgrp);
  2629 + pr_cont(" mems_allowed=%s\n", cpuset_nodelist);
2632 2630  
2633 2631 spin_unlock(&cpuset_buffer_lock);
2634   - rcu_read_unlock();
2635 2632 }
2636 2633  
2637 2634 /*
... ... @@ -2681,12 +2678,12 @@
2681 2678 {
2682 2679 struct pid *pid;
2683 2680 struct task_struct *tsk;
2684   - char *buf;
  2681 + char *buf, *p;
2685 2682 struct cgroup_subsys_state *css;
2686 2683 int retval;
2687 2684  
2688 2685 retval = -ENOMEM;
2689   - buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
  2686 + buf = kmalloc(PATH_MAX, GFP_KERNEL);
2690 2687 if (!buf)
2691 2688 goto out;
2692 2689  
... ... @@ -2696,14 +2693,16 @@
2696 2693 if (!tsk)
2697 2694 goto out_free;
2698 2695  
  2696 + retval = -ENAMETOOLONG;
2699 2697 rcu_read_lock();
2700 2698 css = task_css(tsk, cpuset_cgrp_id);
2701   - retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
  2699 + p = cgroup_path(css->cgroup, buf, PATH_MAX);
2702 2700 rcu_read_unlock();
2703   - if (retval < 0)
  2701 + if (!p)
2704 2702 goto out_put_task;
2705   - seq_puts(m, buf);
  2703 + seq_puts(m, p);
2706 2704 seq_putc(m, '\n');
  2705 + retval = 0;
2707 2706 out_put_task:
2708 2707 put_task_struct(tsk);
2709 2708 out_free:
kernel/sched/debug.c
... ... @@ -111,8 +111,7 @@
111 111 if (autogroup_path(tg, group_path, PATH_MAX))
112 112 return group_path;
113 113  
114   - cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
115   - return group_path;
  114 + return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
116 115 }
117 116 #endif
118 117  
mm/memcontrol.c
... ... @@ -1683,15 +1683,8 @@
1683 1683 */
1684 1684 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1685 1685 {
1686   - /*
1687   - * protects memcg_name and makes sure that parallel ooms do not
1688   - * interleave
1689   - */
  1686 + /* oom_info_lock ensures that parallel ooms do not interleave */
1690 1687 static DEFINE_SPINLOCK(oom_info_lock);
1691   - struct cgroup *task_cgrp;
1692   - struct cgroup *mem_cgrp;
1693   - static char memcg_name[PATH_MAX];
1694   - int ret;
1695 1688 struct mem_cgroup *iter;
1696 1689 unsigned int i;
1697 1690  
... ... @@ -1701,36 +1694,14 @@
1701 1694 spin_lock(&oom_info_lock);
1702 1695 rcu_read_lock();
1703 1696  
1704   - mem_cgrp = memcg->css.cgroup;
1705   - task_cgrp = task_cgroup(p, memory_cgrp_id);
  1697 + pr_info("Task in ");
  1698 + pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
  1699 + pr_info(" killed as a result of limit of ");
  1700 + pr_cont_cgroup_path(memcg->css.cgroup);
  1701 + pr_info("\n");
1706 1702  
1707   - ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1708   - if (ret < 0) {
1709   - /*
1710   - * Unfortunately, we are unable to convert to a useful name
1711   - * But we'll still print out the usage information
1712   - */
1713   - rcu_read_unlock();
1714   - goto done;
1715   - }
1716 1703 rcu_read_unlock();
1717 1704  
1718   - pr_info("Task in %s killed", memcg_name);
1719   -
1720   - rcu_read_lock();
1721   - ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1722   - if (ret < 0) {
1723   - rcu_read_unlock();
1724   - goto done;
1725   - }
1726   - rcu_read_unlock();
1727   -
1728   - /*
1729   - * Continues from above, so we don't need an KERN_ level
1730   - */
1731   - pr_cont(" as a result of limit of %s\n", memcg_name);
1732   -done:
1733   -
1734 1705 pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
1735 1706 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1736 1707 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
... ... @@ -1745,13 +1716,8 @@
1745 1716 res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
1746 1717  
1747 1718 for_each_mem_cgroup_tree(iter, memcg) {
1748   - pr_info("Memory cgroup stats");
1749   -
1750   - rcu_read_lock();
1751   - ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX);
1752   - if (!ret)
1753   - pr_cont(" for %s", memcg_name);
1754   - rcu_read_unlock();
  1719 + pr_info("Memory cgroup stats for ");
  1720 + pr_cont_cgroup_path(iter->css.cgroup);
1755 1721 pr_cont(":");
1756 1722  
1757 1723 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
... ... @@ -3401,7 +3367,7 @@
3401 3367 struct kmem_cache *s)
3402 3368 {
3403 3369 struct kmem_cache *new = NULL;
3404   - static char *tmp_name = NULL;
  3370 + static char *tmp_path = NULL, *tmp_name = NULL;
3405 3371 static DEFINE_MUTEX(mutex); /* protects tmp_name */
3406 3372  
3407 3373 BUG_ON(!memcg_can_account_kmem(memcg));
... ... @@ -3413,18 +3379,20 @@
3413 3379 * This static temporary buffer is used to prevent from
3414 3380 * pointless shortliving allocation.
3415 3381 */
3416   - if (!tmp_name) {
3417   - tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
  3382 + if (!tmp_path || !tmp_name) {
  3383 + if (!tmp_path)
  3384 + tmp_path = kmalloc(PATH_MAX, GFP_KERNEL);
3418 3385 if (!tmp_name)
  3386 + tmp_name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
  3387 + if (!tmp_path || !tmp_name)
3419 3388 goto out;
3420 3389 }
3421 3390  
3422   - rcu_read_lock();
3423   - snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
3424   - memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
3425   - rcu_read_unlock();
  3391 + cgroup_name(memcg->css.cgroup, tmp_name, NAME_MAX + 1);
  3392 + snprintf(tmp_path, PATH_MAX, "%s(%d:%s)", s->name,
  3393 + memcg_cache_id(memcg), tmp_name);
3426 3394  
3427   - new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
  3395 + new = kmem_cache_create_memcg(memcg, tmp_path, s->object_size, s->align,
3428 3396 (s->flags & ~SLAB_PANIC), s->ctor, s);
3429 3397 if (new)
3430 3398 new->allocflags |= __GFP_KMEMCG;