Commit ef209f15980360f6945873df3cd710c5f62f2a3e

Authored by Gao feng
Committed by David S. Miller
1 parent 936597631d

net: cgroup: fix access the unallocated memory in netprio cgroup

There are some out-of-bounds accesses in the netprio cgroup code.

Currently, before accessing the dev->priomap.priomap array, we only check
whether dev->priomap exists. Because we don't want additional bounds
checks in the fast path, we should make sure that dev->priomap is either
NULL or that the array size of dev->priomap.priomap is equal to
max_prioidx + 1.

So in the write_priomap logic, we should call extend_netdev_table when
dev->priomap is NULL or dev->priomap.priomap_len < max_len.
And in the cgrp_create->update_netdev_tables logic, we should call
extend_netdev_table only when dev->priomap exists and
dev->priomap.priomap_len < max_len.

It is also not necessary to call update_netdev_tables in write_priomap;
we only need to allocate the priomap of the net device whose priorities
we change through net_prio.ifpriomap.

This patch also adds a return value to update_netdev_tables and
extend_netdev_table, so that when the allocation of new_priomap fails,
write_priomap stops accessing the priomap and returns -ENOMEM
to userspace to tell the user what happened.

Changes from v3:
1. Add RTNL protection when reading max_prioidx in write_priomap.

2. Only call extend_netdev_table when map->priomap_len < max_len;
   this makes sure the array size of dev->priomap->priomap is always
   larger than any prioidx.

3. Add a function, write_update_netdev_table, to make the code clearer.

Changes from v2:
1. Protect extend_netdev_table with RTNL.
2. When extend_netdev_table fails, call dev_put to decrease the device's refcount.

Signed-off-by: Gao feng <gaofeng@cn.fujitsu.com>
Cc: Neil Horman <nhorman@tuxdriver.com>
Cc: Eric Dumazet <edumazet@google.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 1 changed file with 54 additions and 17 deletions Side-by-side Diff

net/core/netprio_cgroup.c
... ... @@ -65,7 +65,7 @@
65 65 spin_unlock_irqrestore(&prioidx_map_lock, flags);
66 66 }
67 67  
68   -static void extend_netdev_table(struct net_device *dev, u32 new_len)
  68 +static int extend_netdev_table(struct net_device *dev, u32 new_len)
69 69 {
70 70 size_t new_size = sizeof(struct netprio_map) +
71 71 ((sizeof(u32) * new_len));
... ... @@ -77,7 +77,7 @@
77 77  
78 78 if (!new_priomap) {
79 79 pr_warn("Unable to alloc new priomap!\n");
80   - return;
  80 + return -ENOMEM;
81 81 }
82 82  
83 83 for (i = 0;
84 84  
85 85  
86 86  
87 87  
88 88  
89 89  
90 90  
91 91  
92 92  
93 93  
94 94  
95 95  
... ... @@ -90,46 +90,79 @@
90 90 rcu_assign_pointer(dev->priomap, new_priomap);
91 91 if (old_priomap)
92 92 kfree_rcu(old_priomap, rcu);
  93 + return 0;
93 94 }
94 95  
95   -static void update_netdev_tables(void)
  96 +static int write_update_netdev_table(struct net_device *dev)
96 97 {
  98 + int ret = 0;
  99 + u32 max_len;
  100 + struct netprio_map *map;
  101 +
  102 + rtnl_lock();
  103 + max_len = atomic_read(&max_prioidx) + 1;
  104 + map = rtnl_dereference(dev->priomap);
  105 + if (!map || map->priomap_len < max_len)
  106 + ret = extend_netdev_table(dev, max_len);
  107 + rtnl_unlock();
  108 +
  109 + return ret;
  110 +}
  111 +
  112 +static int update_netdev_tables(void)
  113 +{
  114 + int ret = 0;
97 115 struct net_device *dev;
98   - u32 max_len = atomic_read(&max_prioidx) + 1;
  116 + u32 max_len;
99 117 struct netprio_map *map;
100 118  
101 119 rtnl_lock();
  120 + max_len = atomic_read(&max_prioidx) + 1;
102 121 for_each_netdev(&init_net, dev) {
103 122 map = rtnl_dereference(dev->priomap);
104   - if ((!map) ||
105   - (map->priomap_len < max_len))
106   - extend_netdev_table(dev, max_len);
  123 + /*
  124 + * don't allocate priomap if we didn't
  125 + * change net_prio.ifpriomap (map == NULL),
  126 + * this will speed up skb_update_prio.
  127 + */
  128 + if (map && map->priomap_len < max_len) {
  129 + ret = extend_netdev_table(dev, max_len);
  130 + if (ret < 0)
  131 + break;
  132 + }
107 133 }
108 134 rtnl_unlock();
  135 + return ret;
109 136 }
110 137  
111 138 static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
112 139 {
113 140 struct cgroup_netprio_state *cs;
114   - int ret;
  141 + int ret = -EINVAL;
115 142  
116 143 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
117 144 if (!cs)
118 145 return ERR_PTR(-ENOMEM);
119 146  
120   - if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) {
121   - kfree(cs);
122   - return ERR_PTR(-EINVAL);
123   - }
  147 + if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx)
  148 + goto out;
124 149  
125 150 ret = get_prioidx(&cs->prioidx);
126   - if (ret != 0) {
  151 + if (ret < 0) {
127 152 pr_warn("No space in priority index array\n");
128   - kfree(cs);
129   - return ERR_PTR(ret);
  153 + goto out;
130 154 }
131 155  
  156 + ret = update_netdev_tables();
  157 + if (ret < 0) {
  158 + put_prioidx(cs->prioidx);
  159 + goto out;
  160 + }
  161 +
132 162 return &cs->css;
  163 +out:
  164 + kfree(cs);
  165 + return ERR_PTR(ret);
133 166 }
134 167  
135 168 static void cgrp_destroy(struct cgroup *cgrp)
136 169  
... ... @@ -221,13 +254,17 @@
221 254 if (!dev)
222 255 goto out_free_devname;
223 256  
224   - update_netdev_tables();
225   - ret = 0;
  257 + ret = write_update_netdev_table(dev);
  258 + if (ret < 0)
  259 + goto out_put_dev;
  260 +
226 261 rcu_read_lock();
227 262 map = rcu_dereference(dev->priomap);
228 263 if (map)
229 264 map->priomap[prioidx] = priority;
230 265 rcu_read_unlock();
  266 +
  267 +out_put_dev:
231 268 dev_put(dev);
232 269  
233 270 out_free_devname: