Commit c24e43d83b7aedb3effef54627448253e22a0140 — committed by David S. Miller (1 parent: 3d81e7118d)
net: sched: flower: track rtnl lock state
Use 'rtnl_held' flag to track if caller holds rtnl lock. Propagate the flag to internal functions that need to know rtnl lock state. Take rtnl lock before calling tcf APIs that require it (hw offload, bind filter, etc.). Signed-off-by: Vlad Buslov <vladbu@mellanox.com> Reviewed-by: Stefano Brivio <sbrivio@redhat.com> Acked-by: Jiri Pirko <jiri@mellanox.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 1 changed file with 56 additions and 26 deletions Side-by-side Diff
net/sched/cls_flower.c
... | ... | @@ -374,11 +374,14 @@ |
374 | 374 | } |
375 | 375 | |
376 | 376 | static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f, |
377 | - struct netlink_ext_ack *extack) | |
377 | + bool rtnl_held, struct netlink_ext_ack *extack) | |
378 | 378 | { |
379 | 379 | struct tc_cls_flower_offload cls_flower = {}; |
380 | 380 | struct tcf_block *block = tp->chain->block; |
381 | 381 | |
382 | + if (!rtnl_held) | |
383 | + rtnl_lock(); | |
384 | + | |
382 | 385 | tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); |
383 | 386 | cls_flower.command = TC_CLSFLOWER_DESTROY; |
384 | 387 | cls_flower.cookie = (unsigned long) f; |
385 | 388 | |
386 | 389 | |
387 | 390 | |
388 | 391 | |
... | ... | @@ -387,20 +390,28 @@ |
387 | 390 | spin_lock(&tp->lock); |
388 | 391 | tcf_block_offload_dec(block, &f->flags); |
389 | 392 | spin_unlock(&tp->lock); |
393 | + | |
394 | + if (!rtnl_held) | |
395 | + rtnl_unlock(); | |
390 | 396 | } |
391 | 397 | |
392 | 398 | static int fl_hw_replace_filter(struct tcf_proto *tp, |
393 | - struct cls_fl_filter *f, | |
399 | + struct cls_fl_filter *f, bool rtnl_held, | |
394 | 400 | struct netlink_ext_ack *extack) |
395 | 401 | { |
396 | 402 | struct tc_cls_flower_offload cls_flower = {}; |
397 | 403 | struct tcf_block *block = tp->chain->block; |
398 | 404 | bool skip_sw = tc_skip_sw(f->flags); |
399 | - int err; | |
405 | + int err = 0; | |
400 | 406 | |
407 | + if (!rtnl_held) | |
408 | + rtnl_lock(); | |
409 | + | |
401 | 410 | cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts)); |
402 | - if (!cls_flower.rule) | |
403 | - return -ENOMEM; | |
411 | + if (!cls_flower.rule) { | |
412 | + err = -ENOMEM; | |
413 | + goto errout; | |
414 | + } | |
404 | 415 | |
405 | 416 | tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); |
406 | 417 | cls_flower.command = TC_CLSFLOWER_REPLACE; |
407 | 418 | |
408 | 419 | |
409 | 420 | |
410 | 421 | |
411 | 422 | |
412 | 423 | |
413 | 424 | |
... | ... | @@ -413,37 +424,48 @@ |
413 | 424 | err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts); |
414 | 425 | if (err) { |
415 | 426 | kfree(cls_flower.rule); |
416 | - if (skip_sw) { | |
427 | + if (skip_sw) | |
417 | 428 | NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action"); |
418 | - return err; | |
419 | - } | |
420 | - return 0; | |
429 | + else | |
430 | + err = 0; | |
431 | + goto errout; | |
421 | 432 | } |
422 | 433 | |
423 | 434 | err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw); |
424 | 435 | kfree(cls_flower.rule); |
425 | 436 | |
426 | 437 | if (err < 0) { |
427 | - fl_hw_destroy_filter(tp, f, NULL); | |
428 | - return err; | |
438 | + fl_hw_destroy_filter(tp, f, true, NULL); | |
439 | + goto errout; | |
429 | 440 | } else if (err > 0) { |
430 | 441 | f->in_hw_count = err; |
442 | + err = 0; | |
431 | 443 | spin_lock(&tp->lock); |
432 | 444 | tcf_block_offload_inc(block, &f->flags); |
433 | 445 | spin_unlock(&tp->lock); |
434 | 446 | } |
435 | 447 | |
436 | - if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) | |
437 | - return -EINVAL; | |
448 | + if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) { | |
449 | + err = -EINVAL; | |
450 | + goto errout; | |
451 | + } | |
438 | 452 | |
439 | - return 0; | |
453 | +errout: | |
454 | + if (!rtnl_held) | |
455 | + rtnl_unlock(); | |
456 | + | |
457 | + return err; | |
440 | 458 | } |
441 | 459 | |
442 | -static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) | |
460 | +static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f, | |
461 | + bool rtnl_held) | |
443 | 462 | { |
444 | 463 | struct tc_cls_flower_offload cls_flower = {}; |
445 | 464 | struct tcf_block *block = tp->chain->block; |
446 | 465 | |
466 | + if (!rtnl_held) | |
467 | + rtnl_lock(); | |
468 | + | |
447 | 469 | tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL); |
448 | 470 | cls_flower.command = TC_CLSFLOWER_STATS; |
449 | 471 | cls_flower.cookie = (unsigned long) f; |
... | ... | @@ -454,6 +476,9 @@ |
454 | 476 | tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes, |
455 | 477 | cls_flower.stats.pkts, |
456 | 478 | cls_flower.stats.lastused); |
479 | + | |
480 | + if (!rtnl_held) | |
481 | + rtnl_unlock(); | |
457 | 482 | } |
458 | 483 | |
459 | 484 | static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp) |
... | ... | @@ -511,7 +536,8 @@ |
511 | 536 | } |
512 | 537 | |
513 | 538 | static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f, |
514 | - bool *last, struct netlink_ext_ack *extack) | |
539 | + bool *last, bool rtnl_held, | |
540 | + struct netlink_ext_ack *extack) | |
515 | 541 | { |
516 | 542 | struct cls_fl_head *head = fl_head_dereference(tp); |
517 | 543 | bool async = tcf_exts_get_net(&f->exts); |
... | ... | @@ -533,7 +559,7 @@ |
533 | 559 | |
534 | 560 | *last = fl_mask_put(head, f->mask, async); |
535 | 561 | if (!tc_skip_hw(f->flags)) |
536 | - fl_hw_destroy_filter(tp, f, extack); | |
562 | + fl_hw_destroy_filter(tp, f, rtnl_held, extack); | |
537 | 563 | tcf_unbind_filter(tp, &f->res); |
538 | 564 | __fl_put(f); |
539 | 565 | |
... | ... | @@ -561,7 +587,7 @@ |
561 | 587 | |
562 | 588 | list_for_each_entry_safe(mask, next_mask, &head->masks, list) { |
563 | 589 | list_for_each_entry_safe(f, next, &mask->filters, list) { |
564 | - __fl_delete(tp, f, &last, extack); | |
590 | + __fl_delete(tp, f, &last, rtnl_held, extack); | |
565 | 591 | if (last) |
566 | 592 | break; |
567 | 593 | } |
568 | 594 | |
569 | 595 | |
570 | 596 | |
... | ... | @@ -1401,19 +1427,23 @@ |
1401 | 1427 | struct cls_fl_filter *f, struct fl_flow_mask *mask, |
1402 | 1428 | unsigned long base, struct nlattr **tb, |
1403 | 1429 | struct nlattr *est, bool ovr, |
1404 | - struct fl_flow_tmplt *tmplt, | |
1430 | + struct fl_flow_tmplt *tmplt, bool rtnl_held, | |
1405 | 1431 | struct netlink_ext_ack *extack) |
1406 | 1432 | { |
1407 | 1433 | int err; |
1408 | 1434 | |
1409 | - err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, | |
1435 | + err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held, | |
1410 | 1436 | extack); |
1411 | 1437 | if (err < 0) |
1412 | 1438 | return err; |
1413 | 1439 | |
1414 | 1440 | if (tb[TCA_FLOWER_CLASSID]) { |
1415 | 1441 | f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]); |
1442 | + if (!rtnl_held) | |
1443 | + rtnl_lock(); | |
1416 | 1444 | tcf_bind_filter(tp, &f->res, base); |
1445 | + if (!rtnl_held) | |
1446 | + rtnl_unlock(); | |
1417 | 1447 | } |
1418 | 1448 | |
1419 | 1449 | err = fl_set_key(net, tb, &f->key, &mask->key, extack); |
... | ... | @@ -1492,7 +1522,7 @@ |
1492 | 1522 | } |
1493 | 1523 | |
1494 | 1524 | err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr, |
1495 | - tp->chain->tmplt_priv, extack); | |
1525 | + tp->chain->tmplt_priv, rtnl_held, extack); | |
1496 | 1526 | if (err) |
1497 | 1527 | goto errout; |
1498 | 1528 | |
... | ... | @@ -1501,7 +1531,7 @@ |
1501 | 1531 | goto errout; |
1502 | 1532 | |
1503 | 1533 | if (!tc_skip_hw(fnew->flags)) { |
1504 | - err = fl_hw_replace_filter(tp, fnew, extack); | |
1534 | + err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack); | |
1505 | 1535 | if (err) |
1506 | 1536 | goto errout_mask; |
1507 | 1537 | } |
... | ... | @@ -1545,7 +1575,7 @@ |
1545 | 1575 | |
1546 | 1576 | fl_mask_put(head, fold->mask, true); |
1547 | 1577 | if (!tc_skip_hw(fold->flags)) |
1548 | - fl_hw_destroy_filter(tp, fold, NULL); | |
1578 | + fl_hw_destroy_filter(tp, fold, rtnl_held, NULL); | |
1549 | 1579 | tcf_unbind_filter(tp, &fold->res); |
1550 | 1580 | tcf_exts_get_net(&fold->exts); |
1551 | 1581 | /* Caller holds reference to fold, so refcnt is always > 0 |
... | ... | @@ -1602,7 +1632,7 @@ |
1602 | 1632 | errout_hw: |
1603 | 1633 | spin_unlock(&tp->lock); |
1604 | 1634 | if (!tc_skip_hw(fnew->flags)) |
1605 | - fl_hw_destroy_filter(tp, fnew, NULL); | |
1635 | + fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL); | |
1606 | 1636 | errout_mask: |
1607 | 1637 | fl_mask_put(head, fnew->mask, true); |
1608 | 1638 | errout: |
... | ... | @@ -1626,7 +1656,7 @@ |
1626 | 1656 | bool last_on_mask; |
1627 | 1657 | int err = 0; |
1628 | 1658 | |
1629 | - err = __fl_delete(tp, f, &last_on_mask, extack); | |
1659 | + err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack); | |
1630 | 1660 | *last = list_empty(&head->masks); |
1631 | 1661 | __fl_put(f); |
1632 | 1662 | |
... | ... | @@ -2270,7 +2300,7 @@ |
2270 | 2300 | spin_unlock(&tp->lock); |
2271 | 2301 | |
2272 | 2302 | if (!skip_hw) |
2273 | - fl_hw_update_stats(tp, f); | |
2303 | + fl_hw_update_stats(tp, f, rtnl_held); | |
2274 | 2304 | |
2275 | 2305 | if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count)) |
2276 | 2306 | goto nla_put_failure; |