Commit b2552b8c40fa89210070c6e3487b35f10608d6c5
Committed by
David S. Miller
1 parent
061775583e
net: sched: flower: track filter deletion with flag
In order to prevent double deletion of filter by concurrent tasks when rtnl lock is not used for synchronization, add 'deleted' filter field. Check value of this field when modifying filters and return error if concurrent deletion is detected.

Refactor __fl_delete() to accept pointer to 'last' boolean as argument, and return error code as function return value instead. This is necessary to signal concurrent filter delete to caller.

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 1 changed file with 29 additions and 10 deletions (side-by-side diff)
net/sched/cls_flower.c
... | ... | @@ -110,6 +110,7 @@ |
110 | 110 | * synchronization. Use atomic reference counter to be concurrency-safe. |
111 | 111 | */ |
112 | 112 | refcount_t refcnt; |
113 | + bool deleted; | |
113 | 114 | }; |
114 | 115 | |
115 | 116 | static const struct rhashtable_params mask_ht_params = { |
... | ... | @@ -458,6 +459,8 @@ |
458 | 459 | if (!refcount_dec_and_test(&f->refcnt)) |
459 | 460 | return; |
460 | 461 | |
462 | + WARN_ON(!f->deleted); | |
463 | + | |
461 | 464 | if (tcf_exts_get_net(&f->exts)) |
462 | 465 | tcf_queue_work(&f->rwork, fl_destroy_filter_work); |
463 | 466 | else |
464 | 467 | |
465 | 468 | |
466 | 469 | |
467 | 470 | |
... | ... | @@ -495,22 +498,29 @@ |
495 | 498 | return f; |
496 | 499 | } |
497 | 500 | |
498 | -static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f, | |
499 | - struct netlink_ext_ack *extack) | |
501 | +static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f, | |
502 | + bool *last, struct netlink_ext_ack *extack) | |
500 | 503 | { |
501 | 504 | struct cls_fl_head *head = fl_head_dereference(tp); |
502 | 505 | bool async = tcf_exts_get_net(&f->exts); |
503 | - bool last; | |
504 | 506 | |
507 | + *last = false; | |
508 | + | |
509 | + if (f->deleted) | |
510 | + return -ENOENT; | |
511 | + | |
512 | + f->deleted = true; | |
513 | + rhashtable_remove_fast(&f->mask->ht, &f->ht_node, | |
514 | + f->mask->filter_ht_params); | |
505 | 515 | idr_remove(&head->handle_idr, f->handle); |
506 | 516 | list_del_rcu(&f->list); |
507 | - last = fl_mask_put(head, f->mask, async); | |
517 | + *last = fl_mask_put(head, f->mask, async); | |
508 | 518 | if (!tc_skip_hw(f->flags)) |
509 | 519 | fl_hw_destroy_filter(tp, f, extack); |
510 | 520 | tcf_unbind_filter(tp, &f->res); |
511 | 521 | __fl_put(f); |
512 | 522 | |
513 | - return last; | |
523 | + return 0; | |
514 | 524 | } |
515 | 525 | |
516 | 526 | static void fl_destroy_sleepable(struct work_struct *work) |
517 | 527 | |
... | ... | @@ -530,10 +540,12 @@ |
530 | 540 | struct cls_fl_head *head = fl_head_dereference(tp); |
531 | 541 | struct fl_flow_mask *mask, *next_mask; |
532 | 542 | struct cls_fl_filter *f, *next; |
543 | + bool last; | |
533 | 544 | |
534 | 545 | list_for_each_entry_safe(mask, next_mask, &head->masks, list) { |
535 | 546 | list_for_each_entry_safe(f, next, &mask->filters, list) { |
536 | - if (__fl_delete(tp, f, extack)) | |
547 | + __fl_delete(tp, f, &last, extack); | |
548 | + if (last) | |
537 | 549 | break; |
538 | 550 | } |
539 | 551 | } |
... | ... | @@ -1444,6 +1456,12 @@ |
1444 | 1456 | |
1445 | 1457 | refcount_inc(&fnew->refcnt); |
1446 | 1458 | if (fold) { |
1459 | + /* Fold filter was deleted concurrently. Retry lookup. */ | |
1460 | + if (fold->deleted) { | |
1461 | + err = -EAGAIN; | |
1462 | + goto errout_hw; | |
1463 | + } | |
1464 | + | |
1447 | 1465 | fnew->handle = handle; |
1448 | 1466 | |
1449 | 1467 | err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node, |
... | ... | @@ -1456,6 +1474,7 @@ |
1456 | 1474 | fold->mask->filter_ht_params); |
1457 | 1475 | idr_replace(&head->handle_idr, fnew, fnew->handle); |
1458 | 1476 | list_replace_rcu(&fold->list, &fnew->list); |
1477 | + fold->deleted = true; | |
1459 | 1478 | |
1460 | 1479 | if (!tc_skip_hw(fold->flags)) |
1461 | 1480 | fl_hw_destroy_filter(tp, fold, NULL); |
1462 | 1481 | |
1463 | 1482 | |
... | ... | @@ -1525,14 +1544,14 @@ |
1525 | 1544 | { |
1526 | 1545 | struct cls_fl_head *head = fl_head_dereference(tp); |
1527 | 1546 | struct cls_fl_filter *f = arg; |
1547 | + bool last_on_mask; | |
1548 | + int err = 0; | |
1528 | 1549 | |
1529 | - rhashtable_remove_fast(&f->mask->ht, &f->ht_node, | |
1530 | - f->mask->filter_ht_params); | |
1531 | - __fl_delete(tp, f, extack); | |
1550 | + err = __fl_delete(tp, f, &last_on_mask, extack); | |
1532 | 1551 | *last = list_empty(&head->masks); |
1533 | 1552 | __fl_put(f); |
1534 | 1553 | |
1535 | - return 0; | |
1554 | + return err; | |
1536 | 1555 | } |
1537 | 1556 | |
1538 | 1557 | static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg, |