Commit 53d0e83f9329aa51dcc205b514dbee05cb4df309
Committed by: David S. Miller — 1 parent: 3008ba5faa
rds: tcp: must use spin_lock_irq* and not spin_lock_bh with rds_tcp_conn_lock
rds_tcp_connection allocation/free management has the potential to be called from __rds_conn_create after IRQs have been disabled, so spin_[un]lock_bh cannot be used with rds_tcp_conn_lock. Bottom-halves that need to synchronize for critical sections protected by rds_tcp_conn_lock should instead use rds_destroy_pending() correctly. Reported-by: syzbot+c68e51bb5e699d3f8d91@syzkaller.appspotmail.com Fixes: ebeeb1ad9b8a ("rds: tcp: use rds_destroy_pending() to synchronize netns/module teardown and rds connection/workq management") Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com> Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 1 changed file with 9 additions and 8 deletions Side-by-side Diff
net/rds/tcp.c
... | ... | @@ -272,13 +272,14 @@ |
272 | 272 | static void rds_tcp_conn_free(void *arg) |
273 | 273 | { |
274 | 274 | struct rds_tcp_connection *tc = arg; |
275 | + unsigned long flags; | |
275 | 276 | |
276 | 277 | rdsdebug("freeing tc %p\n", tc); |
277 | 278 | |
278 | - spin_lock_bh(&rds_tcp_conn_lock); | |
279 | + spin_lock_irqsave(&rds_tcp_conn_lock, flags); | |
279 | 280 | if (!tc->t_tcp_node_detached) |
280 | 281 | list_del(&tc->t_tcp_node); |
281 | - spin_unlock_bh(&rds_tcp_conn_lock); | |
282 | + spin_unlock_irqrestore(&rds_tcp_conn_lock, flags); | |
282 | 283 | |
283 | 284 | kmem_cache_free(rds_tcp_conn_slab, tc); |
284 | 285 | } |
285 | 286 | |
... | ... | @@ -308,13 +309,13 @@ |
308 | 309 | rdsdebug("rds_conn_path [%d] tc %p\n", i, |
309 | 310 | conn->c_path[i].cp_transport_data); |
310 | 311 | } |
311 | - spin_lock_bh(&rds_tcp_conn_lock); | |
312 | + spin_lock_irq(&rds_tcp_conn_lock); | |
312 | 313 | for (i = 0; i < RDS_MPATH_WORKERS; i++) { |
313 | 314 | tc = conn->c_path[i].cp_transport_data; |
314 | 315 | tc->t_tcp_node_detached = false; |
315 | 316 | list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list); |
316 | 317 | } |
317 | - spin_unlock_bh(&rds_tcp_conn_lock); | |
318 | + spin_unlock_irq(&rds_tcp_conn_lock); | |
318 | 319 | fail: |
319 | 320 | if (ret) { |
320 | 321 | for (j = 0; j < i; j++) |
... | ... | @@ -527,7 +528,7 @@ |
527 | 528 | |
528 | 529 | rtn->rds_tcp_listen_sock = NULL; |
529 | 530 | rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w); |
530 | - spin_lock_bh(&rds_tcp_conn_lock); | |
531 | + spin_lock_irq(&rds_tcp_conn_lock); | |
531 | 532 | list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { |
532 | 533 | struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); |
533 | 534 | |
... | ... | @@ -540,7 +541,7 @@ |
540 | 541 | tc->t_tcp_node_detached = true; |
541 | 542 | } |
542 | 543 | } |
543 | - spin_unlock_bh(&rds_tcp_conn_lock); | |
544 | + spin_unlock_irq(&rds_tcp_conn_lock); | |
544 | 545 | list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) |
545 | 546 | rds_conn_destroy(tc->t_cpath->cp_conn); |
546 | 547 | } |
... | ... | @@ -588,7 +589,7 @@ |
588 | 589 | { |
589 | 590 | struct rds_tcp_connection *tc, *_tc; |
590 | 591 | |
591 | - spin_lock_bh(&rds_tcp_conn_lock); | |
592 | + spin_lock_irq(&rds_tcp_conn_lock); | |
592 | 593 | list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) { |
593 | 594 | struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net); |
594 | 595 | |
... | ... | @@ -598,7 +599,7 @@ |
598 | 599 | /* reconnect with new parameters */ |
599 | 600 | rds_conn_path_drop(tc->t_cpath, false); |
600 | 601 | } |
601 | - spin_unlock_bh(&rds_tcp_conn_lock); | |
602 | + spin_unlock_irq(&rds_tcp_conn_lock); | |
602 | 603 | } |
603 | 604 | |
604 | 605 | static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write, |