Commit 5405ff6e15f40f2f53e37d2dcd7de521e2b7a96f

Authored by Jon Paul Maloy
Committed by David S. Miller
1 parent 2312bf61ae

tipc: convert node lock to rwlock

According to the node FSM a node in state SELF_UP_PEER_UP cannot
change state inside a lock context, except when a TUNNEL_PROTOCOL
(SYNCH or FAILOVER) packet arrives. However, the node's individual
links may still change state.

Since each link now is protected by its own spinlock, we finally have
the conditions in place to convert the node spinlock to an rwlock_t.
If the node state and arriving packet type are right, we can let the
link directly receive the packet under protection of its own spinlock
and the node lock in read mode. In all other cases we use the node
lock in write mode. This enables full concurrent execution between
parallel links during steady-state traffic situations, i.e., 99+ %
of the time.

This commit implements this change.

Reviewed-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 3 changed files with 136 additions and 133 deletions Side-by-side Diff

... ... @@ -1547,7 +1547,7 @@
1547 1547 *bearer_id = 0;
1548 1548 rcu_read_lock();
1549 1549 list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
1550   - tipc_node_lock(n_ptr);
  1550 + tipc_node_read_lock(n_ptr);
1551 1551 for (i = 0; i < MAX_BEARERS; i++) {
1552 1552 l_ptr = n_ptr->links[i].link;
1553 1553 if (l_ptr && !strcmp(l_ptr->name, link_name)) {
... ... @@ -1556,7 +1556,7 @@
1556 1556 break;
1557 1557 }
1558 1558 }
1559   - tipc_node_unlock(n_ptr);
  1559 + tipc_node_read_unlock(n_ptr);
1560 1560 if (found_node)
1561 1561 break;
1562 1562 }
... ... @@ -1658,7 +1658,7 @@
1658 1658 if (!node)
1659 1659 return -EINVAL;
1660 1660  
1661   - tipc_node_lock(node);
  1661 + tipc_node_read_lock(node);
1662 1662  
1663 1663 link = node->links[bearer_id].link;
1664 1664 if (!link) {
... ... @@ -1699,7 +1699,7 @@
1699 1699 }
1700 1700  
1701 1701 out:
1702   - tipc_node_unlock(node);
  1702 + tipc_node_read_unlock(node);
1703 1703  
1704 1704 return res;
1705 1705 }
1706 1706  
... ... @@ -1898,10 +1898,10 @@
1898 1898  
1899 1899 list_for_each_entry_continue_rcu(node, &tn->node_list,
1900 1900 list) {
1901   - tipc_node_lock(node);
  1901 + tipc_node_read_lock(node);
1902 1902 err = __tipc_nl_add_node_links(net, &msg, node,
1903 1903 &prev_link);
1904   - tipc_node_unlock(node);
  1904 + tipc_node_read_unlock(node);
1905 1905 if (err)
1906 1906 goto out;
1907 1907  
1908 1908  
... ... @@ -1913,10 +1913,10 @@
1913 1913 goto out;
1914 1914  
1915 1915 list_for_each_entry_rcu(node, &tn->node_list, list) {
1916   - tipc_node_lock(node);
  1916 + tipc_node_read_lock(node);
1917 1917 err = __tipc_nl_add_node_links(net, &msg, node,
1918 1918 &prev_link);
1919   - tipc_node_unlock(node);
  1919 + tipc_node_read_unlock(node);
1920 1920 if (err)
1921 1921 goto out;
1922 1922  
1923 1923  
1924 1924  
... ... @@ -1967,16 +1967,16 @@
1967 1967 if (!node)
1968 1968 return -EINVAL;
1969 1969  
1970   - tipc_node_lock(node);
  1970 + tipc_node_read_lock(node);
1971 1971 link = node->links[bearer_id].link;
1972 1972 if (!link) {
1973   - tipc_node_unlock(node);
  1973 + tipc_node_read_unlock(node);
1974 1974 nlmsg_free(msg.skb);
1975 1975 return -EINVAL;
1976 1976 }
1977 1977  
1978 1978 err = __tipc_nl_add_link(net, &msg, link, 0);
1979   - tipc_node_unlock(node);
  1979 + tipc_node_read_unlock(node);
1980 1980 if (err) {
1981 1981 nlmsg_free(msg.skb);
1982 1982 return err;
1983 1983  
1984 1984  
1985 1985  
1986 1986  
... ... @@ -2021,19 +2021,19 @@
2021 2021 node = tipc_link_find_owner(net, link_name, &bearer_id);
2022 2022 if (!node)
2023 2023 return -EINVAL;
  2024 +
2024 2025 le = &node->links[bearer_id];
2025   - tipc_node_lock(node);
  2026 + tipc_node_read_lock(node);
2026 2027 spin_lock_bh(&le->lock);
2027 2028 link = le->link;
2028 2029 if (!link) {
2029   - tipc_node_unlock(node);
  2030 + spin_unlock_bh(&le->lock);
  2031 + tipc_node_read_unlock(node);
2030 2032 return -EINVAL;
2031 2033 }
2032   -
2033 2034 link_reset_statistics(link);
2034 2035 spin_unlock_bh(&le->lock);
2035   - tipc_node_unlock(node);
2036   -
  2036 + tipc_node_read_unlock(node);
2037 2037 return 0;
2038 2038 }
... ... @@ -141,10 +141,63 @@
141 141 return NULL;
142 142 }
143 143  
  144 +void tipc_node_read_lock(struct tipc_node *n)
  145 +{
  146 + read_lock_bh(&n->lock);
  147 +}
  148 +
  149 +void tipc_node_read_unlock(struct tipc_node *n)
  150 +{
  151 + read_unlock_bh(&n->lock);
  152 +}
  153 +
  154 +static void tipc_node_write_lock(struct tipc_node *n)
  155 +{
  156 + write_lock_bh(&n->lock);
  157 +}
  158 +
  159 +static void tipc_node_write_unlock(struct tipc_node *n)
  160 +{
  161 + struct net *net = n->net;
  162 + u32 addr = 0;
  163 + u32 flags = n->action_flags;
  164 + u32 link_id = 0;
  165 + struct list_head *publ_list;
  166 +
  167 + if (likely(!flags)) {
  168 + write_unlock_bh(&n->lock);
  169 + return;
  170 + }
  171 +
  172 + addr = n->addr;
  173 + link_id = n->link_id;
  174 + publ_list = &n->publ_list;
  175 +
  176 + n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
  177 + TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
  178 +
  179 + write_unlock_bh(&n->lock);
  180 +
  181 + if (flags & TIPC_NOTIFY_NODE_DOWN)
  182 + tipc_publ_notify(net, publ_list, addr);
  183 +
  184 + if (flags & TIPC_NOTIFY_NODE_UP)
  185 + tipc_named_node_up(net, addr);
  186 +
  187 + if (flags & TIPC_NOTIFY_LINK_UP)
  188 + tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
  189 + TIPC_NODE_SCOPE, link_id, addr);
  190 +
  191 + if (flags & TIPC_NOTIFY_LINK_DOWN)
  192 + tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
  193 + link_id, addr);
  194 +}
  195 +
144 196 struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
145 197 {
146 198 struct tipc_net *tn = net_generic(net, tipc_net_id);
147 199 struct tipc_node *n_ptr, *temp_node;
  200 + int i;
148 201  
149 202 spin_lock_bh(&tn->node_list_lock);
150 203 n_ptr = tipc_node_find(net, addr);
... ... @@ -159,7 +212,7 @@
159 212 n_ptr->net = net;
160 213 n_ptr->capabilities = capabilities;
161 214 kref_init(&n_ptr->kref);
162   - spin_lock_init(&n_ptr->lock);
  215 + rwlock_init(&n_ptr->lock);
163 216 INIT_HLIST_NODE(&n_ptr->hash);
164 217 INIT_LIST_HEAD(&n_ptr->list);
165 218 INIT_LIST_HEAD(&n_ptr->publ_list);
... ... @@ -168,6 +221,8 @@
168 221 skb_queue_head_init(&n_ptr->bc_entry.inputq1);
169 222 __skb_queue_head_init(&n_ptr->bc_entry.arrvq);
170 223 skb_queue_head_init(&n_ptr->bc_entry.inputq2);
  224 + for (i = 0; i < MAX_BEARERS; i++)
  225 + spin_lock_init(&n_ptr->links[i].lock);
171 226 hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
172 227 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
173 228 if (n_ptr->addr < temp_node->addr)
174 229  
... ... @@ -246,9 +301,9 @@
246 301 pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
247 302 return;
248 303 }
249   - tipc_node_lock(n);
  304 + tipc_node_write_lock(n);
250 305 list_add_tail(subscr, &n->publ_list);
251   - tipc_node_unlock(n);
  306 + tipc_node_write_unlock(n);
252 307 tipc_node_put(n);
253 308 }
254 309  
255 310  
... ... @@ -264,9 +319,9 @@
264 319 pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
265 320 return;
266 321 }
267   - tipc_node_lock(n);
  322 + tipc_node_write_lock(n);
268 323 list_del_init(subscr);
269   - tipc_node_unlock(n);
  324 + tipc_node_write_unlock(n);
270 325 tipc_node_put(n);
271 326 }
272 327  
273 328  
... ... @@ -293,9 +348,9 @@
293 348 conn->port = port;
294 349 conn->peer_port = peer_port;
295 350  
296   - tipc_node_lock(node);
  351 + tipc_node_write_lock(node);
297 352 list_add_tail(&conn->list, &node->conn_sks);
298   - tipc_node_unlock(node);
  353 + tipc_node_write_unlock(node);
299 354 exit:
300 355 tipc_node_put(node);
301 356 return err;
302 357  
... ... @@ -313,14 +368,14 @@
313 368 if (!node)
314 369 return;
315 370  
316   - tipc_node_lock(node);
  371 + tipc_node_write_lock(node);
317 372 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
318 373 if (port != conn->port)
319 374 continue;
320 375 list_del(&conn->list);
321 376 kfree(conn);
322 377 }
323   - tipc_node_unlock(node);
  378 + tipc_node_write_unlock(node);
324 379 tipc_node_put(node);
325 380 }
326 381  
... ... @@ -337,7 +392,7 @@
337 392 __skb_queue_head_init(&xmitq);
338 393  
339 394 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
340   - tipc_node_lock(n);
  395 + tipc_node_read_lock(n);
341 396 le = &n->links[bearer_id];
342 397 spin_lock_bh(&le->lock);
343 398 if (le->link) {
... ... @@ -346,7 +401,7 @@
346 401 rc = tipc_link_timeout(le->link, &xmitq);
347 402 }
348 403 spin_unlock_bh(&le->lock);
349   - tipc_node_unlock(n);
  404 + tipc_node_read_unlock(n);
350 405 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
351 406 if (rc & TIPC_LINK_DOWN_EVT)
352 407 tipc_node_link_down(n, bearer_id, false);
353 408  
... ... @@ -425,9 +480,9 @@
425 480 static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
426 481 struct sk_buff_head *xmitq)
427 482 {
428   - tipc_node_lock(n);
  483 + tipc_node_write_lock(n);
429 484 __tipc_node_link_up(n, bearer_id, xmitq);
430   - tipc_node_unlock(n);
  485 + tipc_node_write_unlock(n);
431 486 }
432 487  
433 488 /**
... ... @@ -516,7 +571,7 @@
516 571  
517 572 __skb_queue_head_init(&xmitq);
518 573  
519   - tipc_node_lock(n);
  574 + tipc_node_write_lock(n);
520 575 if (!tipc_link_is_establishing(l)) {
521 576 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
522 577 if (delete) {
... ... @@ -528,7 +583,7 @@
528 583 /* Defuse pending tipc_node_link_up() */
529 584 tipc_link_fsm_evt(l, LINK_RESET_EVT);
530 585 }
531   - tipc_node_unlock(n);
  586 + tipc_node_write_unlock(n);
532 587 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
533 588 tipc_sk_rcv(n->net, &le->inputq);
534 589 }
... ... @@ -561,7 +616,7 @@
561 616 if (!n)
562 617 return;
563 618  
564   - tipc_node_lock(n);
  619 + tipc_node_write_lock(n);
565 620  
566 621 le = &n->links[b->identity];
567 622  
... ... @@ -656,7 +711,6 @@
656 711 if (n->state == NODE_FAILINGOVER)
657 712 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
658 713 le->link = l;
659   - spin_lock_init(&le->lock);
660 714 n->link_cnt++;
661 715 tipc_node_calculate_timer(n, l);
662 716 if (n->link_cnt == 1)
... ... @@ -665,7 +719,7 @@
665 719 }
666 720 memcpy(&le->maddr, maddr, sizeof(*maddr));
667 721 exit:
668   - tipc_node_unlock(n);
  722 + tipc_node_write_unlock(n);
669 723 if (reset && !tipc_link_is_reset(l))
670 724 tipc_node_link_down(n, b->identity, false);
671 725 tipc_node_put(n);
... ... @@ -873,24 +927,6 @@
873 927 pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
874 928 }
875 929  
876   -bool tipc_node_filter_pkt(struct tipc_node *n, struct tipc_msg *hdr)
877   -{
878   - int state = n->state;
879   -
880   - if (likely(state == SELF_UP_PEER_UP))
881   - return true;
882   -
883   - if (state == SELF_LEAVING_PEER_DOWN)
884   - return false;
885   -
886   - if (state == SELF_DOWN_PEER_LEAVING) {
887   - if (msg_peer_node_is_up(hdr))
888   - return false;
889   - }
890   -
891   - return true;
892   -}
893   -
894 930 static void node_lost_contact(struct tipc_node *n,
895 931 struct sk_buff_head *inputq)
896 932 {
897 933  
898 934  
... ... @@ -952,56 +988,18 @@
952 988 if (bearer_id >= MAX_BEARERS)
953 989 goto exit;
954 990  
955   - tipc_node_lock(node);
  991 + tipc_node_read_lock(node);
956 992 link = node->links[bearer_id].link;
957 993 if (link) {
958 994 strncpy(linkname, link->name, len);
959 995 err = 0;
960 996 }
961 997 exit:
962   - tipc_node_unlock(node);
  998 + tipc_node_read_unlock(node);
963 999 tipc_node_put(node);
964 1000 return err;
965 1001 }
966 1002  
967   -void tipc_node_unlock(struct tipc_node *node)
968   -{
969   - struct net *net = node->net;
970   - u32 addr = 0;
971   - u32 flags = node->action_flags;
972   - u32 link_id = 0;
973   - struct list_head *publ_list;
974   -
975   - if (likely(!flags)) {
976   - spin_unlock_bh(&node->lock);
977   - return;
978   - }
979   -
980   - addr = node->addr;
981   - link_id = node->link_id;
982   - publ_list = &node->publ_list;
983   -
984   - node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
985   - TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
986   -
987   - spin_unlock_bh(&node->lock);
988   -
989   - if (flags & TIPC_NOTIFY_NODE_DOWN)
990   - tipc_publ_notify(net, publ_list, addr);
991   -
992   - if (flags & TIPC_NOTIFY_NODE_UP)
993   - tipc_named_node_up(net, addr);
994   -
995   - if (flags & TIPC_NOTIFY_LINK_UP)
996   - tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
997   - TIPC_NODE_SCOPE, link_id, addr);
998   -
999   - if (flags & TIPC_NOTIFY_LINK_DOWN)
1000   - tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
1001   - link_id, addr);
1002   -
1003   -}
1004   -
1005 1003 /* Caller should hold node lock for the passed node */
1006 1004 static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
1007 1005 {
1008 1006  
1009 1007  
1010 1008  
1011 1009  
1012 1010  
1013 1011  
1014 1012  
... ... @@ -1048,40 +1046,38 @@
1048 1046 int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
1049 1047 u32 dnode, int selector)
1050 1048 {
1051   - struct tipc_link_entry *le;
  1049 + struct tipc_link_entry *le = NULL;
1052 1050 struct tipc_node *n;
1053 1051 struct sk_buff_head xmitq;
1054   - struct tipc_media_addr *maddr = NULL;
1055 1052 int bearer_id = -1;
1056 1053 int rc = -EHOSTUNREACH;
1057 1054  
1058 1055 __skb_queue_head_init(&xmitq);
1059 1056 n = tipc_node_find(net, dnode);
1060 1057 if (likely(n)) {
1061   - tipc_node_lock(n);
  1058 + tipc_node_read_lock(n);
1062 1059 bearer_id = n->active_links[selector & 1];
1063 1060 if (bearer_id >= 0) {
1064 1061 le = &n->links[bearer_id];
1065   - maddr = &le->maddr;
1066 1062 spin_lock_bh(&le->lock);
1067   - if (likely(le->link))
1068   - rc = tipc_link_xmit(le->link, list, &xmitq);
  1063 + rc = tipc_link_xmit(le->link, list, &xmitq);
1069 1064 spin_unlock_bh(&le->lock);
1070 1065 }
1071   - tipc_node_unlock(n);
  1066 + tipc_node_read_unlock(n);
  1067 + if (likely(!skb_queue_empty(&xmitq))) {
  1068 + tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
  1069 + return 0;
  1070 + }
1072 1071 if (unlikely(rc == -ENOBUFS))
1073 1072 tipc_node_link_down(n, bearer_id, false);
1074 1073 tipc_node_put(n);
  1074 + return rc;
1075 1075 }
1076   - if (likely(!skb_queue_empty(&xmitq))) {
1077   - tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
1078   - return 0;
1079   - }
1080   - if (likely(in_own_node(net, dnode))) {
1081   - tipc_sk_rcv(net, list);
1082   - return 0;
1083   - }
1084   - return rc;
  1076 +
  1077 + if (unlikely(!in_own_node(net, dnode)))
  1078 + return rc;
  1079 + tipc_sk_rcv(net, list);
  1080 + return 0;
1085 1081 }
1086 1082  
1087 1083 /* tipc_node_xmit_skb(): send single buffer to destination
1088 1084  
... ... @@ -1171,9 +1167,9 @@
1171 1167  
1172 1168 /* Broadcast ACKs are sent on a unicast link */
1173 1169 if (rc & TIPC_LINK_SND_BC_ACK) {
1174   - tipc_node_lock(n);
  1170 + tipc_node_read_lock(n);
1175 1171 tipc_link_build_ack_msg(le->link, &xmitq);
1176   - tipc_node_unlock(n);
  1172 + tipc_node_read_unlock(n);
1177 1173 }
1178 1174  
1179 1175 if (!skb_queue_empty(&xmitq))
... ... @@ -1229,7 +1225,7 @@
1229 1225 }
1230 1226 }
1231 1227  
1232   - /* Update node accesibility if applicable */
  1228 + /* Check and update node accesibility if applicable */
1233 1229 if (state == SELF_UP_PEER_COMING) {
1234 1230 if (!tipc_link_is_up(l))
1235 1231 return true;
... ... @@ -1245,6 +1241,9 @@
1245 1241 return true;
1246 1242 }
1247 1243  
  1244 + if (state == SELF_LEAVING_PEER_DOWN)
  1245 + return false;
  1246 +
1248 1247 /* Ignore duplicate packets */
1249 1248 if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
1250 1249 return true;
1251 1250  
1252 1251  
1253 1252  
1254 1253  
... ... @@ -1361,22 +1360,30 @@
1361 1360 else if (unlikely(n->bc_entry.link->acked != bc_ack))
1362 1361 tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);
1363 1362  
1364   - tipc_node_lock(n);
1365   -
1366   - /* Is reception permitted at the moment ? */
1367   - if (!tipc_node_filter_pkt(n, hdr))
1368   - goto unlock;
1369   -
1370   - /* Check and if necessary update node state */
1371   - if (likely(tipc_node_check_state(n, skb, bearer_id, &xmitq))) {
  1363 + /* Receive packet directly if conditions permit */
  1364 + tipc_node_read_lock(n);
  1365 + if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
1372 1366 spin_lock_bh(&le->lock);
1373   - rc = tipc_link_rcv(le->link, skb, &xmitq);
  1367 + if (le->link) {
  1368 + rc = tipc_link_rcv(le->link, skb, &xmitq);
  1369 + skb = NULL;
  1370 + }
1374 1371 spin_unlock_bh(&le->lock);
1375   - skb = NULL;
1376 1372 }
1377   -unlock:
1378   - tipc_node_unlock(n);
  1373 + tipc_node_read_unlock(n);
1379 1374  
  1375 + /* Check/update node state before receiving */
  1376 + if (unlikely(skb)) {
  1377 + tipc_node_write_lock(n);
  1378 + if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
  1379 + if (le->link) {
  1380 + rc = tipc_link_rcv(le->link, skb, &xmitq);
  1381 + skb = NULL;
  1382 + }
  1383 + }
  1384 + tipc_node_write_unlock(n);
  1385 + }
  1386 +
1380 1387 if (unlikely(rc & TIPC_LINK_UP_EVT))
1381 1388 tipc_node_link_up(n, bearer_id, &xmitq);
1382 1389  
1383 1390  
1384 1391  
... ... @@ -1440,15 +1447,15 @@
1440 1447 continue;
1441 1448 }
1442 1449  
1443   - tipc_node_lock(node);
  1450 + tipc_node_read_lock(node);
1444 1451 err = __tipc_nl_add_node(&msg, node);
1445 1452 if (err) {
1446 1453 last_addr = node->addr;
1447   - tipc_node_unlock(node);
  1454 + tipc_node_read_unlock(node);
1448 1455 goto out;
1449 1456 }
1450 1457  
1451   - tipc_node_unlock(node);
  1458 + tipc_node_read_unlock(node);
1452 1459 }
1453 1460 done = 1;
1454 1461 out:
... ... @@ -109,7 +109,7 @@
109 109 struct tipc_node {
110 110 u32 addr;
111 111 struct kref kref;
112   - spinlock_t lock;
  112 + rwlock_t lock;
113 113 struct net *net;
114 114 struct hlist_node hash;
115 115 int active_links[2];
... ... @@ -145,7 +145,8 @@
145 145 bool tipc_node_is_up(struct tipc_node *n);
146 146 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
147 147 char *linkname, size_t len);
148   -void tipc_node_unlock(struct tipc_node *node);
  148 +void tipc_node_read_lock(struct tipc_node *n);
  149 +void tipc_node_read_unlock(struct tipc_node *node);
149 150 int tipc_node_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
150 151 int selector);
151 152 int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
... ... @@ -156,11 +157,6 @@
156 157 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
157 158 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
158 159 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
159   -
160   -static inline void tipc_node_lock(struct tipc_node *node)
161   -{
162   - spin_lock_bh(&node->lock);
163   -}
164 160  
165 161 static inline struct tipc_link *node_active_link(struct tipc_node *n, int sel)
166 162 {