Commit 1fc54d8f49c1270c584803437fb7c0ac543588c1
Committed by
David S. Miller
1 parent
edb2c34fb2
Exists in
master
and in
7 other branches
[TIPC]: Fix simple sparse warnings
Tried to run the new tipc stack through sparse. The following patch fixes all cases where 0 was used in place of NULL. Use NULL to document that this is a pointer and to silence sparse. This brought the sparse warning count down by 127, to 24 warnings. Signed-off-by: Sam Ravnborg <sam@ravnborg.org> Signed-off-by: Per Liden <per.liden@ericsson.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 21 changed files with 127 additions and 127 deletions Side-by-side Diff
- net/tipc/bcast.c
- net/tipc/bearer.c
- net/tipc/cluster.c
- net/tipc/cluster.h
- net/tipc/config.c
- net/tipc/dbg.c
- net/tipc/eth_media.c
- net/tipc/link.c
- net/tipc/name_distr.c
- net/tipc/name_table.c
- net/tipc/net.c
- net/tipc/node.c
- net/tipc/node.h
- net/tipc/node_subscr.c
- net/tipc/port.c
- net/tipc/ref.c
- net/tipc/ref.h
- net/tipc/socket.c
- net/tipc/subscr.c
- net/tipc/user_reg.c
- net/tipc/zone.c
net/tipc/bcast.c
| ... | ... | @@ -271,7 +271,7 @@ |
| 271 | 271 | msg_set_bcgap_to(msg, n_ptr->bclink.gap_to); |
| 272 | 272 | msg_set_bcast_tag(msg, tipc_own_tag); |
| 273 | 273 | |
| 274 | - if (tipc_bearer_send(&bcbearer->bearer, buf, 0)) { | |
| 274 | + if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) { | |
| 275 | 275 | bcl->stats.sent_nacks++; |
| 276 | 276 | buf_discard(buf); |
| 277 | 277 | } else { |
net/tipc/bearer.c
| ... | ... | @@ -45,10 +45,10 @@ |
| 45 | 45 | |
| 46 | 46 | #define MAX_ADDR_STR 32 |
| 47 | 47 | |
| 48 | -static struct media *media_list = 0; | |
| 48 | +static struct media *media_list = NULL; | |
| 49 | 49 | static u32 media_count = 0; |
| 50 | 50 | |
| 51 | -struct bearer *tipc_bearers = 0; | |
| 51 | +struct bearer *tipc_bearers = NULL; | |
| 52 | 52 | |
| 53 | 53 | /** |
| 54 | 54 | * media_name_valid - validate media name |
| ... | ... | @@ -79,7 +79,7 @@ |
| 79 | 79 | if (!strcmp(m_ptr->name, name)) |
| 80 | 80 | return m_ptr; |
| 81 | 81 | } |
| 82 | - return 0; | |
| 82 | + return NULL; | |
| 83 | 83 | } |
| 84 | 84 | |
| 85 | 85 | /** |
| ... | ... | @@ -287,7 +287,7 @@ |
| 287 | 287 | if (b_ptr->active && (!strcmp(b_ptr->publ.name, name))) |
| 288 | 288 | return b_ptr; |
| 289 | 289 | } |
| 290 | - return 0; | |
| 290 | + return NULL; | |
| 291 | 291 | } |
| 292 | 292 | |
| 293 | 293 | /** |
| ... | ... | @@ -307,7 +307,7 @@ |
| 307 | 307 | if (!strcmp(b_if_name, if_name)) |
| 308 | 308 | return b_ptr; |
| 309 | 309 | } |
| 310 | - return 0; | |
| 310 | + return NULL; | |
| 311 | 311 | } |
| 312 | 312 | |
| 313 | 313 | /** |
| ... | ... | @@ -569,7 +569,7 @@ |
| 569 | 569 | |
| 570 | 570 | int tipc_block_bearer(const char *name) |
| 571 | 571 | { |
| 572 | - struct bearer *b_ptr = 0; | |
| 572 | + struct bearer *b_ptr = NULL; | |
| 573 | 573 | struct link *l_ptr; |
| 574 | 574 | struct link *temp_l_ptr; |
| 575 | 575 | |
| ... | ... | @@ -666,8 +666,8 @@ |
| 666 | 666 | } else { |
| 667 | 667 | kfree(tipc_bearers); |
| 668 | 668 | kfree(media_list); |
| 669 | - tipc_bearers = 0; | |
| 670 | - media_list = 0; | |
| 669 | + tipc_bearers = NULL; | |
| 670 | + media_list = NULL; | |
| 671 | 671 | res = -ENOMEM; |
| 672 | 672 | } |
| 673 | 673 | write_unlock_bh(&tipc_net_lock); |
| ... | ... | @@ -691,8 +691,8 @@ |
| 691 | 691 | } |
| 692 | 692 | kfree(tipc_bearers); |
| 693 | 693 | kfree(media_list); |
| 694 | - tipc_bearers = 0; | |
| 695 | - media_list = 0; | |
| 694 | + tipc_bearers = NULL; | |
| 695 | + media_list = NULL; | |
| 696 | 696 | media_count = 0; |
| 697 | 697 | } |
net/tipc/cluster.c
| ... | ... | @@ -48,7 +48,7 @@ |
| 48 | 48 | u32 lower, u32 upper); |
| 49 | 49 | struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest); |
| 50 | 50 | |
| 51 | -struct node **tipc_local_nodes = 0; | |
| 51 | +struct node **tipc_local_nodes = NULL; | |
| 52 | 52 | struct node_map tipc_cltr_bcast_nodes = {0,{0,}}; |
| 53 | 53 | u32 tipc_highest_allowed_slave = 0; |
| 54 | 54 | |
| ... | ... | @@ -61,7 +61,7 @@ |
| 61 | 61 | |
| 62 | 62 | c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC); |
| 63 | 63 | if (c_ptr == NULL) |
| 64 | - return 0; | |
| 64 | + return NULL; | |
| 65 | 65 | memset(c_ptr, 0, sizeof(*c_ptr)); |
| 66 | 66 | |
| 67 | 67 | c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0); |
| ... | ... | @@ -73,7 +73,7 @@ |
| 73 | 73 | c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC); |
| 74 | 74 | if (c_ptr->nodes == NULL) { |
| 75 | 75 | kfree(c_ptr); |
| 76 | - return 0; | |
| 76 | + return NULL; | |
| 77 | 77 | } |
| 78 | 78 | memset(c_ptr->nodes, 0, alloc); |
| 79 | 79 | if (in_own_cluster(addr)) |
| ... | ... | @@ -91,7 +91,7 @@ |
| 91 | 91 | } |
| 92 | 92 | else { |
| 93 | 93 | kfree(c_ptr); |
| 94 | - c_ptr = 0; | |
| 94 | + c_ptr = NULL; | |
| 95 | 95 | } |
| 96 | 96 | |
| 97 | 97 | return c_ptr; |
| ... | ... | @@ -204,7 +204,7 @@ |
| 204 | 204 | |
| 205 | 205 | assert(!in_own_cluster(c_ptr->addr)); |
| 206 | 206 | if (!c_ptr->highest_node) |
| 207 | - return 0; | |
| 207 | + return NULL; | |
| 208 | 208 | |
| 209 | 209 | /* Start entry must be random */ |
| 210 | 210 | while (mask > c_ptr->highest_node) { |
| ... | ... | @@ -222,7 +222,7 @@ |
| 222 | 222 | if (tipc_node_has_active_links(c_ptr->nodes[n_num])) |
| 223 | 223 | return c_ptr->nodes[n_num]; |
| 224 | 224 | } |
| 225 | - return 0; | |
| 225 | + return NULL; | |
| 226 | 226 | } |
| 227 | 227 | |
| 228 | 228 | /* |
net/tipc/cluster.h
net/tipc/config.c
| ... | ... | @@ -683,11 +683,11 @@ |
| 683 | 683 | memset(&mng, 0, sizeof(mng)); |
| 684 | 684 | INIT_LIST_HEAD(&mng.link_subscribers); |
| 685 | 685 | |
| 686 | - res = tipc_attach(&mng.user_ref, 0, 0); | |
| 686 | + res = tipc_attach(&mng.user_ref, NULL, NULL); | |
| 687 | 687 | if (res) |
| 688 | 688 | goto failed; |
| 689 | 689 | |
| 690 | - res = tipc_createport(mng.user_ref, 0, TIPC_CRITICAL_IMPORTANCE, | |
| 690 | + res = tipc_createport(mng.user_ref, NULL, TIPC_CRITICAL_IMPORTANCE, | |
| 691 | 691 | NULL, NULL, NULL, |
| 692 | 692 | NULL, cfg_named_msg_event, NULL, |
| 693 | 693 | NULL, &mng.port_ref); |
net/tipc/dbg.c
| ... | ... | @@ -81,7 +81,7 @@ |
| 81 | 81 | |
| 82 | 82 | pb->crs = pb->buf = raw; |
| 83 | 83 | pb->size = sz; |
| 84 | - pb->next = 0; | |
| 84 | + pb->next = NULL; | |
| 85 | 85 | pb->buf[0] = 0; |
| 86 | 86 | pb->buf[sz-1] = ~0; |
| 87 | 87 | } |
| ... | ... | @@ -216,7 +216,7 @@ |
| 216 | 216 | } |
| 217 | 217 | } |
| 218 | 218 | pb_next = pb->next; |
| 219 | - pb->next = 0; | |
| 219 | + pb->next = NULL; | |
| 220 | 220 | pb = pb_next; |
| 221 | 221 | } |
| 222 | 222 | spin_unlock_bh(&print_lock); |
net/tipc/eth_media.c
| ... | ... | @@ -169,7 +169,7 @@ |
| 169 | 169 | |
| 170 | 170 | static void disable_bearer(struct tipc_bearer *tb_ptr) |
| 171 | 171 | { |
| 172 | - ((struct eth_bearer *)tb_ptr->usr_handle)->bearer = 0; | |
| 172 | + ((struct eth_bearer *)tb_ptr->usr_handle)->bearer = NULL; | |
| 173 | 173 | } |
| 174 | 174 | |
| 175 | 175 | /** |
| ... | ... | @@ -285,7 +285,7 @@ |
| 285 | 285 | for (i = 0; i < MAX_ETH_BEARERS ; i++) { |
| 286 | 286 | if (eth_bearers[i].bearer) { |
| 287 | 287 | eth_bearers[i].bearer->blocked = 1; |
| 288 | - eth_bearers[i].bearer = 0; | |
| 288 | + eth_bearers[i].bearer = NULL; | |
| 289 | 289 | } |
| 290 | 290 | if (eth_bearers[i].dev) { |
| 291 | 291 | dev_remove_pack(ð_bearers[i].tipc_packet_type); |
net/tipc/link.c
| ... | ... | @@ -573,7 +573,7 @@ |
| 573 | 573 | if (win <= 0) |
| 574 | 574 | break; |
| 575 | 575 | list_del_init(&p_ptr->wait_list); |
| 576 | - p_ptr->congested_link = 0; | |
| 576 | + p_ptr->congested_link = NULL; | |
| 577 | 577 | assert(p_ptr->wakeup); |
| 578 | 578 | spin_lock_bh(p_ptr->publ.lock); |
| 579 | 579 | p_ptr->publ.congested = 0; |
| ... | ... | @@ -1355,7 +1355,7 @@ |
| 1355 | 1355 | fragm_crs = 0; |
| 1356 | 1356 | fragm_rest = 0; |
| 1357 | 1357 | sect_rest = 0; |
| 1358 | - sect_crs = 0; | |
| 1358 | + sect_crs = NULL; | |
| 1359 | 1359 | curr_sect = -1; |
| 1360 | 1360 | |
| 1361 | 1361 | /* Prepare reusable fragment header: */ |
| ... | ... | @@ -1549,7 +1549,7 @@ |
| 1549 | 1549 | msg_dbg(buf_msg(buf), ">DEF-PROT>"); |
| 1550 | 1550 | l_ptr->unacked_window = 0; |
| 1551 | 1551 | buf_discard(buf); |
| 1552 | - l_ptr->proto_msg_queue = 0; | |
| 1552 | + l_ptr->proto_msg_queue = NULL; | |
| 1553 | 1553 | return TIPC_OK; |
| 1554 | 1554 | } else { |
| 1555 | 1555 | msg_dbg(buf_msg(buf), "|>DEF-PROT>"); |
| ... | ... | @@ -1860,7 +1860,7 @@ |
| 1860 | 1860 | struct sk_buff **tail, |
| 1861 | 1861 | struct sk_buff *buf) |
| 1862 | 1862 | { |
| 1863 | - struct sk_buff *prev = 0; | |
| 1863 | + struct sk_buff *prev = NULL; | |
| 1864 | 1864 | struct sk_buff *crs = *head; |
| 1865 | 1865 | u32 seq_no = msg_seqno(buf_msg(buf)); |
| 1866 | 1866 | |
| ... | ... | @@ -1953,7 +1953,7 @@ |
| 1953 | 1953 | void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg, |
| 1954 | 1954 | u32 gap, u32 tolerance, u32 priority, u32 ack_mtu) |
| 1955 | 1955 | { |
| 1956 | - struct sk_buff *buf = 0; | |
| 1956 | + struct sk_buff *buf = NULL; | |
| 1957 | 1957 | struct tipc_msg *msg = l_ptr->pmsg; |
| 1958 | 1958 | u32 msg_size = sizeof(l_ptr->proto_msg); |
| 1959 | 1959 | |
| ... | ... | @@ -2426,7 +2426,7 @@ |
| 2426 | 2426 | } |
| 2427 | 2427 | } |
| 2428 | 2428 | exit: |
| 2429 | - *buf = 0; | |
| 2429 | + *buf = NULL; | |
| 2430 | 2430 | buf_discard(tunnel_buf); |
| 2431 | 2431 | return 0; |
| 2432 | 2432 | } |
| 2433 | 2433 | |
| ... | ... | @@ -2586,13 +2586,13 @@ |
| 2586 | 2586 | int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, |
| 2587 | 2587 | struct tipc_msg **m) |
| 2588 | 2588 | { |
| 2589 | - struct sk_buff *prev = 0; | |
| 2589 | + struct sk_buff *prev = NULL; | |
| 2590 | 2590 | struct sk_buff *fbuf = *fb; |
| 2591 | 2591 | struct tipc_msg *fragm = buf_msg(fbuf); |
| 2592 | 2592 | struct sk_buff *pbuf = *pending; |
| 2593 | 2593 | u32 long_msg_seq_no = msg_long_msgno(fragm); |
| 2594 | 2594 | |
| 2595 | - *fb = 0; | |
| 2595 | + *fb = NULL; | |
| 2596 | 2596 | msg_dbg(fragm,"FRG<REC<"); |
| 2597 | 2597 | |
| 2598 | 2598 | /* Is there an incomplete message waiting for this fragment? */ |
| ... | ... | @@ -2670,8 +2670,8 @@ |
| 2670 | 2670 | |
| 2671 | 2671 | static void link_check_defragm_bufs(struct link *l_ptr) |
| 2672 | 2672 | { |
| 2673 | - struct sk_buff *prev = 0; | |
| 2674 | - struct sk_buff *next = 0; | |
| 2673 | + struct sk_buff *prev = NULL; | |
| 2674 | + struct sk_buff *next = NULL; | |
| 2675 | 2675 | struct sk_buff *buf = l_ptr->defragm_buf; |
| 2676 | 2676 | |
| 2677 | 2677 | if (!buf) |
| 2678 | 2678 | |
| 2679 | 2679 | |
| 2680 | 2680 | |
| ... | ... | @@ -2750,19 +2750,19 @@ |
| 2750 | 2750 | struct link *l_ptr; |
| 2751 | 2751 | |
| 2752 | 2752 | if (!link_name_validate(name, &link_name_parts)) |
| 2753 | - return 0; | |
| 2753 | + return NULL; | |
| 2754 | 2754 | |
| 2755 | 2755 | b_ptr = tipc_bearer_find_interface(link_name_parts.if_local); |
| 2756 | 2756 | if (!b_ptr) |
| 2757 | - return 0; | |
| 2757 | + return NULL; | |
| 2758 | 2758 | |
| 2759 | 2759 | *node = tipc_node_find(link_name_parts.addr_peer); |
| 2760 | 2760 | if (!*node) |
| 2761 | - return 0; | |
| 2761 | + return NULL; | |
| 2762 | 2762 | |
| 2763 | 2763 | l_ptr = (*node)->links[b_ptr->identity]; |
| 2764 | 2764 | if (!l_ptr || strcmp(l_ptr->name, name)) |
| 2765 | - return 0; | |
| 2765 | + return NULL; | |
| 2766 | 2766 | |
| 2767 | 2767 | return l_ptr; |
| 2768 | 2768 | } |
net/tipc/name_distr.c
| ... | ... | @@ -168,8 +168,8 @@ |
| 168 | 168 | void tipc_named_node_up(unsigned long node) |
| 169 | 169 | { |
| 170 | 170 | struct publication *publ; |
| 171 | - struct distr_item *item = 0; | |
| 172 | - struct sk_buff *buf = 0; | |
| 171 | + struct distr_item *item = NULL; | |
| 172 | + struct sk_buff *buf = NULL; | |
| 173 | 173 | u32 left = 0; |
| 174 | 174 | u32 rest; |
| 175 | 175 | u32 max_item_buf; |
| ... | ... | @@ -200,7 +200,7 @@ |
| 200 | 200 | "<%u.%u.%u>\n", tipc_zone(node), |
| 201 | 201 | tipc_cluster(node), tipc_node(node)); |
| 202 | 202 | tipc_link_send(buf, node, node); |
| 203 | - buf = 0; | |
| 203 | + buf = NULL; | |
| 204 | 204 | } |
| 205 | 205 | } |
| 206 | 206 | exit: |
net/tipc/name_table.c
| ... | ... | @@ -121,7 +121,7 @@ |
| 121 | 121 | (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC); |
| 122 | 122 | if (publ == NULL) { |
| 123 | 123 | warn("Memory squeeze; failed to create publication\n"); |
| 124 | - return 0; | |
| 124 | + return NULL; | |
| 125 | 125 | } |
| 126 | 126 | |
| 127 | 127 | memset(publ, 0, sizeof(*publ)); |
| ... | ... | @@ -168,7 +168,7 @@ |
| 168 | 168 | warn("Memory squeeze; failed to create name sequence\n"); |
| 169 | 169 | kfree(nseq); |
| 170 | 170 | kfree(sseq); |
| 171 | - return 0; | |
| 171 | + return NULL; | |
| 172 | 172 | } |
| 173 | 173 | |
| 174 | 174 | memset(nseq, 0, sizeof(*nseq)); |
| ... | ... | @@ -207,7 +207,7 @@ |
| 207 | 207 | else |
| 208 | 208 | return &sseqs[mid]; |
| 209 | 209 | } |
| 210 | - return 0; | |
| 210 | + return NULL; | |
| 211 | 211 | } |
| 212 | 212 | |
| 213 | 213 | /** |
| ... | ... | @@ -263,7 +263,7 @@ |
| 263 | 263 | |
| 264 | 264 | if ((sseq->lower != lower) || (sseq->upper != upper)) { |
| 265 | 265 | warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper); |
| 266 | - return 0; | |
| 266 | + return NULL; | |
| 267 | 267 | } |
| 268 | 268 | } else { |
| 269 | 269 | u32 inspos; |
| ... | ... | @@ -278,7 +278,7 @@ |
| 278 | 278 | if ((inspos < nseq->first_free) && |
| 279 | 279 | (upper >= nseq->sseqs[inspos].lower)) { |
| 280 | 280 | warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper); |
| 281 | - return 0; | |
| 281 | + return NULL; | |
| 282 | 282 | } |
| 283 | 283 | |
| 284 | 284 | /* Ensure there is space for new sub-sequence */ |
| ... | ... | @@ -294,7 +294,7 @@ |
| 294 | 294 | nseq->alloc *= 2; |
| 295 | 295 | } else { |
| 296 | 296 | warn("Memory squeeze; failed to create sub-sequence\n"); |
| 297 | - return 0; | |
| 297 | + return NULL; | |
| 298 | 298 | } |
| 299 | 299 | } |
| 300 | 300 | dbg("Have %u sseqs for type %u\n", nseq->alloc, type); |
| ... | ... | @@ -319,7 +319,7 @@ |
| 319 | 319 | |
| 320 | 320 | publ = publ_create(type, lower, upper, scope, node, port, key); |
| 321 | 321 | if (!publ) |
| 322 | - return 0; | |
| 322 | + return NULL; | |
| 323 | 323 | dbg("inserting publ %x, node=%x publ->node=%x, subscr->node=%x\n", |
| 324 | 324 | publ, node, publ->node, publ->subscr.node); |
| 325 | 325 | |
| ... | ... | @@ -394,7 +394,7 @@ |
| 394 | 394 | i, &nseq->sseqs[i], nseq->sseqs[i].lower, |
| 395 | 395 | nseq->sseqs[i].upper); |
| 396 | 396 | } |
| 397 | - return 0; | |
| 397 | + return NULL; | |
| 398 | 398 | } |
| 399 | 399 | dbg("nameseq_remove: seq: %x, sseq %x, <%u,%u> key %u\n", |
| 400 | 400 | nseq, sseq, nseq->type, inst, key); |
| ... | ... | @@ -413,7 +413,7 @@ |
| 413 | 413 | prev->zone_list_next = publ->zone_list_next; |
| 414 | 414 | sseq->zone_list = publ->zone_list_next; |
| 415 | 415 | } else { |
| 416 | - sseq->zone_list = 0; | |
| 416 | + sseq->zone_list = NULL; | |
| 417 | 417 | } |
| 418 | 418 | |
| 419 | 419 | if (in_own_cluster(node)) { |
| ... | ... | @@ -431,7 +431,7 @@ |
| 431 | 431 | prev->cluster_list_next = publ->cluster_list_next; |
| 432 | 432 | sseq->cluster_list = publ->cluster_list_next; |
| 433 | 433 | } else { |
| 434 | - sseq->cluster_list = 0; | |
| 434 | + sseq->cluster_list = NULL; | |
| 435 | 435 | } |
| 436 | 436 | } |
| 437 | 437 | |
| ... | ... | @@ -450,7 +450,7 @@ |
| 450 | 450 | prev->node_list_next = publ->node_list_next; |
| 451 | 451 | sseq->node_list = publ->node_list_next; |
| 452 | 452 | } else { |
| 453 | - sseq->node_list = 0; | |
| 453 | + sseq->node_list = NULL; | |
| 454 | 454 | } |
| 455 | 455 | } |
| 456 | 456 | assert(!publ->node || (publ->node == node)); |
| ... | ... | @@ -535,7 +535,7 @@ |
| 535 | 535 | } |
| 536 | 536 | } |
| 537 | 537 | |
| 538 | - return 0; | |
| 538 | + return NULL; | |
| 539 | 539 | }; |
| 540 | 540 | |
| 541 | 541 | struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, |
| ... | ... | @@ -547,7 +547,7 @@ |
| 547 | 547 | if (lower > upper) { |
| 548 | 548 | warn("Failed to publish illegal <%u,%u,%u>\n", |
| 549 | 549 | type, lower, upper); |
| 550 | - return 0; | |
| 550 | + return NULL; | |
| 551 | 551 | } |
| 552 | 552 | |
| 553 | 553 | dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node); |
| ... | ... | @@ -556,7 +556,7 @@ |
| 556 | 556 | dbg("tipc_nametbl_insert_publ: created %x\n", seq); |
| 557 | 557 | } |
| 558 | 558 | if (!seq) |
| 559 | - return 0; | |
| 559 | + return NULL; | |
| 560 | 560 | |
| 561 | 561 | assert(seq->type == type); |
| 562 | 562 | return tipc_nameseq_insert_publ(seq, type, lower, upper, |
| ... | ... | @@ -570,7 +570,7 @@ |
| 570 | 570 | struct name_seq *seq = nametbl_find_seq(type); |
| 571 | 571 | |
| 572 | 572 | if (!seq) |
| 573 | - return 0; | |
| 573 | + return NULL; | |
| 574 | 574 | |
| 575 | 575 | dbg("Withdrawing <%u,%u> from %x\n", type, lower, node); |
| 576 | 576 | publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key); |
| ... | ... | @@ -594,7 +594,7 @@ |
| 594 | 594 | u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) |
| 595 | 595 | { |
| 596 | 596 | struct sub_seq *sseq; |
| 597 | - struct publication *publ = 0; | |
| 597 | + struct publication *publ = NULL; | |
| 598 | 598 | struct name_seq *seq; |
| 599 | 599 | u32 ref; |
| 600 | 600 | |
| 601 | 601 | |
| ... | ... | @@ -740,12 +740,12 @@ |
| 740 | 740 | if (table.local_publ_count >= tipc_max_publications) { |
| 741 | 741 | warn("Failed publish: max %u local publication\n", |
| 742 | 742 | tipc_max_publications); |
| 743 | - return 0; | |
| 743 | + return NULL; | |
| 744 | 744 | } |
| 745 | 745 | if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) { |
| 746 | 746 | warn("Failed to publish reserved name <%u,%u,%u>\n", |
| 747 | 747 | type, lower, upper); |
| 748 | - return 0; | |
| 748 | + return NULL; | |
| 749 | 749 | } |
| 750 | 750 | |
| 751 | 751 | write_lock_bh(&tipc_nametbl_lock); |
net/tipc/net.c
| ... | ... | @@ -116,7 +116,7 @@ |
| 116 | 116 | */ |
| 117 | 117 | |
| 118 | 118 | rwlock_t tipc_net_lock = RW_LOCK_UNLOCKED; |
| 119 | -struct network tipc_net = { 0 }; | |
| 119 | +struct network tipc_net = { NULL }; | |
| 120 | 120 | |
| 121 | 121 | struct node *tipc_net_select_remote_node(u32 addr, u32 ref) |
| 122 | 122 | { |
| ... | ... | @@ -181,7 +181,7 @@ |
| 181 | 181 | tipc_zone_delete(tipc_net.zones[z_num]); |
| 182 | 182 | } |
| 183 | 183 | kfree(tipc_net.zones); |
| 184 | - tipc_net.zones = 0; | |
| 184 | + tipc_net.zones = NULL; | |
| 185 | 185 | } |
| 186 | 186 | |
| 187 | 187 | static void net_route_named_msg(struct sk_buff *buf) |
net/tipc/node.c
| ... | ... | @@ -155,7 +155,7 @@ |
| 155 | 155 | u32 i; |
| 156 | 156 | u32 highest_prio = 0; |
| 157 | 157 | |
| 158 | - active[0] = active[1] = 0; | |
| 158 | + active[0] = active[1] = NULL; | |
| 159 | 159 | |
| 160 | 160 | for (i = 0; i < MAX_BEARERS; i++) { |
| 161 | 161 | struct link *l_ptr = n_ptr->links[i]; |
| ... | ... | @@ -240,7 +240,7 @@ |
| 240 | 240 | |
| 241 | 241 | err("Attempt to create third link to %s\n", |
| 242 | 242 | addr_string_fill(addr_string, n_ptr->addr)); |
| 243 | - return 0; | |
| 243 | + return NULL; | |
| 244 | 244 | } |
| 245 | 245 | |
| 246 | 246 | if (!n_ptr->links[bearer_id]) { |
| 247 | 247 | |
| ... | ... | @@ -253,12 +253,12 @@ |
| 253 | 253 | l_ptr->b_ptr->publ.name, |
| 254 | 254 | addr_string_fill(addr_string, l_ptr->addr)); |
| 255 | 255 | } |
| 256 | - return 0; | |
| 256 | + return NULL; | |
| 257 | 257 | } |
| 258 | 258 | |
| 259 | 259 | void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr) |
| 260 | 260 | { |
| 261 | - n_ptr->links[l_ptr->b_ptr->identity] = 0; | |
| 261 | + n_ptr->links[l_ptr->b_ptr->identity] = NULL; | |
| 262 | 262 | tipc_net.zones[tipc_zone(l_ptr->addr)]->links--; |
| 263 | 263 | n_ptr->link_cnt--; |
| 264 | 264 | } |
| ... | ... | @@ -424,7 +424,7 @@ |
| 424 | 424 | |
| 425 | 425 | /* Notify subscribers */ |
| 426 | 426 | list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) { |
| 427 | - ns->node = 0; | |
| 427 | + ns->node = NULL; | |
| 428 | 428 | list_del_init(&ns->nodesub_list); |
| 429 | 429 | tipc_k_signal((Handler)ns->handle_node_down, |
| 430 | 430 | (unsigned long)ns->usr_handle); |
| ... | ... | @@ -443,7 +443,7 @@ |
| 443 | 443 | u32 router_addr; |
| 444 | 444 | |
| 445 | 445 | if (!tipc_addr_domain_valid(addr)) |
| 446 | - return 0; | |
| 446 | + return NULL; | |
| 447 | 447 | |
| 448 | 448 | /* Look for direct link to destination processsor */ |
| 449 | 449 | n_ptr = tipc_node_find(addr); |
| ... | ... | @@ -452,7 +452,7 @@ |
| 452 | 452 | |
| 453 | 453 | /* Cluster local system nodes *must* have direct links */ |
| 454 | 454 | if (!is_slave(addr) && in_own_cluster(addr)) |
| 455 | - return 0; | |
| 455 | + return NULL; | |
| 456 | 456 | |
| 457 | 457 | /* Look for cluster local router with direct link to node */ |
| 458 | 458 | router_addr = tipc_node_select_router(n_ptr, selector); |
| ... | ... | @@ -462,7 +462,7 @@ |
| 462 | 462 | /* Slave nodes can only be accessed within own cluster via a |
| 463 | 463 | known router with direct link -- if no router was found,give up */ |
| 464 | 464 | if (is_slave(addr)) |
| 465 | - return 0; | |
| 465 | + return NULL; | |
| 466 | 466 | |
| 467 | 467 | /* Inter zone/cluster -- find any direct link to remote cluster */ |
| 468 | 468 | addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0); |
| ... | ... | @@ -475,7 +475,7 @@ |
| 475 | 475 | if (router_addr) |
| 476 | 476 | return tipc_node_select(router_addr, selector); |
| 477 | 477 | |
| 478 | - return 0; | |
| 478 | + return NULL; | |
| 479 | 479 | } |
| 480 | 480 | |
| 481 | 481 | /** |
net/tipc/node.h
net/tipc/node_subscr.c
| ... | ... | @@ -47,7 +47,7 @@ |
| 47 | 47 | void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr, |
| 48 | 48 | void *usr_handle, net_ev_handler handle_down) |
| 49 | 49 | { |
| 50 | - node_sub->node = 0; | |
| 50 | + node_sub->node = NULL; | |
| 51 | 51 | if (addr == tipc_own_addr) |
| 52 | 52 | return; |
| 53 | 53 | if (!tipc_addr_node_valid(addr)) { |
net/tipc/port.c
| ... | ... | @@ -54,8 +54,8 @@ |
| 54 | 54 | |
| 55 | 55 | #define MAX_REJECT_SIZE 1024 |
| 56 | 56 | |
| 57 | -static struct sk_buff *msg_queue_head = 0; | |
| 58 | -static struct sk_buff *msg_queue_tail = 0; | |
| 57 | +static struct sk_buff *msg_queue_head = NULL; | |
| 58 | +static struct sk_buff *msg_queue_tail = NULL; | |
| 59 | 59 | |
| 60 | 60 | spinlock_t tipc_port_list_lock = SPIN_LOCK_UNLOCKED; |
| 61 | 61 | static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED; |
| 62 | 62 | |
| ... | ... | @@ -258,11 +258,11 @@ |
| 258 | 258 | p_ptr->publ.usr_handle = usr_handle; |
| 259 | 259 | INIT_LIST_HEAD(&p_ptr->wait_list); |
| 260 | 260 | INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list); |
| 261 | - p_ptr->congested_link = 0; | |
| 261 | + p_ptr->congested_link = NULL; | |
| 262 | 262 | p_ptr->max_pkt = MAX_PKT_DEFAULT; |
| 263 | 263 | p_ptr->dispatcher = dispatcher; |
| 264 | 264 | p_ptr->wakeup = wakeup; |
| 265 | - p_ptr->user_port = 0; | |
| 265 | + p_ptr->user_port = NULL; | |
| 266 | 266 | k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref); |
| 267 | 267 | spin_lock_bh(&tipc_port_list_lock); |
| 268 | 268 | INIT_LIST_HEAD(&p_ptr->publications); |
| 269 | 269 | |
| ... | ... | @@ -276,9 +276,9 @@ |
| 276 | 276 | int tipc_deleteport(u32 ref) |
| 277 | 277 | { |
| 278 | 278 | struct port *p_ptr; |
| 279 | - struct sk_buff *buf = 0; | |
| 279 | + struct sk_buff *buf = NULL; | |
| 280 | 280 | |
| 281 | - tipc_withdraw(ref, 0, 0); | |
| 281 | + tipc_withdraw(ref, 0, NULL); | |
| 282 | 282 | p_ptr = tipc_port_lock(ref); |
| 283 | 283 | if (!p_ptr) |
| 284 | 284 | return -EINVAL; |
| ... | ... | @@ -329,7 +329,7 @@ |
| 329 | 329 | |
| 330 | 330 | p_ptr = tipc_port_lock(ref); |
| 331 | 331 | if (!p_ptr) |
| 332 | - return 0; | |
| 332 | + return NULL; | |
| 333 | 333 | handle = p_ptr->publ.usr_handle; |
| 334 | 334 | tipc_port_unlock(p_ptr); |
| 335 | 335 | return handle; |
| ... | ... | @@ -475,7 +475,7 @@ |
| 475 | 475 | |
| 476 | 476 | /* send self-abort message when rejecting on a connected port */ |
| 477 | 477 | if (msg_connected(msg)) { |
| 478 | - struct sk_buff *abuf = 0; | |
| 478 | + struct sk_buff *abuf = NULL; | |
| 479 | 479 | struct port *p_ptr = tipc_port_lock(msg_destport(msg)); |
| 480 | 480 | |
| 481 | 481 | if (p_ptr) { |
| ... | ... | @@ -510,7 +510,7 @@ |
| 510 | 510 | static void port_timeout(unsigned long ref) |
| 511 | 511 | { |
| 512 | 512 | struct port *p_ptr = tipc_port_lock(ref); |
| 513 | - struct sk_buff *buf = 0; | |
| 513 | + struct sk_buff *buf = NULL; | |
| 514 | 514 | |
| 515 | 515 | if (!p_ptr || !p_ptr->publ.connected) |
| 516 | 516 | return; |
| ... | ... | @@ -540,7 +540,7 @@ |
| 540 | 540 | static void port_handle_node_down(unsigned long ref) |
| 541 | 541 | { |
| 542 | 542 | struct port *p_ptr = tipc_port_lock(ref); |
| 543 | - struct sk_buff* buf = 0; | |
| 543 | + struct sk_buff* buf = NULL; | |
| 544 | 544 | |
| 545 | 545 | if (!p_ptr) |
| 546 | 546 | return; |
| ... | ... | @@ -555,7 +555,7 @@ |
| 555 | 555 | u32 imp = msg_importance(&p_ptr->publ.phdr); |
| 556 | 556 | |
| 557 | 557 | if (!p_ptr->publ.connected) |
| 558 | - return 0; | |
| 558 | + return NULL; | |
| 559 | 559 | if (imp < TIPC_CRITICAL_IMPORTANCE) |
| 560 | 560 | imp++; |
| 561 | 561 | return port_build_proto_msg(p_ptr->publ.ref, |
| ... | ... | @@ -575,7 +575,7 @@ |
| 575 | 575 | u32 imp = msg_importance(&p_ptr->publ.phdr); |
| 576 | 576 | |
| 577 | 577 | if (!p_ptr->publ.connected) |
| 578 | - return 0; | |
| 578 | + return NULL; | |
| 579 | 579 | if (imp < TIPC_CRITICAL_IMPORTANCE) |
| 580 | 580 | imp++; |
| 581 | 581 | return port_build_proto_msg(port_peerport(p_ptr), |
| ... | ... | @@ -594,8 +594,8 @@ |
| 594 | 594 | struct tipc_msg *msg = buf_msg(buf); |
| 595 | 595 | struct port *p_ptr = tipc_port_lock(msg_destport(msg)); |
| 596 | 596 | u32 err = TIPC_OK; |
| 597 | - struct sk_buff *r_buf = 0; | |
| 598 | - struct sk_buff *abort_buf = 0; | |
| 597 | + struct sk_buff *r_buf = NULL; | |
| 598 | + struct sk_buff *abort_buf = NULL; | |
| 599 | 599 | |
| 600 | 600 | msg_dbg(msg, "PORT<RECV<:"); |
| 601 | 601 | |
| ... | ... | @@ -804,7 +804,7 @@ |
| 804 | 804 | |
| 805 | 805 | spin_lock_bh(&queue_lock); |
| 806 | 806 | buf = msg_queue_head; |
| 807 | - msg_queue_head = 0; | |
| 807 | + msg_queue_head = NULL; | |
| 808 | 808 | spin_unlock_bh(&queue_lock); |
| 809 | 809 | |
| 810 | 810 | while (buf) { |
| ... | ... | @@ -991,8 +991,8 @@ |
| 991 | 991 | { |
| 992 | 992 | struct port *p_ptr; |
| 993 | 993 | struct user_port *up_ptr; |
| 994 | - tipc_continue_event cb = 0; | |
| 995 | - void *uh = 0; | |
| 994 | + tipc_continue_event cb = NULL; | |
| 995 | + void *uh = NULL; | |
| 996 | 996 | |
| 997 | 997 | p_ptr = tipc_port_lock(ref); |
| 998 | 998 | if (p_ptr) { |
| ... | ... | @@ -1016,7 +1016,7 @@ |
| 1016 | 1016 | void tipc_acknowledge(u32 ref, u32 ack) |
| 1017 | 1017 | { |
| 1018 | 1018 | struct port *p_ptr; |
| 1019 | - struct sk_buff *buf = 0; | |
| 1019 | + struct sk_buff *buf = NULL; | |
| 1020 | 1020 | |
| 1021 | 1021 | p_ptr = tipc_port_lock(ref); |
| 1022 | 1022 | if (!p_ptr) |
| ... | ... | @@ -1062,7 +1062,7 @@ |
| 1062 | 1062 | if (up_ptr == NULL) { |
| 1063 | 1063 | return -ENOMEM; |
| 1064 | 1064 | } |
| 1065 | - ref = tipc_createport_raw(0, port_dispatcher, port_wakeup, importance); | |
| 1065 | + ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance); | |
| 1066 | 1066 | p_ptr = tipc_port_lock(ref); |
| 1067 | 1067 | if (!p_ptr) { |
| 1068 | 1068 | kfree(up_ptr); |
| ... | ... | @@ -1273,7 +1273,7 @@ |
| 1273 | 1273 | int tipc_shutdown(u32 ref) |
| 1274 | 1274 | { |
| 1275 | 1275 | struct port *p_ptr; |
| 1276 | - struct sk_buff *buf = 0; | |
| 1276 | + struct sk_buff *buf = NULL; | |
| 1277 | 1277 | |
| 1278 | 1278 | p_ptr = tipc_port_lock(ref); |
| 1279 | 1279 | if (!p_ptr) |
net/tipc/ref.c
| ... | ... | @@ -61,7 +61,7 @@ |
| 61 | 61 | * because entry 0's reference field has the form XXXX|1--1. |
| 62 | 62 | */ |
| 63 | 63 | |
| 64 | -struct ref_table tipc_ref_table = { 0 }; | |
| 64 | +struct ref_table tipc_ref_table = { NULL }; | |
| 65 | 65 | |
| 66 | 66 | static rwlock_t ref_table_lock = RW_LOCK_UNLOCKED; |
| 67 | 67 | |
| ... | ... | @@ -86,7 +86,7 @@ |
| 86 | 86 | write_lock_bh(&ref_table_lock); |
| 87 | 87 | index_mask = sz - 1; |
| 88 | 88 | for (i = sz - 1; i >= 0; i--) { |
| 89 | - table[i].object = 0; | |
| 89 | + table[i].object = NULL; | |
| 90 | 90 | table[i].lock = SPIN_LOCK_UNLOCKED; |
| 91 | 91 | table[i].data.next_plus_upper = (start & ~index_mask) + i - 1; |
| 92 | 92 | } |
| ... | ... | @@ -108,7 +108,7 @@ |
| 108 | 108 | return; |
| 109 | 109 | |
| 110 | 110 | vfree(tipc_ref_table.entries); |
| 111 | - tipc_ref_table.entries = 0; | |
| 111 | + tipc_ref_table.entries = NULL; | |
| 112 | 112 | } |
| 113 | 113 | |
| 114 | 114 | /** |
| ... | ... | @@ -173,7 +173,7 @@ |
| 173 | 173 | assert(entry->data.reference == ref); |
| 174 | 174 | |
| 175 | 175 | /* mark entry as unused */ |
| 176 | - entry->object = 0; | |
| 176 | + entry->object = NULL; | |
| 177 | 177 | if (tipc_ref_table.first_free == 0) |
| 178 | 178 | tipc_ref_table.first_free = index; |
| 179 | 179 | else |
net/tipc/ref.h
| ... | ... | @@ -92,7 +92,7 @@ |
| 92 | 92 | return r->object; |
| 93 | 93 | spin_unlock_bh(&r->lock); |
| 94 | 94 | } |
| 95 | - return 0; | |
| 95 | + return NULL; | |
| 96 | 96 | } |
| 97 | 97 | |
| 98 | 98 | /** |
| ... | ... | @@ -125,7 +125,7 @@ |
| 125 | 125 | if (likely(r->data.reference == ref)) |
| 126 | 126 | return r->object; |
| 127 | 127 | } |
| 128 | - return 0; | |
| 128 | + return NULL; | |
| 129 | 129 | } |
| 130 | 130 | |
| 131 | 131 | #endif |
net/tipc/socket.c
| ... | ... | @@ -178,7 +178,7 @@ |
| 178 | 178 | if (unlikely(protocol != 0)) |
| 179 | 179 | return -EPROTONOSUPPORT; |
| 180 | 180 | |
| 181 | - ref = tipc_createport_raw(0, &dispatch, &wakeupdispatch, TIPC_LOW_IMPORTANCE); | |
| 181 | + ref = tipc_createport_raw(NULL, &dispatch, &wakeupdispatch, TIPC_LOW_IMPORTANCE); | |
| 182 | 182 | if (unlikely(!ref)) |
| 183 | 183 | return -ENOMEM; |
| 184 | 184 | |
| ... | ... | @@ -265,7 +265,7 @@ |
| 265 | 265 | sock_lock(tsock); |
| 266 | 266 | buf = skb_dequeue(&sk->sk_receive_queue); |
| 267 | 267 | if (!buf) |
| 268 | - tsock->p->usr_handle = 0; | |
| 268 | + tsock->p->usr_handle = NULL; | |
| 269 | 269 | sock_unlock(tsock); |
| 270 | 270 | if (!buf) |
| 271 | 271 | break; |
| ... | ... | @@ -319,7 +319,7 @@ |
| 319 | 319 | return -ERESTARTSYS; |
| 320 | 320 | |
| 321 | 321 | if (unlikely(!uaddr_len)) { |
| 322 | - res = tipc_withdraw(tsock->p->ref, 0, 0); | |
| 322 | + res = tipc_withdraw(tsock->p->ref, 0, NULL); | |
| 323 | 323 | goto exit; |
| 324 | 324 | } |
| 325 | 325 | |
| ... | ... | @@ -1226,7 +1226,7 @@ |
| 1226 | 1226 | { |
| 1227 | 1227 | struct tipc_sock *tsock = tipc_sk(sock->sk); |
| 1228 | 1228 | struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; |
| 1229 | - struct msghdr m = {0,}; | |
| 1229 | + struct msghdr m = {NULL,}; | |
| 1230 | 1230 | struct sk_buff *buf; |
| 1231 | 1231 | struct tipc_msg *msg; |
| 1232 | 1232 | int res; |
| ... | ... | @@ -1251,7 +1251,7 @@ |
| 1251 | 1251 | /* Send a 'SYN-' to destination */ |
| 1252 | 1252 | |
| 1253 | 1253 | m.msg_name = dest; |
| 1254 | - if ((res = send_msg(0, sock, &m, 0)) < 0) { | |
| 1254 | + if ((res = send_msg(NULL, sock, &m, 0)) < 0) { | |
| 1255 | 1255 | sock->state = SS_DISCONNECTING; |
| 1256 | 1256 | return res; |
| 1257 | 1257 | } |
| 1258 | 1258 | |
| ... | ... | @@ -1367,9 +1367,9 @@ |
| 1367 | 1367 | |
| 1368 | 1368 | msg_dbg(msg,"<ACC<: "); |
| 1369 | 1369 | if (!msg_data_sz(msg)) { |
| 1370 | - struct msghdr m = {0,}; | |
| 1370 | + struct msghdr m = {NULL,}; | |
| 1371 | 1371 | |
| 1372 | - send_packet(0, newsock, &m, 0); | |
| 1372 | + send_packet(NULL, newsock, &m, 0); | |
| 1373 | 1373 | advance_queue(tsock); |
| 1374 | 1374 | } else { |
| 1375 | 1375 | sock_lock(tsock); |
net/tipc/subscr.c
| ... | ... | @@ -381,7 +381,7 @@ |
| 381 | 381 | struct tipc_name_seq const *dest) |
| 382 | 382 | { |
| 383 | 383 | struct subscriber *subscriber; |
| 384 | - struct iovec msg_sect = {0, 0}; | |
| 384 | + struct iovec msg_sect = {NULL, 0}; | |
| 385 | 385 | spinlock_t *subscriber_lock; |
| 386 | 386 | |
| 387 | 387 | dbg("subscr_named_msg_event: orig = %x own = %x,\n", |
| 388 | 388 | |
| 389 | 389 | |
| ... | ... | @@ -413,13 +413,13 @@ |
| 413 | 413 | tipc_createport(topsrv.user_ref, |
| 414 | 414 | (void *)(unsigned long)subscriber->ref, |
| 415 | 415 | importance, |
| 416 | - 0, | |
| 417 | - 0, | |
| 416 | + NULL, | |
| 417 | + NULL, | |
| 418 | 418 | subscr_conn_shutdown_event, |
| 419 | - 0, | |
| 420 | - 0, | |
| 419 | + NULL, | |
| 420 | + NULL, | |
| 421 | 421 | subscr_conn_msg_event, |
| 422 | - 0, | |
| 422 | + NULL, | |
| 423 | 423 | &subscriber->port_ref); |
| 424 | 424 | if (subscriber->port_ref == 0) { |
| 425 | 425 | warn("Memory squeeze; failed to create subscription port\n"); |
| 426 | 426 | |
| 427 | 427 | |
| 428 | 428 | |
| ... | ... | @@ -461,22 +461,22 @@ |
| 461 | 461 | INIT_LIST_HEAD(&topsrv.subscriber_list); |
| 462 | 462 | |
| 463 | 463 | spin_lock_bh(&topsrv.lock); |
| 464 | - res = tipc_attach(&topsrv.user_ref, 0, 0); | |
| 464 | + res = tipc_attach(&topsrv.user_ref, NULL, NULL); | |
| 465 | 465 | if (res) { |
| 466 | 466 | spin_unlock_bh(&topsrv.lock); |
| 467 | 467 | return res; |
| 468 | 468 | } |
| 469 | 469 | |
| 470 | 470 | res = tipc_createport(topsrv.user_ref, |
| 471 | - 0, | |
| 471 | + NULL, | |
| 472 | 472 | TIPC_CRITICAL_IMPORTANCE, |
| 473 | - 0, | |
| 474 | - 0, | |
| 475 | - 0, | |
| 476 | - 0, | |
| 473 | + NULL, | |
| 474 | + NULL, | |
| 475 | + NULL, | |
| 476 | + NULL, | |
| 477 | 477 | subscr_named_msg_event, |
| 478 | - 0, | |
| 479 | - 0, | |
| 478 | + NULL, | |
| 479 | + NULL, | |
| 480 | 480 | &topsrv.setup_port); |
| 481 | 481 | if (res) |
| 482 | 482 | goto failed; |
net/tipc/user_reg.c
| ... | ... | @@ -65,7 +65,7 @@ |
| 65 | 65 | #define MAX_USERID 64 |
| 66 | 66 | #define USER_LIST_SIZE ((MAX_USERID + 1) * sizeof(struct tipc_user)) |
| 67 | 67 | |
| 68 | -static struct tipc_user *users = 0; | |
| 68 | +static struct tipc_user *users = NULL; | |
| 69 | 69 | static u32 next_free_user = MAX_USERID + 1; |
| 70 | 70 | static spinlock_t reg_lock = SPIN_LOCK_UNLOCKED; |
| 71 | 71 | |
| ... | ... | @@ -149,7 +149,7 @@ |
| 149 | 149 | reg_callback(&users[id]); |
| 150 | 150 | } |
| 151 | 151 | kfree(users); |
| 152 | - users = 0; | |
| 152 | + users = NULL; | |
| 153 | 153 | } |
| 154 | 154 | |
| 155 | 155 | /** |
net/tipc/zone.c
| ... | ... | @@ -44,11 +44,11 @@ |
| 44 | 44 | |
| 45 | 45 | struct _zone *tipc_zone_create(u32 addr) |
| 46 | 46 | { |
| 47 | - struct _zone *z_ptr = 0; | |
| 47 | + struct _zone *z_ptr = NULL; | |
| 48 | 48 | u32 z_num; |
| 49 | 49 | |
| 50 | 50 | if (!tipc_addr_domain_valid(addr)) |
| 51 | - return 0; | |
| 51 | + return NULL; | |
| 52 | 52 | |
| 53 | 53 | z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC); |
| 54 | 54 | if (z_ptr != NULL) { |
| 55 | 55 | |
| ... | ... | @@ -114,10 +114,10 @@ |
| 114 | 114 | u32 c_num; |
| 115 | 115 | |
| 116 | 116 | if (!z_ptr) |
| 117 | - return 0; | |
| 117 | + return NULL; | |
| 118 | 118 | c_ptr = z_ptr->clusters[tipc_cluster(addr)]; |
| 119 | 119 | if (!c_ptr) |
| 120 | - return 0; | |
| 120 | + return NULL; | |
| 121 | 121 | n_ptr = tipc_cltr_select_node(c_ptr, ref); |
| 122 | 122 | if (n_ptr) |
| 123 | 123 | return n_ptr; |
| 124 | 124 | |
| ... | ... | @@ -126,12 +126,12 @@ |
| 126 | 126 | for (c_num = 1; c_num <= tipc_max_clusters; c_num++) { |
| 127 | 127 | c_ptr = z_ptr->clusters[c_num]; |
| 128 | 128 | if (!c_ptr) |
| 129 | - return 0; | |
| 129 | + return NULL; | |
| 130 | 130 | n_ptr = tipc_cltr_select_node(c_ptr, ref); |
| 131 | 131 | if (n_ptr) |
| 132 | 132 | return n_ptr; |
| 133 | 133 | } |
| 134 | - return 0; | |
| 134 | + return NULL; | |
| 135 | 135 | } |
| 136 | 136 | |
| 137 | 137 | u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref) |