Commit cc7ec456f82da7f89a5b376e613b3ac4311b3e9a
Committed by David S. Miller
1 parent 7180a03118
Exists in master and in 7 other branches
net_sched: cleanups
Cleanup net/sched code to current CodingStyle and practices.

Reduce inline abuse.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
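Every hunk below is one of a handful of mechanical transformations: spaces added around binary operators (e.g. `flags & NLM_F_ECHO`), `unsigned` spelled out as `unsigned int`, `__inline__` and gratuitous `inline` dropped from static functions, struct braces moved onto the declaration line, block comments converted to the kernel's leading-`*` style, `printk(KERN_...)` converted to `pr_*()`, and assignments hoisted out of `if ()` conditions. A minimal before/after sketch of the recurring patterns — illustrative only; `struct foo` and `compute()` are hypothetical stand-ins, not code from this commit:

/*
 * Illustrative sketch of the cleanups applied throughout net/sched.
 * "struct foo" and compute() are hypothetical stand-ins.
 */
struct foo {
	int val;
};

static int compute(struct foo *p)
{
	return p->val;
}

/* Before: assignment buried in the if (), no spaces around '&', inline abuse. */
static inline int old_style(struct foo *p)
{
	int x;

	if ((x = compute(p)) < 0)
		return x;
	return x&0xFF;
}

/* After: assignment hoisted out of the condition, operators spaced,
 * 'inline' dropped so the compiler decides for itself.
 */
static int new_style(struct foo *p)
{
	int x;

	x = compute(p);
	if (x < 0)
		return x;
	return x & 0xFF;
}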
Showing 41 changed files with 842 additions and 801 deletions
- net/sched/act_api.c
- net/sched/act_csum.c
- net/sched/act_gact.c
- net/sched/act_ipt.c
- net/sched/act_mirred.c
- net/sched/act_nat.c
- net/sched/act_pedit.c
- net/sched/act_police.c
- net/sched/act_simple.c
- net/sched/act_skbedit.c
- net/sched/cls_api.c
- net/sched/cls_basic.c
- net/sched/cls_cgroup.c
- net/sched/cls_flow.c
- net/sched/cls_fw.c
- net/sched/cls_route.c
- net/sched/cls_rsvp.h
- net/sched/cls_tcindex.c
- net/sched/cls_u32.c
- net/sched/em_cmp.c
- net/sched/em_meta.c
- net/sched/em_nbyte.c
- net/sched/em_text.c
- net/sched/em_u32.c
- net/sched/ematch.c
- net/sched/sch_api.c
- net/sched/sch_atm.c
- net/sched/sch_cbq.c
- net/sched/sch_dsmark.c
- net/sched/sch_fifo.c
- net/sched/sch_generic.c
- net/sched/sch_gred.c
- net/sched/sch_hfsc.c
- net/sched/sch_htb.c
- net/sched/sch_multiq.c
- net/sched/sch_netem.c
- net/sched/sch_prio.c
- net/sched/sch_red.c
- net/sched/sch_sfq.c
- net/sched/sch_tbf.c
- net/sched/sch_teql.c
net/sched/act_api.c
@@ -78,7 +78,7 @@
 		   struct tc_action *a, struct tcf_hashinfo *hinfo)
 {
 	struct tcf_common *p;
-	int err = 0, index = -1,i = 0, s_i = 0, n_i = 0;
+	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
 	struct nlattr *nest;
 
 	read_lock_bh(hinfo->lock);
@@ -126,7 +126,7 @@
 {
 	struct tcf_common *p, *s_p;
 	struct nlattr *nest;
-	int i= 0, n_i = 0;
+	int i = 0, n_i = 0;
 
 	nest = nla_nest_start(skb, a->order);
 	if (nest == NULL)
@@ -138,7 +138,7 @@
 	while (p != NULL) {
 		s_p = p->tcfc_next;
 		if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
-			 module_put(a->ops->owner);
+			module_put(a->ops->owner);
 		n_i++;
 		p = s_p;
 	}
@@ -447,7 +447,8 @@
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
-	if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
+	err = tcf_action_dump_old(skb, a, bind, ref);
+	if (err > 0) {
 		nla_nest_end(skb, nest);
 		return err;
 	}
@@ -491,7 +492,7 @@
 	struct tc_action *a;
 	struct tc_action_ops *a_o;
 	char act_name[IFNAMSIZ];
-	struct nlattr *tb[TCA_ACT_MAX+1];
+	struct nlattr *tb[TCA_ACT_MAX + 1];
 	struct nlattr *kind;
 	int err;
 
@@ -549,9 +550,9 @@
 		goto err_free;
 
 	/* module count goes up only when brand new policy is created
-	   if it exists and is only bound to in a_o->init() then
-	   ACT_P_CREATED is not returned (a zero is).
-	*/
+	 * if it exists and is only bound to in a_o->init() then
+	 * ACT_P_CREATED is not returned (a zero is).
+	 */
 	if (err != ACT_P_CREATED)
 		module_put(a_o->owner);
 	a->ops = a_o;
@@ -569,7 +570,7 @@
 struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
 				  char *name, int ovr, int bind)
 {
-	struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
+	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
 	struct tc_action *head = NULL, *act, *act_prev = NULL;
 	int err;
 	int i;
@@ -697,7 +698,7 @@
 static struct tc_action *
 tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
 {
-	struct nlattr *tb[TCA_ACT_MAX+1];
+	struct nlattr *tb[TCA_ACT_MAX + 1];
 	struct tc_action *a;
 	int index;
 	int err;
@@ -770,7 +771,7 @@
 	struct tcamsg *t;
 	struct netlink_callback dcb;
 	struct nlattr *nest;
-	struct nlattr *tb[TCA_ACT_MAX+1];
+	struct nlattr *tb[TCA_ACT_MAX + 1];
 	struct nlattr *kind;
 	struct tc_action *a = create_a(0);
 	int err = -ENOMEM;
@@ -821,7 +822,8 @@
 	nlh->nlmsg_flags |= NLM_F_ROOT;
 	module_put(a->ops->owner);
 	kfree(a);
-	err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+	err = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+			     n->nlmsg_flags & NLM_F_ECHO);
 	if (err > 0)
 		return 0;
 
@@ -842,14 +844,14 @@
 			  u32 pid, int event)
 {
 	int i, ret;
-	struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
+	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
 	struct tc_action *head = NULL, *act, *act_prev = NULL;
 
 	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
 	if (ret < 0)
 		return ret;
 
-	if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
+	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
 		if (tb[1] != NULL)
 			return tca_action_flush(net, tb[1], n, pid);
 		else
@@ -892,7 +894,7 @@
 	/* now do the delete */
 	tcf_action_destroy(head, 0);
 	ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
-			     n->nlmsg_flags&NLM_F_ECHO);
+			     n->nlmsg_flags & NLM_F_ECHO);
 	if (ret > 0)
 		return 0;
 	return ret;
@@ -936,7 +938,7 @@
 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
 	NETLINK_CB(skb).dst_group = RTNLGRP_TC;
 
-	err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
+	err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
 	if (err > 0)
 		err = 0;
 	return err;
@@ -967,7 +969,7 @@
 
 	/* dump then free all the actions after update; inserted policy
 	 * stays intact
-	 * */
+	 */
 	ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
 	for (a = act; a; a = act) {
 		act = a->next;
@@ -993,8 +995,7 @@
 		return -EINVAL;
 	}
 
-	/* n->nlmsg_flags&NLM_F_CREATE
-	 * */
+	/* n->nlmsg_flags & NLM_F_CREATE */
 	switch (n->nlmsg_type) {
 	case RTM_NEWACTION:
 		/* we are going to assume all other flags
@@ -1003,7 +1004,7 @@
 		 * but since we want avoid ambiguity (eg when flags
 		 * is zero) then just set this
 		 */
-		if (n->nlmsg_flags&NLM_F_REPLACE)
+		if (n->nlmsg_flags & NLM_F_REPLACE)
 			ovr = 1;
 replay:
 		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
@@ -1028,7 +1029,7 @@
 static struct nlattr *
 find_dump_kind(const struct nlmsghdr *n)
 {
-	struct nlattr *tb1, *tb2[TCA_ACT_MAX+1];
+	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
 	struct nlattr *nla[TCAA_MAX + 1];
 	struct nlattr *kind;
 
@@ -1071,9 +1072,8 @@
 	}
 
 	a_o = tc_lookup_action(kind);
-	if (a_o == NULL) {
+	if (a_o == NULL)
 		return 0;
-	}
 
 	memset(&a, 0, sizeof(struct tc_action));
 	a.ops = a_o;
net/sched/act_csum.c
net/sched/act_gact.c
@@ -50,7 +50,7 @@
 }
 
 typedef int (*g_rand)(struct tcf_gact *gact);
-static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ };
+static g_rand gact_rand[MAX_RAND] = { NULL, gact_net_rand, gact_determ };
 #endif /* CONFIG_GACT_PROB */
 
 static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
@@ -89,7 +89,7 @@
 		pc = tcf_hash_create(parm->index, est, a, sizeof(*gact),
 				     bind, &gact_idx_gen, &gact_hash_info);
 		if (IS_ERR(pc))
-		    return PTR_ERR(pc);
+			return PTR_ERR(pc);
 		ret = ACT_P_CREATED;
 	} else {
 		if (!ovr) {
 
@@ -205,9 +205,9 @@
 static int __init gact_init_module(void)
 {
 #ifdef CONFIG_GACT_PROB
-	printk(KERN_INFO "GACT probability on\n");
+	pr_info("GACT probability on\n");
 #else
-	printk(KERN_INFO "GACT probability NOT on\n");
+	pr_info("GACT probability NOT on\n");
 #endif
 	return tcf_register_action(&act_gact_ops);
 }
net/sched/act_ipt.c
@@ -138,7 +138,7 @@
 		pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind,
 				     &ipt_idx_gen, &ipt_hash_info);
 		if (IS_ERR(pc))
-		    return PTR_ERR(pc);
+			return PTR_ERR(pc);
 		ret = ACT_P_CREATED;
 	} else {
 		if (!ovr) {
@@ -162,7 +162,8 @@
 	if (unlikely(!t))
 		goto err2;
 
-	if ((err = ipt_init_target(t, tname, hook)) < 0)
+	err = ipt_init_target(t, tname, hook);
+	if (err < 0)
 		goto err3;
 
 	spin_lock_bh(&ipt->tcf_lock);
@@ -212,8 +213,9 @@
 	bstats_update(&ipt->tcf_bstats, skb);
 
 	/* yes, we have to worry about both in and out dev
-	 worry later - danger - this API seems to have changed
-	 from earlier kernels */
+	 * worry later - danger - this API seems to have changed
+	 * from earlier kernels
+	 */
 	par.in       = skb->dev;
 	par.out      = NULL;
 	par.hooknum  = ipt->tcfi_hook;
@@ -253,9 +255,9 @@
 	struct tc_cnt c;
 
 	/* for simple targets kernel size == user size
-	** user name = target name
-	** for foolproof you need to not assume this
-	*/
+	 * user name = target name
+	 * for foolproof you need to not assume this
+	 */
 
 	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
 	if (unlikely(!t))
net/sched/act_mirred.c
@@ -41,13 +41,13 @@
 	.lock	=	&mirred_lock,
 };
 
-static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
+static int tcf_mirred_release(struct tcf_mirred *m, int bind)
 {
 	if (m) {
 		if (bind)
 			m->tcf_bindcnt--;
 		m->tcf_refcnt--;
-		if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
+		if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
 			list_del(&m->tcfm_list);
 			if (m->tcfm_dev)
 				dev_put(m->tcfm_dev);
net/sched/act_nat.c
net/sched/act_pedit.c
@@ -70,7 +70,7 @@
 		pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
 				     &pedit_idx_gen, &pedit_hash_info);
 		if (IS_ERR(pc))
-		    return PTR_ERR(pc);
+			return PTR_ERR(pc);
 		p = to_pedit(pc);
 		keys = kmalloc(ksize, GFP_KERNEL);
 		if (keys == NULL) {
@@ -127,11 +127,9 @@
 	int i, munged = 0;
 	unsigned int off;
 
-	if (skb_cloned(skb)) {
-		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
-			return p->tcf_action;
-		}
-	}
+	if (skb_cloned(skb) &&
+	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+		return p->tcf_action;
 
 	off = skb_network_offset(skb);
 
net/sched/act_police.c
@@ -22,8 +22,8 @@
 #include <net/act_api.h>
 #include <net/netlink.h>
 
-#define L2T(p,L)   qdisc_l2t((p)->tcfp_R_tab, L)
-#define L2T_P(p,L) qdisc_l2t((p)->tcfp_P_tab, L)
+#define L2T(p, L)   qdisc_l2t((p)->tcfp_R_tab, L)
+#define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L)
 
 #define POL_TAB_MASK     15
 static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
@@ -37,8 +37,7 @@
 };
 
 /* old policer structure from before tc actions */
-struct tc_police_compat
-{
+struct tc_police_compat {
 	u32			index;
 	int			action;
 	u32			limit;
@@ -139,7 +138,7 @@
 static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est,
 				 struct tc_action *a, int ovr, int bind)
 {
-	unsigned h;
+	unsigned int h;
 	int ret = 0, err;
 	struct nlattr *tb[TCA_POLICE_MAX + 1];
 	struct tc_police *parm;
net/sched/act_simple.c
@@ -47,7 +47,7 @@
 	/* print policy string followed by _ then packet count
 	 * Example if this was the 3rd packet and the string was "hello"
 	 * then it would look like "hello_3" (without quotes)
-	 **/
+	 */
 	pr_info("simple: %s_%d\n",
 	       (char *)d->tcfd_defdata, d->tcf_bstats.packets);
 	spin_unlock(&d->tcf_lock);
@@ -125,7 +125,7 @@
 		pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
 				     &simp_idx_gen, &simp_hash_info);
 		if (IS_ERR(pc))
-		    return PTR_ERR(pc);
+			return PTR_ERR(pc);
 
 		d = to_defact(pc);
 		ret = alloc_defdata(d, defdata);
@@ -149,7 +149,7 @@
 	return ret;
 }
 
-static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
+static int tcf_simp_cleanup(struct tc_action *a, int bind)
 {
 	struct tcf_defact *d = a->priv;
 
@@ -158,8 +158,8 @@
 	return 0;
 }
 
-static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
-				int bind, int ref)
+static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
+			 int bind, int ref)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_defact *d = a->priv;
net/sched/act_skbedit.c
@@ -113,7 +113,7 @@
 		pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
 				     &skbedit_idx_gen, &skbedit_hash_info);
 		if (IS_ERR(pc))
-		    return PTR_ERR(pc);
+			return PTR_ERR(pc);
 
 		d = to_skbedit(pc);
 		ret = ACT_P_CREATED;
@@ -144,7 +144,7 @@
 	return ret;
 }
 
-static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
+static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
 {
 	struct tcf_skbedit *d = a->priv;
 
@@ -153,8 +153,8 @@
 	return 0;
 }
 
-static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
-				   int bind, int ref)
+static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
+			    int bind, int ref)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_skbedit *d = a->priv;
net/sched/cls_api.c
@@ -85,7 +85,7 @@
 	int rc = -ENOENT;
 
 	write_lock(&cls_mod_lock);
-	for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next)
+	for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
 		if (t == ops)
 			break;
 
@@ -111,7 +111,7 @@
 	u32 first = TC_H_MAKE(0xC0000000U, 0U);
 
 	if (tp)
-		first = tp->prio-1;
+		first = tp->prio - 1;
 
 	return first;
 }
@@ -149,7 +149,8 @@
 
 	if (prio == 0) {
 		/* If no priority is given, user wants we allocated it. */
-		if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
+		if (n->nlmsg_type != RTM_NEWTFILTER ||
+		    !(n->nlmsg_flags & NLM_F_CREATE))
 			return -ENOENT;
 		prio = TC_H_MAKE(0x80000000U, 0U);
 	}
@@ -176,7 +177,8 @@
 	}
 
 	/* Is it classful? */
-	if ((cops = q->ops->cl_ops) == NULL)
+	cops = q->ops->cl_ops;
+	if (!cops)
 		return -EINVAL;
 
 	if (cops->tcf_chain == NULL)
 
@@ -196,10 +198,11 @@
 		goto errout;
 
 	/* Check the chain for existence of proto-tcf with this priority */
-	for (back = chain; (tp=*back) != NULL; back = &tp->next) {
+	for (back = chain; (tp = *back) != NULL; back = &tp->next) {
 		if (tp->prio >= prio) {
 			if (tp->prio == prio) {
-				if (!nprio || (tp->protocol != protocol && protocol))
+				if (!nprio ||
+				    (tp->protocol != protocol && protocol))
 					goto errout;
 			} else
 				tp = NULL;
@@ -216,7 +219,8 @@
 			goto errout;
 
 		err = -ENOENT;
-		if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
+		if (n->nlmsg_type != RTM_NEWTFILTER ||
+		    !(n->nlmsg_flags & NLM_F_CREATE))
 			goto errout;
 
@@ -420,7 +424,8 @@
 
 	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
 		return skb->len;
-	if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+	if (!dev)
 		return skb->len;
 
 	if (!tcm->tcm_parent)
@@ -429,7 +434,8 @@
 		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
 	if (!q)
 		goto out;
-	if ((cops = q->ops->cl_ops) == NULL)
+	cops = q->ops->cl_ops;
+	if (!cops)
 		goto errout;
 	if (cops->tcf_chain == NULL)
 		goto errout;
@@ -444,8 +450,9 @@
 
 	s_t = cb->args[0];
 
-	for (tp=*chain, t=0; tp; tp = tp->next, t++) {
-		if (t < s_t) continue;
+	for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
+		if (t < s_t)
+			continue;
 		if (TC_H_MAJ(tcm->tcm_info) &&
 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
 			continue;
 
@@ -468,10 +475,10 @@
 			arg.skb = skb;
 			arg.cb = cb;
 			arg.w.stop = 0;
-			arg.w.skip = cb->args[1]-1;
+			arg.w.skip = cb->args[1] - 1;
 			arg.w.count = 0;
 			tp->ops->walk(tp, &arg.w);
-			cb->args[1] = arg.w.count+1;
+			cb->args[1] = arg.w.count + 1;
 			if (arg.w.stop)
 				break;
 		}
net/sched/cls_basic.c
@@ -21,14 +21,12 @@
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
 
-struct basic_head
-{
+struct basic_head {
 	u32			hgenerator;
 	struct list_head	flist;
 };
 
-struct basic_filter
-{
+struct basic_filter {
 	u32			handle;
 	struct tcf_exts		exts;
 	struct tcf_ematch_tree	ematches;
@@ -92,8 +90,7 @@
 	return 0;
 }
 
-static inline void basic_delete_filter(struct tcf_proto *tp,
-				       struct basic_filter *f)
+static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f)
 {
 	tcf_unbind_filter(tp, &f->res);
 	tcf_exts_destroy(tp, &f->exts);
@@ -135,9 +132,9 @@
 	[TCA_BASIC_EMATCHES]	= { .type = NLA_NESTED },
 };
 
-static inline int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
-				  unsigned long base, struct nlattr **tb,
-				  struct nlattr *est)
+static int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
+			   unsigned long base, struct nlattr **tb,
+			   struct nlattr *est)
 {
 	int err = -EINVAL;
 	struct tcf_exts e;
@@ -203,7 +200,7 @@
 	} while (--i > 0 && basic_get(tp, head->hgenerator));
 
 	if (i <= 0) {
-		printk(KERN_ERR "Insufficient number of handles\n");
+		pr_err("Insufficient number of handles\n");
 		goto errout;
 	}
 
net/sched/cls_cgroup.c
@@ -56,7 +56,8 @@
 {
 	struct cgroup_cls_state *cs;
 
-	if (!(cs = kzalloc(sizeof(*cs), GFP_KERNEL)))
+	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+	if (!cs)
 		return ERR_PTR(-ENOMEM);
 
 	if (cgrp->parent)
@@ -94,8 +95,7 @@
 	return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
 }
 
-struct cls_cgroup_head
-{
+struct cls_cgroup_head {
 	u32			handle;
 	struct tcf_exts		exts;
 	struct tcf_ematch_tree	ematches;
@@ -166,7 +166,7 @@
 			     u32 handle, struct nlattr **tca,
 			     unsigned long *arg)
 {
-	struct nlattr *tb[TCA_CGROUP_MAX+1];
+	struct nlattr *tb[TCA_CGROUP_MAX + 1];
 	struct cls_cgroup_head *head = tp->root;
 	struct tcf_ematch_tree t;
 	struct tcf_exts e;
net/sched/cls_flow.c
@@ -121,7 +121,7 @@
 		if (!pskb_network_may_pull(skb, sizeof(*iph)))
 			break;
 		iph = ip_hdr(skb);
-		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+		if (iph->frag_off & htons(IP_MF | IP_OFFSET))
 			break;
 		poff = proto_ports_offset(iph->protocol);
 		if (poff >= 0 &&
@@ -163,7 +163,7 @@
 		if (!pskb_network_may_pull(skb, sizeof(*iph)))
 			break;
 		iph = ip_hdr(skb);
-		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+		if (iph->frag_off & htons(IP_MF | IP_OFFSET))
 			break;
 		poff = proto_ports_offset(iph->protocol);
 		if (poff >= 0 &&
net/sched/cls_fw.c
@@ -31,14 +31,12 @@
 
 #define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *))
 
-struct fw_head
-{
+struct fw_head {
 	struct fw_filter *ht[HTSIZE];
 	u32 mask;
 };
 
-struct fw_filter
-{
+struct fw_filter {
 	struct fw_filter	*next;
 	u32			id;
 	struct tcf_result	res;
@@ -53,7 +51,7 @@
 	.police = TCA_FW_POLICE
 };
 
-static __inline__ int fw_hash(u32 handle)
+static inline int fw_hash(u32 handle)
 {
 	if (HTSIZE == 4096)
 		return ((handle >> 24) & 0xFFF) ^
 
@@ -82,14 +80,14 @@
 static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
 			  struct tcf_result *res)
 {
-	struct fw_head *head = (struct fw_head*)tp->root;
+	struct fw_head *head = (struct fw_head *)tp->root;
 	struct fw_filter *f;
 	int r;
 	u32 id = skb->mark;
 
 	if (head != NULL) {
 		id &= head->mask;
-		for (f=head->ht[fw_hash(id)]; f; f=f->next) {
+		for (f = head->ht[fw_hash(id)]; f; f = f->next) {
 			if (f->id == id) {
 				*res = f->res;
 #ifdef CONFIG_NET_CLS_IND
@@ -105,7 +103,8 @@
 		}
 	} else {
 		/* old method */
-		if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id^tp->q->handle)))) {
+		if (id && (TC_H_MAJ(id) == 0 ||
+			   !(TC_H_MAJ(id ^ tp->q->handle)))) {
 			res->classid = id;
 			res->class = 0;
 			return 0;
 
@@ -117,13 +116,13 @@
 
 static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
 {
-	struct fw_head *head = (struct fw_head*)tp->root;
+	struct fw_head *head = (struct fw_head *)tp->root;
 	struct fw_filter *f;
 
 	if (head == NULL)
 		return 0;
 
-	for (f=head->ht[fw_hash(handle)]; f; f=f->next) {
+	for (f = head->ht[fw_hash(handle)]; f; f = f->next) {
 		if (f->id == handle)
 			return (unsigned long)f;
 	}
@@ -139,8 +138,7 @@
 	return 0;
 }
 
-static inline void
-fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
+static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
 {
 	tcf_unbind_filter(tp, &f->res);
 	tcf_exts_destroy(tp, &f->exts);
@@ -156,8 +154,8 @@
 	if (head == NULL)
 		return;
 
-	for (h=0; h<HTSIZE; h++) {
-		while ((f=head->ht[h]) != NULL) {
+	for (h = 0; h < HTSIZE; h++) {
+		while ((f = head->ht[h]) != NULL) {
 			head->ht[h] = f->next;
 			fw_delete_filter(tp, f);
 		}
 
@@ -167,14 +165,14 @@
 
 static int fw_delete(struct tcf_proto *tp, unsigned long arg)
 {
-	struct fw_head *head = (struct fw_head*)tp->root;
-	struct fw_filter *f = (struct fw_filter*)arg;
+	struct fw_head *head = (struct fw_head *)tp->root;
+	struct fw_filter *f = (struct fw_filter *)arg;
 	struct fw_filter **fp;
 
 	if (head == NULL || f == NULL)
 		goto out;
 
-	for (fp=&head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
+	for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
 		if (*fp == f) {
 			tcf_tree_lock(tp);
 			*fp = f->next;
@@ -240,7 +238,7 @@
 		     struct nlattr **tca,
 		     unsigned long *arg)
 {
-	struct fw_head *head = (struct fw_head*)tp->root;
+	struct fw_head *head = (struct fw_head *)tp->root;
 	struct fw_filter *f = (struct fw_filter *) *arg;
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_FW_MAX + 1];
@@ -302,7 +300,7 @@
 
 static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
-	struct fw_head *head = (struct fw_head*)tp->root;
+	struct fw_head *head = (struct fw_head *)tp->root;
 	int h;
 
 	if (head == NULL)
@@ -332,7 +330,7 @@
 		   struct sk_buff *skb, struct tcmsg *t)
 {
 	struct fw_head *head = (struct fw_head *)tp->root;
-	struct fw_filter *f = (struct fw_filter*)fh;
+	struct fw_filter *f = (struct fw_filter *)fh;
 	unsigned char *b = skb_tail_pointer(skb);
 	struct nlattr *nest;
 
net/sched/cls_route.c
@@ -23,34 +23,30 @@
 #include <net/pkt_cls.h>
 
 /*
- 1. For now we assume that route tags < 256.
-    It allows to use direct table lookups, instead of hash tables.
- 2. For now we assume that "from TAG" and "fromdev DEV" statements
-    are mutually exclusive.
- 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
+ * 1. For now we assume that route tags < 256.
+ *    It allows to use direct table lookups, instead of hash tables.
+ * 2. For now we assume that "from TAG" and "fromdev DEV" statements
+ *    are mutually exclusive.
+ * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
 */
 
-struct route4_fastmap
-{
+struct route4_fastmap {
 	struct route4_filter	*filter;
 	u32			id;
 	int			iif;
 };
 
-struct route4_head
-{
+struct route4_head {
 	struct route4_fastmap	fastmap[16];
-	struct route4_bucket	*table[256+1];
+	struct route4_bucket	*table[256 + 1];
 };
 
-struct route4_bucket
-{
+struct route4_bucket {
 	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
-	struct route4_filter	*ht[16+16+1];
+	struct route4_filter	*ht[16 + 16 + 1];
 };
 
-struct route4_filter
-{
+struct route4_filter {
 	struct route4_filter	*next;
 	u32			id;
 	int			iif;
 
@@ -61,20 +57,20 @@
 	struct route4_bucket	*bkt;
 };
 
-#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
+#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
 
 static const struct tcf_ext_map route_ext_map = {
 	.police = TCA_ROUTE4_POLICE,
 	.action = TCA_ROUTE4_ACT
 };
 
-static __inline__ int route4_fastmap_hash(u32 id, int iif)
+static inline int route4_fastmap_hash(u32 id, int iif)
 {
-	return id&0xF;
+	return id & 0xF;
 }
 
-static inline
-void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
+static void
+route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
 {
 	spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
 
@@ -83,32 +79,33 @@
 	spin_unlock_bh(root_lock);
 }
 
-static inline void
+static void
 route4_set_fastmap(struct route4_head *head, u32 id, int iif,
 		   struct route4_filter *f)
 {
 	int h = route4_fastmap_hash(id, iif);
+
 	head->fastmap[h].id = id;
 	head->fastmap[h].iif = iif;
 	head->fastmap[h].filter = f;
 }
 
-static __inline__ int route4_hash_to(u32 id)
+static inline int route4_hash_to(u32 id)
 {
-	return id&0xFF;
+	return id & 0xFF;
 }
 
-static __inline__ int route4_hash_from(u32 id)
+static inline int route4_hash_from(u32 id)
 {
-	return (id>>16)&0xF;
+	return (id >> 16) & 0xF;
 }
 
-static __inline__ int route4_hash_iif(int iif)
+static inline int route4_hash_iif(int iif)
 {
-	return 16 + ((iif>>16)&0xF);
+	return 16 + ((iif >> 16) & 0xF);
 }
 
-static __inline__ int route4_hash_wild(void)
+static inline int route4_hash_wild(void)
 {
 	return 32;
 }
 
@@ -131,21 +128,22 @@
 static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
 			   struct tcf_result *res)
 {
-	struct route4_head *head = (struct route4_head*)tp->root;
+	struct route4_head *head = (struct route4_head *)tp->root;
 	struct dst_entry *dst;
 	struct route4_bucket *b;
 	struct route4_filter *f;
 	u32 id, h;
 	int iif, dont_cache = 0;
 
-	if ((dst = skb_dst(skb)) == NULL)
+	dst = skb_dst(skb);
+	if (!dst)
 		goto failure;
 
 	id = dst->tclassid;
 	if (head == NULL)
 		goto old_method;
 
-	iif = ((struct rtable*)dst)->fl.iif;
+	iif = ((struct rtable *)dst)->fl.iif;
 
 	h = route4_fastmap_hash(id, iif);
 	if (id == head->fastmap[h].id &&
@@ -161,7 +159,8 @@
 	h = route4_hash_to(id);
 
 restart:
-	if ((b = head->table[h]) != NULL) {
+	b = head->table[h];
+	if (b) {
 		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
 			if (f->id == id)
 				ROUTE4_APPLY_RESULT();
@@ -197,8 +196,9 @@
 
 static inline u32 to_hash(u32 id)
 {
-	u32 h = id&0xFF;
-	if (id&0x8000)
+	u32 h = id & 0xFF;
+
+	if (id & 0x8000)
 		h += 256;
 	return h;
 }
 
@@ -211,17 +211,17 @@
 	if (!(id & 0x8000)) {
 		if (id > 255)
 			return 256;
-		return id&0xF;
+		return id & 0xF;
 	}
-	return 16 + (id&0xF);
+	return 16 + (id & 0xF);
 }
 
 static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
 {
-	struct route4_head *head = (struct route4_head*)tp->root;
+	struct route4_head *head = (struct route4_head *)tp->root;
 	struct route4_bucket *b;
 	struct route4_filter *f;
-	unsigned h1, h2;
+	unsigned int h1, h2;
 
 	if (!head)
 		return 0;
 
@@ -230,11 +230,12 @@
 	if (h1 > 256)
 		return 0;
 
-	h2 = from_hash(handle>>16);
+	h2 = from_hash(handle >> 16);
 	if (h2 > 32)
 		return 0;
 
-	if ((b = head->table[h1]) != NULL) {
+	b = head->table[h1];
+	if (b) {
 		for (f = b->ht[h2]; f; f = f->next)
 			if (f->handle == handle)
 				return (unsigned long)f;
@@ -251,7 +252,7 @@
 	return 0;
 }
 
-static inline void
+static void
 route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
 {
 	tcf_unbind_filter(tp, &f->res);
 
@@ -267,11 +268,12 @@
 	if (head == NULL)
 		return;
 
-	for (h1=0; h1<=256; h1++) {
+	for (h1 = 0; h1 <= 256; h1++) {
 		struct route4_bucket *b;
 
-		if ((b = head->table[h1]) != NULL) {
-			for (h2=0; h2<=32; h2++) {
+		b = head->table[h1];
+		if (b) {
+			for (h2 = 0; h2 <= 32; h2++) {
 				struct route4_filter *f;
 
 				while ((f = b->ht[h2]) != NULL) {
@@ -287,9 +289,9 @@
 
 static int route4_delete(struct tcf_proto *tp, unsigned long arg)
 {
-	struct route4_head *head = (struct route4_head*)tp->root;
-	struct route4_filter **fp, *f = (struct route4_filter*)arg;
-	unsigned h = 0;
+	struct route4_head *head = (struct route4_head *)tp->root;
+	struct route4_filter **fp, *f = (struct route4_filter *)arg;
+	unsigned int h = 0;
 	struct route4_bucket *b;
 	int i;
 
@@ -299,7 +301,7 @@
 	h = f->handle;
 	b = f->bkt;
 
-	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
+	for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
 		if (*fp == f) {
 			tcf_tree_lock(tp);
 			*fp = f->next;
@@ -310,7 +312,7 @@
 
 			/* Strip tree */
 
-			for (i=0; i<=32; i++)
+			for (i = 0; i <= 32; i++)
 				if (b->ht[i])
 					return 0;
 
@@ -380,7 +382,8 @@
 	}
 
 	h1 = to_hash(nhandle);
-	if ((b = head->table[h1]) == NULL) {
+	b = head->table[h1];
+	if (!b) {
 		err = -ENOBUFS;
 		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
 		if (b == NULL)
@@ -391,6 +394,7 @@
 		tcf_tree_unlock(tp);
 	} else {
 		unsigned int h2 = from_hash(nhandle >> 16);
+
 		err = -EEXIST;
 		for (fp = b->ht[h2]; fp; fp = fp->next)
 			if (fp->handle == f->handle)
@@ -444,7 +448,8 @@
 	if (err < 0)
 		return err;
 
-	if ((f = (struct route4_filter*)*arg) != NULL) {
+	f = (struct route4_filter *)*arg;
+	if (f) {
 		if (f->handle != handle && handle)
 			return -EINVAL;
 
@@ -481,7 +486,7 @@
 
 reinsert:
 	h = from_hash(f->handle >> 16);
-	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
+	for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
 		if (f->handle < f1->handle)
 			break;
 
@@ -492,7 +497,8 @@
 	if (old_handle && f->handle != old_handle) {
 		th = to_hash(old_handle);
 		h = from_hash(old_handle >> 16);
-		if ((b = head->table[th]) != NULL) {
+		b = head->table[th];
+		if (b) {
 			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
 				if (*fp == f) {
 					*fp = f->next;
@@ -515,7 +521,7 @@
 static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
 	struct route4_head *head = tp->root;
-	unsigned h, h1;
+	unsigned int h, h1;
 
 	if (head == NULL)
 		arg->stop = 1;
@@ -549,7 +555,7 @@
 static int route4_dump(struct tcf_proto *tp, unsigned long fh,
 		       struct sk_buff *skb, struct tcmsg *t)
 {
-	struct route4_filter *f = (struct route4_filter*)fh;
+	struct route4_filter *f = (struct route4_filter *)fh;
 	unsigned char *b = skb_tail_pointer(skb);
 	struct nlattr *nest;
 	u32 id;
 
@@ -563,15 +569,15 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	if (!(f->handle&0x8000)) {
-		id = f->id&0xFF;
+	if (!(f->handle & 0x8000)) {
+		id = f->id & 0xFF;
 		NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
 	}
-	if (f->handle&0x80000000) {
-		if ((f->handle>>16) != 0xFFFF)
+	if (f->handle & 0x80000000) {
+		if ((f->handle >> 16) != 0xFFFF)
 			NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
 	} else {
-		id = f->id>>16;
+		id = f->id >> 16;
 		NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
 	}
 	if (f->res.classid)
net/sched/cls_rsvp.h
@@ -66,28 +66,25 @@
    powerful classification engine.  */
 
 
-struct rsvp_head
-{
+struct rsvp_head {
 	u32			tmap[256/32];
 	u32			hgenerator;
 	u8			tgenerator;
 	struct rsvp_session	*ht[256];
 };
 
-struct rsvp_session
-{
+struct rsvp_session {
 	struct rsvp_session	*next;
 	__be32			dst[RSVP_DST_LEN];
 	struct tc_rsvp_gpi	dpi;
 	u8			protocol;
 	u8			tunnelid;
 	/* 16 (src,sport) hash slots, and one wildcard source slot */
-	struct rsvp_filter	*ht[16+1];
+	struct rsvp_filter	*ht[16 + 1];
 };
 
 
-struct rsvp_filter
-{
+struct rsvp_filter {
 	struct rsvp_filter	*next;
 	__be32			src[RSVP_DST_LEN];
 	struct tc_rsvp_gpi	spi;
 
@@ -100,17 +97,19 @@
 	struct rsvp_session	*sess;
 };
 
-static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
+static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
 {
-	unsigned h = (__force __u32)dst[RSVP_DST_LEN-1];
+	unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
+
 	h ^= h>>16;
 	h ^= h>>8;
 	return (h ^ protocol ^ tunnelid) & 0xFF;
 }
 
-static __inline__ unsigned hash_src(__be32 *src)
+static inline unsigned int hash_src(__be32 *src)
 {
-	unsigned h = (__force __u32)src[RSVP_DST_LEN-1];
+	unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
+
 	h ^= h>>16;
 	h ^= h>>8;
 	h ^= h>>4;
 
@@ -134,10 +133,10 @@
 static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
 			 struct tcf_result *res)
 {
-	struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
+	struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
 	struct rsvp_session *s;
 	struct rsvp_filter *f;
-	unsigned h1, h2;
+	unsigned int h1, h2;
 	__be32 *dst, *src;
 	u8 protocol;
 	u8 tunnelid = 0;
 
@@ -162,13 +161,13 @@
 	src = &nhptr->saddr.s6_addr32[0];
 	dst = &nhptr->daddr.s6_addr32[0];
 	protocol = nhptr->nexthdr;
-	xprt = ((u8*)nhptr) + sizeof(struct ipv6hdr);
+	xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
 #else
 	src = &nhptr->saddr;
 	dst = &nhptr->daddr;
 	protocol = nhptr->protocol;
-	xprt = ((u8*)nhptr) + (nhptr->ihl<<2);
-	if (nhptr->frag_off & htons(IP_MF|IP_OFFSET))
+	xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
+	if (nhptr->frag_off & htons(IP_MF | IP_OFFSET))
 		return -1;
 #endif
 
@@ -176,10 +175,10 @@
 	h2 = hash_src(src);
 
 	for (s = sht[h1]; s; s = s->next) {
-		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
+		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
 		    protocol == s->protocol &&
 		    !(s->dpi.mask &
-		      (*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) &&
+		      (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
 #if RSVP_DST_LEN == 4
 		    dst[0] == s->dst[0] &&
 		    dst[1] == s->dst[1] &&
@@ -188,8 +187,8 @@
 		    tunnelid == s->tunnelid) {
 
 			for (f = s->ht[h2]; f; f = f->next) {
-				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] &&
-				    !(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key))
+				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
+				    !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
 #if RSVP_DST_LEN == 4
 				    &&
 				    src[0] == f->src[0] &&
@@ -205,7 +204,7 @@
 					return 0;
 
 				tunnelid = f->res.classid;
-				nhptr = (void*)(xprt + f->tunnelhdr - sizeof(*nhptr));
+				nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
 				goto restart;
 			}
 		}
 
@@ -224,11 +223,11 @@
 
 static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
 {
-	struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
+	struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
 	struct rsvp_session *s;
 	struct rsvp_filter *f;
-	unsigned h1 = handle&0xFF;
-	unsigned h2 = (handle>>8)&0xFF;
+	unsigned int h1 = handle & 0xFF;
+	unsigned int h2 = (handle >> 8) & 0xFF;
 
 	if (h2 > 16)
 		return 0;
 
@@ -258,7 +257,7 @@
 	return -ENOBUFS;
 }
 
-static inline void
+static void
 rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
 {
 	tcf_unbind_filter(tp, &f->res);
 
@@ -277,13 +276,13 @@
 
 	sht = data->ht;
 
-	for (h1=0; h1<256; h1++) {
+	for (h1 = 0; h1 < 256; h1++) {
 		struct rsvp_session *s;
 
 		while ((s = sht[h1]) != NULL) {
 			sht[h1] = s->next;
 
-			for (h2=0; h2<=16; h2++) {
+			for (h2 = 0; h2 <= 16; h2++) {
 				struct rsvp_filter *f;
 
 				while ((f = s->ht[h2]) != NULL) {
 
@@ -299,13 +298,13 @@
 
 static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
 {
-	struct rsvp_filter **fp, *f = (struct rsvp_filter*)arg;
-	unsigned h = f->handle;
+	struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg;
+	unsigned int h = f->handle;
 	struct rsvp_session **sp;
 	struct rsvp_session *s = f->sess;
 	int i;
 
-	for (fp = &s->ht[(h>>8)&0xFF]; *fp; fp = &(*fp)->next) {
+	for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) {
 		if (*fp == f) {
 			tcf_tree_lock(tp);
 			*fp = f->next;
 
@@ -314,12 +313,12 @@
 
 			/* Strip tree */
 
-			for (i=0; i<=16; i++)
+			for (i = 0; i <= 16; i++)
 				if (s->ht[i])
 					return 0;
 
 			/* OK, session has no flows */
-			for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF];
+			for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF];
 			     *sp; sp = &(*sp)->next) {
 				if (*sp == s) {
 					tcf_tree_lock(tp);
 
@@ -337,13 +336,14 @@
 	return 0;
 }
 
-static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
+static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
 {
 	struct rsvp_head *data = tp->root;
 	int i = 0xFFFF;
 
 	while (i-- > 0) {
 		u32 h;
+
 		if ((data->hgenerator += 0x10000) == 0)
 			data->hgenerator = 0x10000;
 		h = data->hgenerator|salt;
 
@@ -355,10 +355,10 @@
 
 static int tunnel_bts(struct rsvp_head *data)
 {
-	int n = data->tgenerator>>5;
-	u32 b = 1<<(data->tgenerator&0x1F);
+	int n = data->tgenerator >> 5;
+	u32 b = 1 << (data->tgenerator & 0x1F);
 
-	if (data->tmap[n]&b)
+	if (data->tmap[n] & b)
 		return 0;
 	data->tmap[n] |= b;
 	return 1;
 
@@ -372,10 +372,10 @@
 
 	memset(tmap, 0, sizeof(tmap));
 
-	for (h1=0; h1<256; h1++) {
+	for (h1 = 0; h1 < 256; h1++) {
 		struct rsvp_session *s;
 		for (s = sht[h1]; s; s = s->next) {
-			for (h2=0; h2<=16; h2++) {
+			for (h2 = 0; h2 <= 16; h2++) {
 				struct rsvp_filter *f;
 
 				for (f = s->ht[h2]; f; f = f->next) {
@@ -395,8 +395,8 @@
 {
 	int i, k;
 
-	for (k=0; k<2; k++) {
-		for (i=255; i>0; i--) {
+	for (k = 0; k < 2; k++) {
+		for (i = 255; i > 0; i--) {
 			if (++data->tgenerator == 0)
 				data->tgenerator = 1;
 			if (tunnel_bts(data))
@@ -428,7 +428,7 @@
 	struct nlattr *opt = tca[TCA_OPTIONS-1];
 	struct nlattr *tb[TCA_RSVP_MAX + 1];
 	struct tcf_exts e;
-	unsigned h1, h2;
+	unsigned int h1, h2;
 	__be32 *dst;
 	int err;
 
@@ -443,7 +443,8 @@
 	if (err < 0)
 		return err;
 
-	if ((f = (struct rsvp_filter*)*arg) != NULL) {
+	f = (struct rsvp_filter *)*arg;
+	if (f) {
 		/* Node exists: adjust only classid */
 
 		if (f->handle != handle && handle)
@@ -500,7 +501,7 @@
 		goto errout;
 	}
 
-	for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) {
+	for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) {
 		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
 		    pinfo && pinfo->protocol == s->protocol &&
 		    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
@@ -523,7 +524,7 @@
 			tcf_exts_change(tp, &f->exts, &e);
 
 			for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next)
-				if (((*fp)->spi.mask&f->spi.mask) != f->spi.mask)
+				if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask)
 					break;
 			f->next = *fp;
 			wmb();
@@ -567,7 +568,7 @@
 static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
 	struct rsvp_head *head = tp->root;
-	unsigned h, h1;
+	unsigned int h, h1;
 
 	if (arg->stop)
 		return;
@@ -598,7 +599,7 @@
 static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
 		     struct sk_buff *skb, struct tcmsg *t)
 {
-	struct rsvp_filter *f = (struct rsvp_filter*)fh;
+	struct rsvp_filter *f = (struct rsvp_filter *)fh;
 	struct rsvp_session *s;
 	unsigned char *b = skb_tail_pointer(skb);
 	struct nlattr *nest;
@@ -624,7 +625,7 @@
 	NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
 	if (f->res.classid)
 		NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid);
-	if (((f->handle>>8)&0xFF) != 16)
+	if (((f->handle >> 8) & 0xFF) != 16)
 		NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src);
 
 	if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
net/sched/cls_tcindex.c
net/sched/cls_u32.c
... | ... | @@ -42,8 +42,7 @@ |
42 | 42 | #include <net/act_api.h> |
43 | 43 | #include <net/pkt_cls.h> |
44 | 44 | |
45 | -struct tc_u_knode | |
46 | -{ | |
45 | +struct tc_u_knode { | |
47 | 46 | struct tc_u_knode *next; |
48 | 47 | u32 handle; |
49 | 48 | struct tc_u_hnode *ht_up; |
50 | 49 | |
51 | 50 | |
... | ... | @@ -63,19 +62,17 @@ |
63 | 62 | struct tc_u32_sel sel; |
64 | 63 | }; |
65 | 64 | |
66 | -struct tc_u_hnode | |
67 | -{ | |
65 | +struct tc_u_hnode { | |
68 | 66 | struct tc_u_hnode *next; |
69 | 67 | u32 handle; |
70 | 68 | u32 prio; |
71 | 69 | struct tc_u_common *tp_c; |
72 | 70 | int refcnt; |
73 | - unsigned divisor; | |
71 | + unsigned int divisor; | |
74 | 72 | struct tc_u_knode *ht[1]; |
75 | 73 | }; |
76 | 74 | |
77 | -struct tc_u_common | |
78 | -{ | |
75 | +struct tc_u_common { | |
79 | 76 | struct tc_u_hnode *hlist; |
80 | 77 | struct Qdisc *q; |
81 | 78 | int refcnt; |
82 | 79 | |
... | ... | @@ -87,9 +84,11 @@ |
87 | 84 | .police = TCA_U32_POLICE |
88 | 85 | }; |
89 | 86 | |
90 | -static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift) | |
87 | +static inline unsigned int u32_hash_fold(__be32 key, | |
88 | + const struct tc_u32_sel *sel, | |
89 | + u8 fshift) | |
91 | 90 | { |
92 | - unsigned h = ntohl(key & sel->hmask)>>fshift; | |
91 | + unsigned int h = ntohl(key & sel->hmask) >> fshift; | |
93 | 92 | |
94 | 93 | return h; |
95 | 94 | } |
... | ... | @@ -101,7 +100,7 @@ |
101 | 100 | unsigned int off; |
102 | 101 | } stack[TC_U32_MAXDEPTH]; |
103 | 102 | |
104 | - struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; | |
103 | + struct tc_u_hnode *ht = (struct tc_u_hnode *)tp->root; | |
105 | 104 | unsigned int off = skb_network_offset(skb); |
106 | 105 | struct tc_u_knode *n; |
107 | 106 | int sdepth = 0; |
... | ... | @@ -120,7 +119,7 @@ |
120 | 119 | struct tc_u32_key *key = n->sel.keys; |
121 | 120 | |
122 | 121 | #ifdef CONFIG_CLS_U32_PERF |
123 | - n->pf->rcnt +=1; | |
122 | + n->pf->rcnt += 1; | |
124 | 123 | j = 0; |
125 | 124 | #endif |
126 | 125 | |
... | ... | @@ -133,7 +132,7 @@ |
133 | 132 | } |
134 | 133 | #endif |
135 | 134 | |
136 | - for (i = n->sel.nkeys; i>0; i--, key++) { | |
135 | + for (i = n->sel.nkeys; i > 0; i--, key++) { | |
137 | 136 | int toff = off + key->off + (off2 & key->offmask); |
138 | 137 | __be32 *data, _data; |
139 | 138 | |
140 | 139 | |
... | ... | @@ -148,13 +147,13 @@ |
148 | 147 | goto next_knode; |
149 | 148 | } |
150 | 149 | #ifdef CONFIG_CLS_U32_PERF |
151 | - n->pf->kcnts[j] +=1; | |
150 | + n->pf->kcnts[j] += 1; | |
152 | 151 | j++; |
153 | 152 | #endif |
154 | 153 | } |
155 | 154 | if (n->ht_down == NULL) { |
156 | 155 | check_terminal: |
157 | - if (n->sel.flags&TC_U32_TERMINAL) { | |
156 | + if (n->sel.flags & TC_U32_TERMINAL) { | |
158 | 157 | |
159 | 158 | *res = n->res; |
160 | 159 | #ifdef CONFIG_NET_CLS_IND |
... | ... | @@ -164,7 +163,7 @@ |
164 | 163 | } |
165 | 164 | #endif |
166 | 165 | #ifdef CONFIG_CLS_U32_PERF |
167 | - n->pf->rhit +=1; | |
166 | + n->pf->rhit += 1; | |
168 | 167 | #endif |
169 | 168 | r = tcf_exts_exec(skb, &n->exts, res); |
170 | 169 | if (r < 0) { |
171 | 170 | |
... | ... | @@ -197,10 +196,10 @@ |
197 | 196 | sel = ht->divisor & u32_hash_fold(*data, &n->sel, |
198 | 197 | n->fshift); |
199 | 198 | } |
200 | - if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT))) | |
199 | + if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT))) | |
201 | 200 | goto next_ht; |
202 | 201 | |
203 | - if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) { | |
202 | + if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) { | |
204 | 203 | off2 = n->sel.off + 3; |
205 | 204 | if (n->sel.flags & TC_U32_VAROFFSET) { |
206 | 205 | __be16 *data, _data; |
... | ... | @@ -215,7 +214,7 @@ |
215 | 214 | } |
216 | 215 | off2 &= ~3; |
217 | 216 | } |
218 | - if (n->sel.flags&TC_U32_EAT) { | |
217 | + if (n->sel.flags & TC_U32_EAT) { | |
219 | 218 | off += off2; |
220 | 219 | off2 = 0; |
221 | 220 | } |
222 | 221 | |
... | ... | @@ -236,11 +235,11 @@ |
236 | 235 | |
237 | 236 | deadloop: |
238 | 237 | if (net_ratelimit()) |
239 | - printk(KERN_WARNING "cls_u32: dead loop\n"); | |
238 | + pr_warning("cls_u32: dead loop\n"); | |
240 | 239 | return -1; |
241 | 240 | } |
242 | 241 | |
243 | -static __inline__ struct tc_u_hnode * | |
242 | +static struct tc_u_hnode * | |
244 | 243 | u32_lookup_ht(struct tc_u_common *tp_c, u32 handle) |
245 | 244 | { |
246 | 245 | struct tc_u_hnode *ht; |
247 | 246 | |
... | ... | @@ -252,10 +251,10 @@ |
252 | 251 | return ht; |
253 | 252 | } |
254 | 253 | |
255 | -static __inline__ struct tc_u_knode * | |
254 | +static struct tc_u_knode * | |
256 | 255 | u32_lookup_key(struct tc_u_hnode *ht, u32 handle) |
257 | 256 | { |
258 | - unsigned sel; | |
257 | + unsigned int sel; | |
259 | 258 | struct tc_u_knode *n = NULL; |
260 | 259 | |
261 | 260 | sel = TC_U32_HASH(handle); |
... | ... | @@ -300,7 +299,7 @@ |
300 | 299 | do { |
301 | 300 | if (++tp_c->hgenerator == 0x7FF) |
302 | 301 | tp_c->hgenerator = 1; |
303 | - } while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20)); | |
302 | + } while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20)); | |
304 | 303 | |
305 | 304 | return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0; |
306 | 305 | } |
307 | 306 | |
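
gen_new_htid() only has to produce a fresh 12-bit hash-table id; the 0x800 bit keeps generated ids out of the range a user would normally pick, and the <<20 shift places the result in the htid field of the handle. For reference, the handle layout as the TC_U32_* macros from <linux/pkt_cls.h> decode it, exercised on an illustrative handle:

	#include <stdio.h>
	#include <stdint.h>

	#define TC_U32_HTID(h)	((h) & 0xFFF00000)
	#define TC_U32_HASH(h)	(((h) >> 12) & 0xFF)
	#define TC_U32_NODE(h)	((h) & 0xFFF)

	int main(void)
	{
		uint32_t handle = (0x801u << 20) | (0x42u << 12) | 0x007u;

		printf("htid 0x%x hash 0x%x node 0x%x\n",
		       TC_U32_HTID(handle) >> 20, TC_U32_HASH(handle),
		       TC_U32_NODE(handle)); /* htid 0x801 hash 0x42 node 0x7 */
		return 0;
	}
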
... | ... | @@ -378,9 +377,9 @@ |
378 | 377 | static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) |
379 | 378 | { |
380 | 379 | struct tc_u_knode *n; |
381 | - unsigned h; | |
380 | + unsigned int h; | |
382 | 381 | |
383 | - for (h=0; h<=ht->divisor; h++) { | |
382 | + for (h = 0; h <= ht->divisor; h++) { | |
384 | 383 | while ((n = ht->ht[h]) != NULL) { |
385 | 384 | ht->ht[h] = n->next; |
386 | 385 | |
387 | 386 | |
... | ... | @@ -446,13 +445,13 @@ |
446 | 445 | |
447 | 446 | static int u32_delete(struct tcf_proto *tp, unsigned long arg) |
448 | 447 | { |
449 | - struct tc_u_hnode *ht = (struct tc_u_hnode*)arg; | |
448 | + struct tc_u_hnode *ht = (struct tc_u_hnode *)arg; | |
450 | 449 | |
451 | 450 | if (ht == NULL) |
452 | 451 | return 0; |
453 | 452 | |
454 | 453 | if (TC_U32_KEY(ht->handle)) |
455 | - return u32_delete_key(tp, (struct tc_u_knode*)ht); | |
454 | + return u32_delete_key(tp, (struct tc_u_knode *)ht); | |
456 | 455 | |
457 | 456 | if (tp->root == ht) |
458 | 457 | return -EINVAL; |
459 | 458 | |
460 | 459 | |
... | ... | @@ -470,14 +469,14 @@ |
470 | 469 | static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle) |
471 | 470 | { |
472 | 471 | struct tc_u_knode *n; |
473 | - unsigned i = 0x7FF; | |
472 | + unsigned int i = 0x7FF; | |
474 | 473 | |
475 | - for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next) | |
474 | + for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next) | |
476 | 475 | if (i < TC_U32_NODE(n->handle)) |
477 | 476 | i = TC_U32_NODE(n->handle); |
478 | 477 | i++; |
479 | 478 | |
480 | - return handle|(i>0xFFF ? 0xFFF : i); | |
479 | + return handle | (i > 0xFFF ? 0xFFF : i); | |
481 | 480 | } |
482 | 481 | |
483 | 482 | static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { |
... | ... | @@ -566,7 +565,8 @@ |
566 | 565 | if (err < 0) |
567 | 566 | return err; |
568 | 567 | |
569 | - if ((n = (struct tc_u_knode*)*arg) != NULL) { | |
568 | + n = (struct tc_u_knode *)*arg; | |
569 | + if (n) { | |
570 | 570 | if (TC_U32_KEY(n->handle) == 0) |
571 | 571 | return -EINVAL; |
572 | 572 | |
... | ... | @@ -574,7 +574,7 @@ |
574 | 574 | } |
575 | 575 | |
576 | 576 | if (tb[TCA_U32_DIVISOR]) { |
577 | - unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]); | |
577 | + unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]); | |
578 | 578 | |
579 | 579 | if (--divisor > 0x100) |
580 | 580 | return -EINVAL; |
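
TCA_U32_DIVISOR carries the bucket count; storing it pre-decremented lets ht->divisor double as the mask used in the hash fold, and the kzalloc() a few lines below reserves divisor extra pointer slots on top of the one-element ht[] that ends the struct. The allocation pattern, sketched stand-alone with invented names:

	#include <stdlib.h>

	struct hnode_sketch {
		unsigned int divisor;	/* bucket count - 1, usable as a mask */
		void *ht[1];		/* bucket 0; buckets 1..divisor follow */
	};

	static struct hnode_sketch *hnode_alloc(unsigned int divisor)
	{
		/* one ht[] slot lives in the struct, divisor more are appended */
		return calloc(1, sizeof(struct hnode_sketch) +
				 divisor * sizeof(void *));
	}
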
... | ... | @@ -585,7 +585,7 @@ |
585 | 585 | if (handle == 0) |
586 | 586 | return -ENOMEM; |
587 | 587 | } |
588 | - ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL); | |
588 | + ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL); | |
589 | 589 | if (ht == NULL) |
590 | 590 | return -ENOBUFS; |
591 | 591 | ht->tp_c = tp_c; |
... | ... | @@ -683,7 +683,7 @@ |
683 | 683 | struct tc_u_common *tp_c = tp->data; |
684 | 684 | struct tc_u_hnode *ht; |
685 | 685 | struct tc_u_knode *n; |
686 | - unsigned h; | |
686 | + unsigned int h; | |
687 | 687 | |
688 | 688 | if (arg->stop) |
689 | 689 | return; |
... | ... | @@ -717,7 +717,7 @@ |
717 | 717 | static int u32_dump(struct tcf_proto *tp, unsigned long fh, |
718 | 718 | struct sk_buff *skb, struct tcmsg *t) |
719 | 719 | { |
720 | - struct tc_u_knode *n = (struct tc_u_knode*)fh; | |
720 | + struct tc_u_knode *n = (struct tc_u_knode *)fh; | |
721 | 721 | struct nlattr *nest; |
722 | 722 | |
723 | 723 | if (n == NULL) |
... | ... | @@ -730,8 +730,9 @@ |
730 | 730 | goto nla_put_failure; |
731 | 731 | |
732 | 732 | if (TC_U32_KEY(n->handle) == 0) { |
733 | - struct tc_u_hnode *ht = (struct tc_u_hnode*)fh; | |
734 | - u32 divisor = ht->divisor+1; | |
733 | + struct tc_u_hnode *ht = (struct tc_u_hnode *)fh; | |
734 | + u32 divisor = ht->divisor + 1; | |
735 | + | |
735 | 736 | NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor); |
736 | 737 | } else { |
737 | 738 | NLA_PUT(skb, TCA_U32_SEL, |
... | ... | @@ -755,7 +756,7 @@ |
755 | 756 | goto nla_put_failure; |
756 | 757 | |
757 | 758 | #ifdef CONFIG_NET_CLS_IND |
758 | - if(strlen(n->indev)) | |
759 | + if (strlen(n->indev)) | |
759 | 760 | NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev); |
760 | 761 | #endif |
761 | 762 | #ifdef CONFIG_CLS_U32_PERF |
net/sched/em_cmp.c
... | ... | @@ -33,40 +33,41 @@ |
33 | 33 | return 0; |
34 | 34 | |
35 | 35 | switch (cmp->align) { |
36 | - case TCF_EM_ALIGN_U8: | |
37 | - val = *ptr; | |
38 | - break; | |
36 | + case TCF_EM_ALIGN_U8: | |
37 | + val = *ptr; | |
38 | + break; | |
39 | 39 | |
40 | - case TCF_EM_ALIGN_U16: | |
41 | - val = get_unaligned_be16(ptr); | |
40 | + case TCF_EM_ALIGN_U16: | |
41 | + val = get_unaligned_be16(ptr); | |
42 | 42 | |
43 | - if (cmp_needs_transformation(cmp)) | |
44 | - val = be16_to_cpu(val); | |
45 | - break; | |
43 | + if (cmp_needs_transformation(cmp)) | |
44 | + val = be16_to_cpu(val); | |
45 | + break; | |
46 | 46 | |
47 | - case TCF_EM_ALIGN_U32: | |
48 | - /* Worth checking boundaries? The branching seems | 
49 | - * to get worse. Visit again. */ | |
50 | - val = get_unaligned_be32(ptr); | |
47 | + case TCF_EM_ALIGN_U32: | |
48 | + /* Worth checking boundaries? The branching seems | 
49 | + * to get worse. Visit again. | |
50 | + */ | |
51 | + val = get_unaligned_be32(ptr); | |
51 | 52 | |
52 | - if (cmp_needs_transformation(cmp)) | |
53 | - val = be32_to_cpu(val); | |
54 | - break; | |
53 | + if (cmp_needs_transformation(cmp)) | |
54 | + val = be32_to_cpu(val); | |
55 | + break; | |
55 | 56 | |
56 | - default: | |
57 | - return 0; | |
57 | + default: | |
58 | + return 0; | |
58 | 59 | } |
59 | 60 | |
60 | 61 | if (cmp->mask) |
61 | 62 | val &= cmp->mask; |
62 | 63 | |
63 | 64 | switch (cmp->opnd) { |
64 | - case TCF_EM_OPND_EQ: | |
65 | - return val == cmp->val; | |
66 | - case TCF_EM_OPND_LT: | |
67 | - return val < cmp->val; | |
68 | - case TCF_EM_OPND_GT: | |
69 | - return val > cmp->val; | |
65 | + case TCF_EM_OPND_EQ: | |
66 | + return val == cmp->val; | |
67 | + case TCF_EM_OPND_LT: | |
68 | + return val < cmp->val; | |
69 | + case TCF_EM_OPND_GT: | |
70 | + return val > cmp->val; | |
70 | 71 | } |
71 | 72 | |
72 | 73 | return 0; |
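
The two switches together implement load, optional byte-order transform, mask, compare. The same logic as a compact user-space function (names invented; the transform is applied unconditionally here, where the kernel consults cmp_needs_transformation()):

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	enum { OPND_EQ, OPND_LT, OPND_GT };

	static int cmp_eval(const uint8_t *ptr, int align, uint32_t mask,
			    uint32_t opval, int opnd)
	{
		uint32_t val;

		switch (align) {
		case 1:
			val = *ptr;
			break;
		case 2: {
			uint16_t v;
			memcpy(&v, ptr, 2);	/* unaligned-safe load */
			val = ntohs(v);
			break;
		}
		case 4: {
			uint32_t v;
			memcpy(&v, ptr, 4);
			val = ntohl(v);
			break;
		}
		default:
			return 0;
		}

		if (mask)
			val &= mask;

		switch (opnd) {
		case OPND_EQ: return val == opval;
		case OPND_LT: return val < opval;
		case OPND_GT: return val > opval;
		}
		return 0;
	}
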
net/sched/em_meta.c
... | ... | @@ -73,21 +73,18 @@ |
73 | 73 | #include <net/pkt_cls.h> |
74 | 74 | #include <net/sock.h> |
75 | 75 | |
76 | -struct meta_obj | |
77 | -{ | |
76 | +struct meta_obj { | |
78 | 77 | unsigned long value; |
79 | 78 | unsigned int len; |
80 | 79 | }; |
81 | 80 | |
82 | -struct meta_value | |
83 | -{ | |
81 | +struct meta_value { | |
84 | 82 | struct tcf_meta_val hdr; |
85 | 83 | unsigned long val; |
86 | 84 | unsigned int len; |
87 | 85 | }; |
88 | 86 | |
89 | -struct meta_match | |
90 | -{ | |
87 | +struct meta_match { | |
91 | 88 | struct meta_value lvalue; |
92 | 89 | struct meta_value rvalue; |
93 | 90 | }; |
... | ... | @@ -483,8 +480,7 @@ |
483 | 480 | * Meta value collectors assignment table |
484 | 481 | **************************************************************************/ |
485 | 482 | |
486 | -struct meta_ops | |
487 | -{ | |
483 | +struct meta_ops { | |
488 | 484 | void (*get)(struct sk_buff *, struct tcf_pkt_info *, |
489 | 485 | struct meta_value *, struct meta_obj *, int *); |
490 | 486 | }; |
... | ... | @@ -494,7 +490,7 @@ |
494 | 490 | |
495 | 491 | /* Meta value operations table listing all meta value collectors and |
496 | 492 | * assigns them to a type and meta id. */ |
497 | -static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = { | |
493 | +static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = { | |
498 | 494 | [TCF_META_TYPE_VAR] = { |
499 | 495 | [META_ID(DEV)] = META_FUNC(var_dev), |
500 | 496 | [META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if), |
... | ... | @@ -550,7 +546,7 @@ |
550 | 546 | } |
551 | 547 | }; |
552 | 548 | |
553 | -static inline struct meta_ops * meta_ops(struct meta_value *val) | |
549 | +static inline struct meta_ops *meta_ops(struct meta_value *val) | |
554 | 550 | { |
555 | 551 | return &__meta_ops[meta_type(val)][meta_id(val)]; |
556 | 552 | } |
557 | 553 | |
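
__meta_ops is a designated-initializer table of collectors indexed by (type, id), and meta_ops() is its one-line accessor, so dispatch is a double array lookup with no branching. The idiom reduced to a runnable sketch (all names invented):

	#include <stdio.h>

	struct obj { long value; };
	typedef void (*get_fn)(struct obj *dst);

	static void get_mark(struct obj *dst) { dst->value = 42; }
	static void get_prio(struct obj *dst) { dst->value = 6; }

	enum { TYPE_INT, TYPE_MAX = TYPE_INT };
	enum { ID_MARK, ID_PRIO, ID_MAX = ID_PRIO };

	static get_fn ops[TYPE_MAX + 1][ID_MAX + 1] = {
		[TYPE_INT] = {
			[ID_MARK] = get_mark,
			[ID_PRIO] = get_prio,
		},
	};

	int main(void)
	{
		struct obj o;

		ops[TYPE_INT][ID_PRIO](&o);	/* like meta_ops(val)->get(...) */
		printf("%ld\n", o.value);	/* 6 */
		return 0;
	}
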
... | ... | @@ -649,9 +645,8 @@ |
649 | 645 | { |
650 | 646 | if (v->len == sizeof(unsigned long)) |
651 | 647 | NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val); |
652 | - else if (v->len == sizeof(u32)) { | |
648 | + else if (v->len == sizeof(u32)) | |
653 | 649 | NLA_PUT_U32(skb, tlv, v->val); |
654 | - } | |
655 | 650 | |
656 | 651 | return 0; |
657 | 652 | |
... | ... | @@ -663,8 +658,7 @@ |
663 | 658 | * Type specific operations table |
664 | 659 | **************************************************************************/ |
665 | 660 | |
666 | -struct meta_type_ops | |
667 | -{ | |
661 | +struct meta_type_ops { | |
668 | 662 | void (*destroy)(struct meta_value *); |
669 | 663 | int (*compare)(struct meta_obj *, struct meta_obj *); |
670 | 664 | int (*change)(struct meta_value *, struct nlattr *); |
... | ... | @@ -672,7 +666,7 @@ |
672 | 666 | int (*dump)(struct sk_buff *, struct meta_value *, int); |
673 | 667 | }; |
674 | 668 | |
675 | -static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = { | |
669 | +static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = { | |
676 | 670 | [TCF_META_TYPE_VAR] = { |
677 | 671 | .destroy = meta_var_destroy, |
678 | 672 | .compare = meta_var_compare, |
... | ... | @@ -688,7 +682,7 @@ |
688 | 682 | } |
689 | 683 | }; |
690 | 684 | |
691 | -static inline struct meta_type_ops * meta_type_ops(struct meta_value *v) | |
685 | +static inline struct meta_type_ops *meta_type_ops(struct meta_value *v) | |
692 | 686 | { |
693 | 687 | return &__meta_type_ops[meta_type(v)]; |
694 | 688 | } |
... | ... | @@ -713,7 +707,7 @@ |
713 | 707 | return err; |
714 | 708 | |
715 | 709 | if (meta_type_ops(v)->apply_extras) |
716 | - meta_type_ops(v)->apply_extras(v, dst); | |
710 | + meta_type_ops(v)->apply_extras(v, dst); | |
717 | 711 | |
718 | 712 | return 0; |
719 | 713 | } |
... | ... | @@ -732,12 +726,12 @@ |
732 | 726 | r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value); |
733 | 727 | |
734 | 728 | switch (meta->lvalue.hdr.op) { |
735 | - case TCF_EM_OPND_EQ: | |
736 | - return !r; | |
737 | - case TCF_EM_OPND_LT: | |
738 | - return r < 0; | |
739 | - case TCF_EM_OPND_GT: | |
740 | - return r > 0; | |
729 | + case TCF_EM_OPND_EQ: | |
730 | + return !r; | |
731 | + case TCF_EM_OPND_LT: | |
732 | + return r < 0; | |
733 | + case TCF_EM_OPND_GT: | |
734 | + return r > 0; | |
741 | 735 | } |
742 | 736 | |
743 | 737 | return 0; |
... | ... | @@ -771,7 +765,7 @@ |
771 | 765 | |
772 | 766 | static inline int meta_is_supported(struct meta_value *val) |
773 | 767 | { |
774 | - return (!meta_id(val) || meta_ops(val)->get); | |
768 | + return !meta_id(val) || meta_ops(val)->get; | |
775 | 769 | } |
776 | 770 | |
777 | 771 | static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = { |
net/sched/em_nbyte.c
net/sched/em_text.c
net/sched/em_u32.c
net/sched/ematch.c
... | ... | @@ -93,7 +93,7 @@ |
93 | 93 | static LIST_HEAD(ematch_ops); |
94 | 94 | static DEFINE_RWLOCK(ematch_mod_lock); |
95 | 95 | |
96 | -static inline struct tcf_ematch_ops * tcf_em_lookup(u16 kind) | |
96 | +static struct tcf_ematch_ops *tcf_em_lookup(u16 kind) | |
97 | 97 | { |
98 | 98 | struct tcf_ematch_ops *e = NULL; |
99 | 99 | |
... | ... | @@ -163,8 +163,8 @@ |
163 | 163 | } |
164 | 164 | EXPORT_SYMBOL(tcf_em_unregister); |
165 | 165 | |
166 | -static inline struct tcf_ematch * tcf_em_get_match(struct tcf_ematch_tree *tree, | |
167 | - int index) | |
166 | +static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree, | |
167 | + int index) | |
168 | 168 | { |
169 | 169 | return &tree->matches[index]; |
170 | 170 | } |
... | ... | @@ -184,7 +184,8 @@ |
184 | 184 | |
185 | 185 | if (em_hdr->kind == TCF_EM_CONTAINER) { |
186 | 186 | /* Special ematch called "container", carries an index |
187 | - * referencing an external ematch sequence. */ | |
187 | + * referencing an external ematch sequence. | |
188 | + */ | |
188 | 189 | u32 ref; |
189 | 190 | |
190 | 191 | if (data_len < sizeof(ref)) |
... | ... | @@ -195,7 +196,8 @@ |
195 | 196 | goto errout; |
196 | 197 | |
197 | 198 | /* We do not allow backward jumps to avoid loops and jumps |
198 | - * to our own position are of course illegal. */ | |
199 | + * to our own position are of course illegal. | |
200 | + */ | |
199 | 201 | if (ref <= idx) |
200 | 202 | goto errout; |
201 | 203 | |
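
Since the matches sit in one flat array and a container may only reference entries further along, validating the references is a bounds-plus-ordering check. The invariant, isolated (a ref value of 0 meaning "not a container" is an illustration here, not the kernel encoding):

	static int refs_valid(const unsigned int *ref, unsigned int nmatches)
	{
		unsigned int idx;

		for (idx = 0; idx < nmatches; idx++) {
			if (!ref[idx])			/* not a container */
				continue;
			if (ref[idx] >= nmatches)	/* out of range */
				return 0;
			if (ref[idx] <= idx)		/* backward or self jump */
				return 0;
		}
		return 1;
	}
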
... | ... | @@ -208,7 +210,8 @@ |
208 | 210 | * which automatically releases the reference again, therefore |
209 | 211 | * the module MUST not be given back under any circumstances |
210 | 212 | * here. Be aware, the destroy function assumes that the |
211 | - * module is held if the ops field is non zero. */ | |
213 | + * module is held if the ops field is non zero. | |
214 | + */ | |
212 | 215 | em->ops = tcf_em_lookup(em_hdr->kind); |
213 | 216 | |
214 | 217 | if (em->ops == NULL) { |
... | ... | @@ -221,7 +224,8 @@ |
221 | 224 | if (em->ops) { |
222 | 225 | /* We dropped the RTNL mutex in order to |
223 | 226 | * perform the module load. Tell the caller |
224 | - * to replay the request. */ | |
227 | + * to replay the request. | |
228 | + */ | |
225 | 229 | module_put(em->ops->owner); |
226 | 230 | err = -EAGAIN; |
227 | 231 | } |
... | ... | @@ -230,7 +234,8 @@ |
230 | 234 | } |
231 | 235 | |
232 | 236 | /* ematch module provides expected length of data, so we |
233 | - * can do a basic sanity check. */ | |
237 | + * can do a basic sanity check. | |
238 | + */ | |
234 | 239 | if (em->ops->datalen && data_len < em->ops->datalen) |
235 | 240 | goto errout; |
236 | 241 | |
... | ... | @@ -246,7 +251,8 @@ |
246 | 251 | * TCF_EM_SIMPLE may be specified stating that the |
247 | 252 | * data only consists of a u32 integer and the module |
248 | 253 | * does not expect a memory reference but rather | 
249 | - * the value carried. */ | |
254 | + * the value carried. | |
255 | + */ | |
250 | 256 | if (em_hdr->flags & TCF_EM_SIMPLE) { |
251 | 257 | if (data_len < sizeof(u32)) |
252 | 258 | goto errout; |
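
em->data is thus deliberately overloaded: ordinarily it points at allocated payload, but for simple matches the u32 value is stored in it directly and nothing is allocated. A sketch of the convention (the flag value below is a stand-in, not the real TCF_EM_SIMPLE):

	#define EM_SIMPLE 0x1	/* stand-in flag, for illustration only */

	struct em_sketch {
		unsigned long data;	/* payload pointer, or immediate u32 */
		unsigned short flags;
	};

	static unsigned int em_u32_value(const struct em_sketch *em)
	{
		if (em->flags & EM_SIMPLE)
			return (unsigned int)em->data;		/* inline value */
		return *(const unsigned int *)em->data;		/* via pointer */
	}
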
... | ... | @@ -334,7 +340,8 @@ |
334 | 340 | * The array of rt attributes is parsed in the order as they are |
335 | 341 | * provided, their type must be incremental from 1 to n. Even |
336 | 342 | * if it does not serve any real purpose, a failure of sticking |
337 | - * to this policy will result in parsing failure. */ | |
343 | + * to this policy will result in parsing failure. | |
344 | + */ | |
338 | 345 | for (idx = 0; nla_ok(rt_match, list_len); idx++) { |
339 | 346 | err = -EINVAL; |
340 | 347 | |
... | ... | @@ -359,7 +366,8 @@ |
359 | 366 | /* Check if the number of matches provided by userspace actually |
360 | 367 | * complies with the array of matches. The number was used for |
361 | 368 | * the validation of references and a mismatch could lead to |
362 | - * undefined references during the matching process. */ | |
369 | + * undefined references during the matching process. | |
370 | + */ | |
363 | 371 | if (idx != tree_hdr->nmatches) { |
364 | 372 | err = -EINVAL; |
365 | 373 | goto errout_abort; |
... | ... | @@ -449,7 +457,7 @@ |
449 | 457 | .flags = em->flags |
450 | 458 | }; |
451 | 459 | |
452 | - NLA_PUT(skb, i+1, sizeof(em_hdr), &em_hdr); | |
460 | + NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr); | |
453 | 461 | |
454 | 462 | if (em->ops && em->ops->dump) { |
455 | 463 | if (em->ops->dump(skb, em) < 0) |
... | ... | @@ -478,6 +486,7 @@ |
478 | 486 | struct tcf_pkt_info *info) |
479 | 487 | { |
480 | 488 | int r = em->ops->match(skb, em, info); |
489 | + | |
481 | 490 | return tcf_em_is_inverted(em) ? !r : r; |
482 | 491 | } |
483 | 492 | |
... | ... | @@ -527,8 +536,8 @@ |
527 | 536 | |
528 | 537 | stack_overflow: |
529 | 538 | if (net_ratelimit()) |
530 | - printk(KERN_WARNING "tc ematch: local stack overflow," | |
531 | - " increase NET_EMATCH_STACK\n"); | |
539 | + pr_warning("tc ematch: local stack overflow," | |
540 | + " increase NET_EMATCH_STACK\n"); | |
532 | 541 | return -1; |
533 | 542 | } |
534 | 543 | EXPORT_SYMBOL(__tcf_em_tree_match); |
net/sched/sch_api.c
... | ... | @@ -187,7 +187,7 @@ |
187 | 187 | int err = -ENOENT; |
188 | 188 | |
189 | 189 | write_lock(&qdisc_mod_lock); |
190 | - for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next) | |
190 | + for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) | |
191 | 191 | if (q == qops) |
192 | 192 | break; |
193 | 193 | if (q) { |
... | ... | @@ -321,7 +321,9 @@ |
321 | 321 | if (!tab || --tab->refcnt) |
322 | 322 | return; |
323 | 323 | |
324 | - for (rtabp = &qdisc_rtab_list; (rtab=*rtabp) != NULL; rtabp = &rtab->next) { | |
324 | + for (rtabp = &qdisc_rtab_list; | |
325 | + (rtab = *rtabp) != NULL; | |
326 | + rtabp = &rtab->next) { | |
325 | 327 | if (rtab == tab) { |
326 | 328 | *rtabp = rtab->next; |
327 | 329 | kfree(rtab); |
... | ... | @@ -459,9 +461,8 @@ |
459 | 461 | void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc) |
460 | 462 | { |
461 | 463 | if (!(qdisc->flags & TCQ_F_WARN_NONWC)) { |
462 | - printk(KERN_WARNING | |
463 | - "%s: %s qdisc %X: is non-work-conserving?\n", | |
464 | - txt, qdisc->ops->id, qdisc->handle >> 16); | |
464 | + pr_warn("%s: %s qdisc %X: is non-work-conserving?\n", | |
465 | + txt, qdisc->ops->id, qdisc->handle >> 16); | |
465 | 466 | qdisc->flags |= TCQ_F_WARN_NONWC; |
466 | 467 | } |
467 | 468 | } |
... | ... | @@ -625,7 +626,7 @@ |
625 | 626 | autohandle = TC_H_MAKE(0x80000000U, 0); |
626 | 627 | } while (qdisc_lookup(dev, autohandle) && --i > 0); |
627 | 628 | |
628 | - return i>0 ? autohandle : 0; | |
629 | + return i > 0 ? autohandle : 0; | |
629 | 630 | } |
630 | 631 | |
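
Qdisc handles are 32-bit major:minor pairs, and TC_H_MAKE() from <linux/pkt_sched.h> simply masks and ORs the two halves, so the loop above probes majors 8000:, 8001:, ... until qdisc_lookup() finds a free one. A tiny demonstration of the handle arithmetic:

	#include <stdio.h>
	#include <stdint.h>

	#define TC_H_MAJ_MASK 0xFFFF0000U
	#define TC_H_MIN_MASK 0x0000FFFFU
	#define TC_H_MAKE(maj, min) (((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

	int main(void)
	{
		uint32_t h = TC_H_MAKE(0x80000000U, 0);

		h += TC_H_MAKE(0x10000U, 0);	/* step to the next major */
		printf("%04x:%04x\n", h >> 16, h & TC_H_MIN_MASK); /* 8001:0000 */
		return 0;
	}
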
631 | 632 | void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) |
... | ... | @@ -915,9 +916,8 @@ |
915 | 916 | return 0; |
916 | 917 | } |
917 | 918 | |
918 | -struct check_loop_arg | |
919 | -{ | |
920 | - struct qdisc_walker w; | |
919 | +struct check_loop_arg { | |
920 | + struct qdisc_walker w; | |
921 | 921 | struct Qdisc *p; |
922 | 922 | int depth; |
923 | 923 | }; |
... | ... | @@ -970,7 +970,8 @@ |
970 | 970 | struct Qdisc *p = NULL; |
971 | 971 | int err; |
972 | 972 | |
973 | - if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) | |
973 | + dev = __dev_get_by_index(net, tcm->tcm_ifindex); | |
974 | + if (!dev) | |
974 | 975 | return -ENODEV; |
975 | 976 | |
976 | 977 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
977 | 978 | |
... | ... | @@ -980,12 +981,12 @@ |
980 | 981 | if (clid) { |
981 | 982 | if (clid != TC_H_ROOT) { |
982 | 983 | if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) { |
983 | - if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL) | |
984 | + p = qdisc_lookup(dev, TC_H_MAJ(clid)); | |
985 | + if (!p) | |
984 | 986 | return -ENOENT; |
985 | 987 | q = qdisc_leaf(p, clid); |
986 | - } else { /* ingress */ | |
987 | - if (dev_ingress_queue(dev)) | |
988 | - q = dev_ingress_queue(dev)->qdisc_sleeping; | |
988 | + } else if (dev_ingress_queue(dev)) { | |
989 | + q = dev_ingress_queue(dev)->qdisc_sleeping; | |
989 | 990 | } |
990 | 991 | } else { |
991 | 992 | q = dev->qdisc; |
... | ... | @@ -996,7 +997,8 @@ |
996 | 997 | if (tcm->tcm_handle && q->handle != tcm->tcm_handle) |
997 | 998 | return -EINVAL; |
998 | 999 | } else { |
999 | - if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL) | |
1000 | + q = qdisc_lookup(dev, tcm->tcm_handle); | |
1001 | + if (!q) | |
1000 | 1002 | return -ENOENT; |
1001 | 1003 | } |
1002 | 1004 | |
... | ... | @@ -1008,7 +1010,8 @@ |
1008 | 1010 | return -EINVAL; |
1009 | 1011 | if (q->handle == 0) |
1010 | 1012 | return -ENOENT; |
1011 | - if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0) | |
1013 | + err = qdisc_graft(dev, p, skb, n, clid, NULL, q); | |
1014 | + if (err != 0) | |
1012 | 1015 | return err; |
1013 | 1016 | } else { |
1014 | 1017 | qdisc_notify(net, skb, n, clid, NULL, q); |
... | ... | @@ -1017,7 +1020,7 @@ |
1017 | 1020 | } |
1018 | 1021 | |
1019 | 1022 | /* |
1020 | - Create/change qdisc. | |
1023 | + * Create/change qdisc. | |
1021 | 1024 | */ |
1022 | 1025 | |
1023 | 1026 | static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) |
... | ... | @@ -1036,7 +1039,8 @@ |
1036 | 1039 | clid = tcm->tcm_parent; |
1037 | 1040 | q = p = NULL; |
1038 | 1041 | |
1039 | - if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) | |
1042 | + dev = __dev_get_by_index(net, tcm->tcm_ifindex); | |
1043 | + if (!dev) | |
1040 | 1044 | return -ENODEV; |
1041 | 1045 | |
1042 | 1046 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
1043 | 1047 | |
... | ... | @@ -1046,12 +1050,12 @@ |
1046 | 1050 | if (clid) { |
1047 | 1051 | if (clid != TC_H_ROOT) { |
1048 | 1052 | if (clid != TC_H_INGRESS) { |
1049 | - if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL) | |
1053 | + p = qdisc_lookup(dev, TC_H_MAJ(clid)); | |
1054 | + if (!p) | |
1050 | 1055 | return -ENOENT; |
1051 | 1056 | q = qdisc_leaf(p, clid); |
1052 | - } else { /* ingress */ | |
1053 | - if (dev_ingress_queue_create(dev)) | |
1054 | - q = dev_ingress_queue(dev)->qdisc_sleeping; | |
1057 | + } else if (dev_ingress_queue_create(dev)) { | |
1058 | + q = dev_ingress_queue(dev)->qdisc_sleeping; | |
1055 | 1059 | } |
1056 | 1060 | } else { |
1057 | 1061 | q = dev->qdisc; |
1058 | 1062 | |
1059 | 1063 | |
... | ... | @@ -1063,13 +1067,14 @@ |
1063 | 1067 | |
1064 | 1068 | if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) { |
1065 | 1069 | if (tcm->tcm_handle) { |
1066 | - if (q && !(n->nlmsg_flags&NLM_F_REPLACE)) | |
1070 | + if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) | |
1067 | 1071 | return -EEXIST; |
1068 | 1072 | if (TC_H_MIN(tcm->tcm_handle)) |
1069 | 1073 | return -EINVAL; |
1070 | - if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL) | |
1074 | + q = qdisc_lookup(dev, tcm->tcm_handle); | |
1075 | + if (!q) | |
1071 | 1076 | goto create_n_graft; |
1072 | - if (n->nlmsg_flags&NLM_F_EXCL) | |
1077 | + if (n->nlmsg_flags & NLM_F_EXCL) | |
1073 | 1078 | return -EEXIST; |
1074 | 1079 | if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) |
1075 | 1080 | return -EINVAL; |
... | ... | @@ -1079,7 +1084,7 @@ |
1079 | 1084 | atomic_inc(&q->refcnt); |
1080 | 1085 | goto graft; |
1081 | 1086 | } else { |
1082 | - if (q == NULL) | |
1087 | + if (!q) | |
1083 | 1088 | goto create_n_graft; |
1084 | 1089 | |
1085 | 1090 | /* This magic test requires explanation. |
... | ... | @@ -1101,9 +1106,9 @@ |
1101 | 1106 | * For now we select create/graft, if |
1102 | 1107 | * user gave KIND, which does not match existing. |
1103 | 1108 | */ |
1104 | - if ((n->nlmsg_flags&NLM_F_CREATE) && | |
1105 | - (n->nlmsg_flags&NLM_F_REPLACE) && | |
1106 | - ((n->nlmsg_flags&NLM_F_EXCL) || | |
1109 | + if ((n->nlmsg_flags & NLM_F_CREATE) && | |
1110 | + (n->nlmsg_flags & NLM_F_REPLACE) && | |
1111 | + ((n->nlmsg_flags & NLM_F_EXCL) || | |
1107 | 1112 | (tca[TCA_KIND] && |
1108 | 1113 | nla_strcmp(tca[TCA_KIND], q->ops->id)))) |
1109 | 1114 | goto create_n_graft; |
... | ... | @@ -1118,7 +1123,7 @@ |
1118 | 1123 | /* Change qdisc parameters */ |
1119 | 1124 | if (q == NULL) |
1120 | 1125 | return -ENOENT; |
1121 | - if (n->nlmsg_flags&NLM_F_EXCL) | |
1126 | + if (n->nlmsg_flags & NLM_F_EXCL) | |
1122 | 1127 | return -EEXIST; |
1123 | 1128 | if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) |
1124 | 1129 | return -EINVAL; |
... | ... | @@ -1128,7 +1133,7 @@ |
1128 | 1133 | return err; |
1129 | 1134 | |
1130 | 1135 | create_n_graft: |
1131 | - if (!(n->nlmsg_flags&NLM_F_CREATE)) | |
1136 | + if (!(n->nlmsg_flags & NLM_F_CREATE)) | |
1132 | 1137 | return -ENOENT; |
1133 | 1138 | if (clid == TC_H_INGRESS) { |
1134 | 1139 | if (dev_ingress_queue(dev)) |
1135 | 1140 | |
1136 | 1141 | |
... | ... | @@ -1234,16 +1239,19 @@ |
1234 | 1239 | return -ENOBUFS; |
1235 | 1240 | |
1236 | 1241 | if (old && !tc_qdisc_dump_ignore(old)) { |
1237 | - if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0) | |
1242 | + if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, | |
1243 | + 0, RTM_DELQDISC) < 0) | |
1238 | 1244 | goto err_out; |
1239 | 1245 | } |
1240 | 1246 | if (new && !tc_qdisc_dump_ignore(new)) { |
1241 | - if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) | |
1247 | + if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, | |
1248 | + old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0) | |
1242 | 1249 | goto err_out; |
1243 | 1250 | } |
1244 | 1251 | |
1245 | 1252 | if (skb->len) |
1246 | - return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); | |
1253 | + return rtnetlink_send(skb, net, pid, RTNLGRP_TC, | |
1254 | + n->nlmsg_flags & NLM_F_ECHO); | |
1247 | 1255 | |
1248 | 1256 | err_out: |
1249 | 1257 | kfree_skb(skb); |
... | ... | @@ -1275,7 +1283,7 @@ |
1275 | 1283 | q_idx++; |
1276 | 1284 | continue; |
1277 | 1285 | } |
1278 | - if (!tc_qdisc_dump_ignore(q) && | |
1286 | + if (!tc_qdisc_dump_ignore(q) && | |
1279 | 1287 | tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid, |
1280 | 1288 | cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) |
1281 | 1289 | goto done; |
... | ... | @@ -1356,7 +1364,8 @@ |
1356 | 1364 | u32 qid = TC_H_MAJ(clid); |
1357 | 1365 | int err; |
1358 | 1366 | |
1359 | - if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) | |
1367 | + dev = __dev_get_by_index(net, tcm->tcm_ifindex); | |
1368 | + if (!dev) | |
1360 | 1369 | return -ENODEV; |
1361 | 1370 | |
1362 | 1371 | err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); |
... | ... | @@ -1391,9 +1400,9 @@ |
1391 | 1400 | qid = dev->qdisc->handle; |
1392 | 1401 | |
1393 | 1402 | /* Now qid is genuine qdisc handle consistent |
1394 | - both with parent and child. | |
1395 | - | |
1396 | - TC_H_MAJ(pid) still may be unspecified, complete it now. | |
1403 | + * both with parent and child. | |
1404 | + * | |
1405 | + * TC_H_MAJ(pid) still may be unspecified, complete it now. | |
1397 | 1406 | */ |
1398 | 1407 | if (pid) |
1399 | 1408 | pid = TC_H_MAKE(qid, pid); |
... | ... | @@ -1403,7 +1412,8 @@ |
1403 | 1412 | } |
1404 | 1413 | |
1405 | 1414 | /* OK. Locate qdisc */ |
1406 | - if ((q = qdisc_lookup(dev, qid)) == NULL) | |
1415 | + q = qdisc_lookup(dev, qid); | |
1416 | + if (!q) | |
1407 | 1417 | return -ENOENT; |
1408 | 1418 | |
1409 | 1419 | /* And check that it supports classes */ | 
1410 | 1420 | |
... | ... | @@ -1423,13 +1433,14 @@ |
1423 | 1433 | |
1424 | 1434 | if (cl == 0) { |
1425 | 1435 | err = -ENOENT; |
1426 | - if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE)) | |
1436 | + if (n->nlmsg_type != RTM_NEWTCLASS || | |
1437 | + !(n->nlmsg_flags & NLM_F_CREATE)) | |
1427 | 1438 | goto out; |
1428 | 1439 | } else { |
1429 | 1440 | switch (n->nlmsg_type) { |
1430 | 1441 | case RTM_NEWTCLASS: |
1431 | 1442 | err = -EEXIST; |
1432 | - if (n->nlmsg_flags&NLM_F_EXCL) | |
1443 | + if (n->nlmsg_flags & NLM_F_EXCL) | |
1433 | 1444 | goto out; |
1434 | 1445 | break; |
1435 | 1446 | case RTM_DELTCLASS: |
1436 | 1447 | |
... | ... | @@ -1521,14 +1532,14 @@ |
1521 | 1532 | return -EINVAL; |
1522 | 1533 | } |
1523 | 1534 | |
1524 | - return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); | |
1535 | + return rtnetlink_send(skb, net, pid, RTNLGRP_TC, | |
1536 | + n->nlmsg_flags & NLM_F_ECHO); | |
1525 | 1537 | } |
1526 | 1538 | |
1527 | -struct qdisc_dump_args | |
1528 | -{ | |
1529 | - struct qdisc_walker w; | |
1530 | - struct sk_buff *skb; | |
1531 | - struct netlink_callback *cb; | |
1539 | +struct qdisc_dump_args { | |
1540 | + struct qdisc_walker w; | |
1541 | + struct sk_buff *skb; | |
1542 | + struct netlink_callback *cb; | |
1532 | 1543 | }; |
1533 | 1544 | |
1534 | 1545 | static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg) |
... | ... | @@ -1590,7 +1601,7 @@ |
1590 | 1601 | |
1591 | 1602 | static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) |
1592 | 1603 | { |
1593 | - struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh); | |
1604 | + struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh); | |
1594 | 1605 | struct net *net = sock_net(skb->sk); |
1595 | 1606 | struct netdev_queue *dev_queue; |
1596 | 1607 | struct net_device *dev; |
... | ... | @@ -1598,7 +1609,8 @@ |
1598 | 1609 | |
1599 | 1610 | if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) |
1600 | 1611 | return 0; |
1601 | - if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL) | |
1612 | + dev = dev_get_by_index(net, tcm->tcm_ifindex); | |
1613 | + if (!dev) | |
1602 | 1614 | return 0; |
1603 | 1615 | |
1604 | 1616 | s_t = cb->args[0]; |
1605 | 1617 | |
1606 | 1618 | |
... | ... | @@ -1621,19 +1633,22 @@ |
1621 | 1633 | } |
1622 | 1634 | |
1623 | 1635 | /* Main classifier routine: scans classifier chain attached |
1624 | - to this qdisc, (optionally) tests for protocol and asks | |
1625 | - specific classifiers. | |
1636 | + * to this qdisc, (optionally) tests for protocol and asks | |
1637 | + * specific classifiers. | |
1626 | 1638 | */ |
1627 | 1639 | int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp, |
1628 | 1640 | struct tcf_result *res) |
1629 | 1641 | { |
1630 | 1642 | __be16 protocol = skb->protocol; |
1631 | - int err = 0; | |
1643 | + int err; | |
1632 | 1644 | |
1633 | 1645 | for (; tp; tp = tp->next) { |
1634 | - if ((tp->protocol == protocol || | |
1635 | - tp->protocol == htons(ETH_P_ALL)) && | |
1636 | - (err = tp->classify(skb, tp, res)) >= 0) { | |
1646 | + if (tp->protocol != protocol && | |
1647 | + tp->protocol != htons(ETH_P_ALL)) | |
1648 | + continue; | |
1649 | + err = tp->classify(skb, tp, res); | |
1650 | + | |
1651 | + if (err >= 0) { | |
1637 | 1652 | #ifdef CONFIG_NET_CLS_ACT |
1638 | 1653 | if (err != TC_ACT_RECLASSIFY && skb->tc_verd) |
1639 | 1654 | skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0); |
1640 | 1655 | |
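
Rewritten this way, the chain walk reads: skip filters whose protocol does not match, run the first one that does, and let any non-negative verdict end the walk. The same control flow as a stand-alone sketch (types and names invented):

	struct filter {
		unsigned short protocol;	/* 0 plays the ETH_P_ALL wildcard here */
		int (*classify)(const void *pkt);
		struct filter *next;
	};

	static int classify_chain(const struct filter *fp, const void *pkt,
				  unsigned short protocol)
	{
		for (; fp; fp = fp->next) {
			int err;

			if (fp->protocol && fp->protocol != protocol)
				continue;	/* wrong protocol: next filter */
			err = fp->classify(pkt);
			if (err >= 0)
				return err;	/* first verdict wins */
		}
		return -1;			/* no filter claimed the packet */
	}
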
... | ... | @@ -1664,11 +1679,11 @@ |
1664 | 1679 | |
1665 | 1680 | if (verd++ >= MAX_REC_LOOP) { |
1666 | 1681 | if (net_ratelimit()) |
1667 | - printk(KERN_NOTICE | |
1668 | - "%s: packet reclassify loop" | |
1682 | + pr_notice("%s: packet reclassify loop" | |
1669 | 1683 | " rule prio %u protocol %02x\n", |
1670 | - tp->q->ops->id, | |
1671 | - tp->prio & 0xffff, ntohs(tp->protocol)); | |
1684 | + tp->q->ops->id, | |
1685 | + tp->prio & 0xffff, | |
1686 | + ntohs(tp->protocol)); | |
1672 | 1687 | return TC_ACT_SHOT; |
1673 | 1688 | } |
1674 | 1689 | skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd); |
... | ... | @@ -1761,7 +1776,7 @@ |
1761 | 1776 | |
1762 | 1777 | err = register_pernet_subsys(&psched_net_ops); |
1763 | 1778 | if (err) { |
1764 | - printk(KERN_ERR "pktsched_init: " | |
1779 | + pr_err("pktsched_init: " | |
1765 | 1780 | "cannot initialize per netns operations\n"); |
1766 | 1781 | return err; |
1767 | 1782 | } |
net/sched/sch_atm.c
... | ... | @@ -319,7 +319,7 @@ |
319 | 319 | * creation), and one for the reference held when calling delete. |
320 | 320 | */ |
321 | 321 | if (flow->ref < 2) { |
322 | - printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref); | |
322 | + pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref); | |
323 | 323 | return -EINVAL; |
324 | 324 | } |
325 | 325 | if (flow->ref > 2) |
326 | 326 | |
327 | 327 | |
... | ... | @@ -384,12 +384,12 @@ |
384 | 384 | } |
385 | 385 | } |
386 | 386 | flow = NULL; |
387 | - done: | |
388 | - ; | |
387 | +done: | |
388 | + ; | |
389 | 389 | } |
390 | - if (!flow) | |
390 | + if (!flow) { | |
391 | 391 | flow = &p->link; |
392 | - else { | |
392 | + } else { | |
393 | 393 | if (flow->vcc) |
394 | 394 | ATM_SKB(skb)->atm_options = flow->vcc->atm_options; |
395 | 395 | /*@@@ looks good ... but it's not supposed to work :-) */ |
... | ... | @@ -576,8 +576,7 @@ |
576 | 576 | |
577 | 577 | list_for_each_entry_safe(flow, tmp, &p->flows, list) { |
578 | 578 | if (flow->ref > 1) |
579 | - printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow, | |
580 | - flow->ref); | |
579 | + pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref); | |
581 | 580 | atm_tc_put(sch, (unsigned long)flow); |
582 | 581 | } |
583 | 582 | tasklet_kill(&p->task); |
584 | 583 | |
... | ... | @@ -616,9 +615,8 @@ |
616 | 615 | } |
617 | 616 | if (flow->excess) |
618 | 617 | NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid); |
619 | - else { | |
618 | + else | |
620 | 619 | NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0); |
621 | - } | |
622 | 620 | |
623 | 621 | nla_nest_end(skb, nest); |
624 | 622 | return skb->len; |
net/sched/sch_cbq.c
... | ... | @@ -72,8 +72,7 @@ |
72 | 72 | struct cbq_sched_data; |
73 | 73 | |
74 | 74 | |
75 | -struct cbq_class | |
76 | -{ | |
75 | +struct cbq_class { | |
77 | 76 | struct Qdisc_class_common common; |
78 | 77 | struct cbq_class *next_alive; /* next class with backlog in this priority band */ |
79 | 78 | |
80 | 79 | |
81 | 80 | |
82 | 81 | |
... | ... | @@ -139,19 +138,18 @@ |
139 | 138 | int refcnt; |
140 | 139 | int filters; |
141 | 140 | |
142 | - struct cbq_class *defaults[TC_PRIO_MAX+1]; | |
141 | + struct cbq_class *defaults[TC_PRIO_MAX + 1]; | |
143 | 142 | }; |
144 | 143 | |
145 | -struct cbq_sched_data | |
146 | -{ | |
144 | +struct cbq_sched_data { | |
147 | 145 | struct Qdisc_class_hash clhash; /* Hash table of all classes */ |
148 | - int nclasses[TC_CBQ_MAXPRIO+1]; | |
149 | - unsigned quanta[TC_CBQ_MAXPRIO+1]; | |
146 | + int nclasses[TC_CBQ_MAXPRIO + 1]; | |
147 | + unsigned int quanta[TC_CBQ_MAXPRIO + 1]; | |
150 | 148 | |
151 | 149 | struct cbq_class link; |
152 | 150 | |
153 | - unsigned activemask; | |
154 | - struct cbq_class *active[TC_CBQ_MAXPRIO+1]; /* List of all classes | |
151 | + unsigned int activemask; | |
152 | + struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes | |
155 | 153 | with backlog */ |
156 | 154 | |
157 | 155 | #ifdef CONFIG_NET_CLS_ACT |
... | ... | @@ -162,7 +160,7 @@ |
162 | 160 | int tx_len; |
163 | 161 | psched_time_t now; /* Cached timestamp */ |
164 | 162 | psched_time_t now_rt; /* Cached real time */ |
165 | - unsigned pmask; | |
163 | + unsigned int pmask; | |
166 | 164 | |
167 | 165 | struct hrtimer delay_timer; |
168 | 166 | struct qdisc_watchdog watchdog; /* Watchdog timer, |
169 | 167 | |
... | ... | @@ -175,9 +173,9 @@ |
175 | 173 | }; |
176 | 174 | |
177 | 175 | |
178 | -#define L2T(cl,len) qdisc_l2t((cl)->R_tab,len) | |
176 | +#define L2T(cl, len) qdisc_l2t((cl)->R_tab, len) | |
179 | 177 | |
180 | -static __inline__ struct cbq_class * | |
178 | +static inline struct cbq_class * | |
181 | 179 | cbq_class_lookup(struct cbq_sched_data *q, u32 classid) |
182 | 180 | { |
183 | 181 | struct Qdisc_class_common *clc; |
184 | 182 | |
185 | 183 | |
186 | 184 | |
... | ... | @@ -193,25 +191,27 @@ |
193 | 191 | static struct cbq_class * |
194 | 192 | cbq_reclassify(struct sk_buff *skb, struct cbq_class *this) |
195 | 193 | { |
196 | - struct cbq_class *cl, *new; | |
194 | + struct cbq_class *cl; | |
197 | 195 | |
198 | - for (cl = this->tparent; cl; cl = cl->tparent) | |
199 | - if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this) | |
200 | - return new; | |
196 | + for (cl = this->tparent; cl; cl = cl->tparent) { | |
197 | + struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT]; | |
201 | 198 | |
199 | + if (new != NULL && new != this) | |
200 | + return new; | |
201 | + } | |
202 | 202 | return NULL; |
203 | 203 | } |
204 | 204 | |
205 | 205 | #endif |
206 | 206 | |
207 | 207 | /* Classify packet. The procedure is pretty complicated, but |
208 | - it allows us to combine link sharing and priority scheduling | |
209 | - transparently. | |
210 | - | |
211 | - Namely, you can put link sharing rules (f.e. route based) at root of CBQ, | |
212 | - so that it resolves to split nodes. Then packets are classified | |
213 | - by logical priority, or a more specific classifier may be attached | |
214 | - to the split node. | |
208 | + * it allows us to combine link sharing and priority scheduling | |
209 | + * transparently. | |
210 | + * | |
211 | + * Namely, you can put link sharing rules (f.e. route based) at root of CBQ, | |
212 | + * so that it resolves to split nodes. Then packets are classified | |
213 | + * by logical priority, or a more specific classifier may be attached | |
214 | + * to the split node. | |
215 | 215 | */ |
216 | 216 | |
217 | 217 | static struct cbq_class * |
... | ... | @@ -227,7 +227,7 @@ |
227 | 227 | /* |
228 | 228 | * Step 1. If skb->priority points to one of our classes, use it. |
229 | 229 | */ |
230 | - if (TC_H_MAJ(prio^sch->handle) == 0 && | |
230 | + if (TC_H_MAJ(prio ^ sch->handle) == 0 && | |
231 | 231 | (cl = cbq_class_lookup(q, prio)) != NULL) |
232 | 232 | return cl; |
233 | 233 | |
234 | 234 | |
... | ... | @@ -243,10 +243,11 @@ |
243 | 243 | (result = tc_classify_compat(skb, head->filter_list, &res)) < 0) |
244 | 244 | goto fallback; |
245 | 245 | |
246 | - if ((cl = (void*)res.class) == NULL) { | |
246 | + cl = (void *)res.class; | |
247 | + if (!cl) { | |
247 | 248 | if (TC_H_MAJ(res.classid)) |
248 | 249 | cl = cbq_class_lookup(q, res.classid); |
249 | - else if ((cl = defmap[res.classid&TC_PRIO_MAX]) == NULL) | |
250 | + else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL) | |
250 | 251 | cl = defmap[TC_PRIO_BESTEFFORT]; |
251 | 252 | |
252 | 253 | if (cl == NULL || cl->level >= head->level) |
... | ... | @@ -282,7 +283,7 @@ |
282 | 283 | * Step 4. No success... |
283 | 284 | */ |
284 | 285 | if (TC_H_MAJ(prio) == 0 && |
285 | - !(cl = head->defaults[prio&TC_PRIO_MAX]) && | |
286 | + !(cl = head->defaults[prio & TC_PRIO_MAX]) && | |
286 | 287 | !(cl = head->defaults[TC_PRIO_BESTEFFORT])) |
287 | 288 | return head; |
288 | 289 | |
289 | 290 | |
... | ... | @@ -290,12 +291,12 @@ |
290 | 291 | } |
291 | 292 | |
292 | 293 | /* |
293 | - A packet has just been enqueued on the empty class. | |
294 | - cbq_activate_class adds it to the tail of active class list | |
295 | - of its priority band. | |
294 | + * A packet has just been enqueued on the empty class. | |
295 | + * cbq_activate_class adds it to the tail of active class list | |
296 | + * of its priority band. | |
296 | 297 | */ |
297 | 298 | |
298 | -static __inline__ void cbq_activate_class(struct cbq_class *cl) | |
299 | +static inline void cbq_activate_class(struct cbq_class *cl) | |
299 | 300 | { |
300 | 301 | struct cbq_sched_data *q = qdisc_priv(cl->qdisc); |
301 | 302 | int prio = cl->cpriority; |
... | ... | @@ -314,9 +315,9 @@ |
314 | 315 | } |
315 | 316 | |
316 | 317 | /* |
317 | - Unlink class from active chain. | |
318 | - Note that this same procedure is done directly in cbq_dequeue* | |
319 | - during round-robin procedure. | |
318 | + * Unlink class from active chain. | |
319 | + * Note that this same procedure is done directly in cbq_dequeue* | |
320 | + * during round-robin procedure. | |
320 | 321 | */ |
321 | 322 | |
322 | 323 | static void cbq_deactivate_class(struct cbq_class *this) |
... | ... | @@ -350,7 +351,7 @@ |
350 | 351 | { |
351 | 352 | int toplevel = q->toplevel; |
352 | 353 | |
353 | - if (toplevel > cl->level && !(cl->q->flags&TCQ_F_THROTTLED)) { | |
354 | + if (toplevel > cl->level && !(cl->q->flags & TCQ_F_THROTTLED)) { | |
354 | 355 | psched_time_t now; |
355 | 356 | psched_tdiff_t incr; |
356 | 357 | |
... | ... | @@ -363,7 +364,7 @@ |
363 | 364 | q->toplevel = cl->level; |
364 | 365 | return; |
365 | 366 | } |
366 | - } while ((cl=cl->borrow) != NULL && toplevel > cl->level); | |
367 | + } while ((cl = cl->borrow) != NULL && toplevel > cl->level); | |
367 | 368 | } |
368 | 369 | } |
369 | 370 | |
... | ... | @@ -418,11 +419,11 @@ |
418 | 419 | delay += cl->offtime; |
419 | 420 | |
420 | 421 | /* |
421 | - Class goes to sleep, so that it will have no | |
422 | - chance to work avgidle. Let's forgive it 8) | |
423 | - | |
424 | - BTW cbq-2.0 has a crap in this | |
425 | - place, apparently they forgot to shift it by cl->ewma_log. | |
422 | + * Class goes to sleep, so that it will have no | |
423 | + * chance to work avgidle. Let's forgive it 8) | |
424 | + * | |
425 | + * BTW cbq-2.0 has a crap in this | |
426 | + * place, apparently they forgot to shift it by cl->ewma_log. | |
426 | 427 | */ |
427 | 428 | if (cl->avgidle < 0) |
428 | 429 | delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log); |
... | ... | @@ -439,8 +440,8 @@ |
439 | 440 | q->wd_expires = delay; |
440 | 441 | |
441 | 442 | /* Dirty work! We must schedule wakeups based on |
442 | - real available rate, rather than leaf rate, | |
443 | - which may be tiny (even zero). | |
443 | + * real available rate, rather than leaf rate, | |
444 | + * which may be tiny (even zero). | |
444 | 445 | */ |
445 | 446 | if (q->toplevel == TC_CBQ_MAXLEVEL) { |
446 | 447 | struct cbq_class *b; |
... | ... | @@ -460,7 +461,7 @@ |
460 | 461 | } |
461 | 462 | |
462 | 463 | /* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when |
463 | - they go overlimit | |
464 | + * they go overlimit | |
464 | 465 | */ |
465 | 466 | |
466 | 467 | static void cbq_ovl_rclassic(struct cbq_class *cl) |
... | ... | @@ -595,7 +596,7 @@ |
595 | 596 | struct Qdisc *sch = q->watchdog.qdisc; |
596 | 597 | psched_time_t now; |
597 | 598 | psched_tdiff_t delay = 0; |
598 | - unsigned pmask; | |
599 | + unsigned int pmask; | |
599 | 600 | |
600 | 601 | now = psched_get_time(); |
601 | 602 | |
602 | 603 | |
... | ... | @@ -665,15 +666,15 @@ |
665 | 666 | #endif |
666 | 667 | |
667 | 668 | /* |
668 | - It is mission critical procedure. | |
669 | + * It is mission critical procedure. | |
670 | + * | |
671 | + * We "regenerate" toplevel cutoff, if transmitting class | |
672 | + * has backlog and it is not regulated. It is not part of | |
673 | + * original CBQ description, but looks more reasonable. | |
674 | + * Probably, it is wrong. This question needs further investigation. | |
675 | + */ | |
669 | 676 | |
670 | - We "regenerate" toplevel cutoff, if transmitting class | |
671 | - has backlog and it is not regulated. It is not part of | |
672 | - original CBQ description, but looks more reasonable. | |
673 | - Probably, it is wrong. This question needs further investigation. | |
674 | -*/ | |
675 | - | |
676 | -static __inline__ void | |
677 | +static inline void | |
677 | 678 | cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl, |
678 | 679 | struct cbq_class *borrowed) |
679 | 680 | { |
... | ... | @@ -684,7 +685,7 @@ |
684 | 685 | q->toplevel = borrowed->level; |
685 | 686 | return; |
686 | 687 | } |
687 | - } while ((borrowed=borrowed->borrow) != NULL); | |
688 | + } while ((borrowed = borrowed->borrow) != NULL); | |
688 | 689 | } |
689 | 690 | #if 0 |
690 | 691 | /* It is not necessary now. Uncommenting it |
... | ... | @@ -712,10 +713,10 @@ |
712 | 713 | cl->bstats.bytes += len; |
713 | 714 | |
714 | 715 | /* |
715 | - (now - last) is total time between packet right edges. | |
716 | - (last_pktlen/rate) is "virtual" busy time, so that | |
717 | - | |
718 | - idle = (now - last) - last_pktlen/rate | |
716 | + * (now - last) is total time between packet right edges. | |
717 | + * (last_pktlen/rate) is "virtual" busy time, so that | |
718 | + * | |
719 | + * idle = (now - last) - last_pktlen/rate | |
719 | 720 | */ |
720 | 721 | |
721 | 722 | idle = q->now - cl->last; |
... | ... | @@ -725,9 +726,9 @@ |
725 | 726 | idle -= L2T(cl, len); |
726 | 727 | |
727 | 728 | /* true_avgidle := (1-W)*true_avgidle + W*idle, |
728 | - where W=2^{-ewma_log}. But cl->avgidle is scaled: | |
729 | - cl->avgidle == true_avgidle/W, | |
730 | - hence: | |
729 | + * where W=2^{-ewma_log}. But cl->avgidle is scaled: | |
730 | + * cl->avgidle == true_avgidle/W, | |
731 | + * hence: | |
731 | 732 | */ |
732 | 733 | avgidle += idle - (avgidle>>cl->ewma_log); |
733 | 734 | } |
734 | 735 | |
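
The identity behind the one-liner: with W = 2^-ewma_log and A = true_avgidle/W, the textbook update true' = (1-W)*true + W*idle becomes A' = A - A*W + idle, which is exactly avgidle += idle - (avgidle >> ewma_log). Isolated:

	/* Scaled EWMA: full precision, no floating point. Fed a constant
	 * idle with ewma_log = 3, avgidle converges to 8 * idle, i.e. the
	 * true average converges to idle, as expected.
	 */
	static long ewma_scaled(long avgidle, long idle, unsigned int ewma_log)
	{
		return avgidle + idle - (avgidle >> ewma_log);
	}
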
... | ... | @@ -741,22 +742,22 @@ |
741 | 742 | cl->avgidle = avgidle; |
742 | 743 | |
743 | 744 | /* Calculate expected time, when this class |
744 | - will be allowed to send. | |
745 | - It will occur, when: | |
746 | - (1-W)*true_avgidle + W*delay = 0, i.e. | |
747 | - idle = (1/W - 1)*(-true_avgidle) | |
748 | - or | |
749 | - idle = (1 - W)*(-cl->avgidle); | |
745 | + * will be allowed to send. | |
746 | + * It will occur, when: | |
747 | + * (1-W)*true_avgidle + W*delay = 0, i.e. | |
748 | + * idle = (1/W - 1)*(-true_avgidle) | |
749 | + * or | |
750 | + * idle = (1 - W)*(-cl->avgidle); | |
750 | 751 | */ |
751 | 752 | idle = (-avgidle) - ((-avgidle) >> cl->ewma_log); |
752 | 753 | |
753 | 754 | /* |
754 | - That is not all. | |
755 | - To maintain the rate allocated to the class, | |
756 | - we add to undertime virtual clock, | |
757 | - necessary to complete transmitted packet. | |
758 | - (len/phys_bandwidth has been already passed | |
759 | - to the moment of cbq_update) | |
755 | + * That is not all. | |
756 | + * To maintain the rate allocated to the class, | |
757 | + * we add to undertime virtual clock, | |
758 | + * necessary to complete transmitted packet. | |
759 | + * (len/phys_bandwidth has been already passed | |
760 | + * to the moment of cbq_update) | |
760 | 761 | */ |
761 | 762 | |
762 | 763 | idle -= L2T(&q->link, len); |
... | ... | @@ -778,7 +779,7 @@ |
778 | 779 | cbq_update_toplevel(q, this, q->tx_borrowed); |
779 | 780 | } |
780 | 781 | |
781 | -static __inline__ struct cbq_class * | |
782 | +static inline struct cbq_class * | |
782 | 783 | cbq_under_limit(struct cbq_class *cl) |
783 | 784 | { |
784 | 785 | struct cbq_sched_data *q = qdisc_priv(cl->qdisc); |
785 | 786 | |
... | ... | @@ -794,16 +795,17 @@ |
794 | 795 | |
795 | 796 | do { |
796 | 797 | /* It is very suspicious place. Now overlimit |
797 | - action is generated for not bounded classes | |
798 | - only if link is completely congested. | |
799 | - Though it is in agree with ancestor-only paradigm, | |
800 | - it looks very stupid. Particularly, | |
801 | - it means that this chunk of code will either | |
802 | - never be called or result in strong amplification | |
803 | - of burstiness. Dangerous, silly, and, however, | |
804 | - no another solution exists. | |
798 | + * action is generated for not bounded classes | |
799 | + * only if link is completely congested. | |
800 | + * Though it is in agree with ancestor-only paradigm, | |
801 | + * it looks very stupid. Particularly, | |
802 | + * it means that this chunk of code will either | |
803 | + * never be called or result in strong amplification | |
804 | + * of burstiness. Dangerous, silly, and, however, | |
805 | + * no another solution exists. | |
805 | 806 | */ |
806 | - if ((cl = cl->borrow) == NULL) { | |
807 | + cl = cl->borrow; | |
808 | + if (!cl) { | |
807 | 809 | this_cl->qstats.overlimits++; |
808 | 810 | this_cl->overlimit(this_cl); |
809 | 811 | return NULL; |
... | ... | @@ -816,7 +818,7 @@ |
816 | 818 | return cl; |
817 | 819 | } |
818 | 820 | |
819 | -static __inline__ struct sk_buff * | |
821 | +static inline struct sk_buff * | |
820 | 822 | cbq_dequeue_prio(struct Qdisc *sch, int prio) |
821 | 823 | { |
822 | 824 | struct cbq_sched_data *q = qdisc_priv(sch); |
... | ... | @@ -840,7 +842,7 @@ |
840 | 842 | |
841 | 843 | if (cl->deficit <= 0) { |
842 | 844 | /* Class exhausted its allotment per |
843 | - this round. Switch to the next one. | |
845 | + * this round. Switch to the next one. | |
844 | 846 | */ |
845 | 847 | deficit = 1; |
846 | 848 | cl->deficit += cl->quantum; |
... | ... | @@ -850,8 +852,8 @@ |
850 | 852 | skb = cl->q->dequeue(cl->q); |
851 | 853 | |
852 | 854 | /* Class did not give us any skb :-( |
853 | - It could occur even if cl->q->q.qlen != 0 | |
854 | - f.e. if cl->q == "tbf" | |
855 | + * It could occur even if cl->q->q.qlen != 0 | |
856 | + * f.e. if cl->q == "tbf" | |
855 | 857 | */ |
856 | 858 | if (skb == NULL) |
857 | 859 | goto skip_class; |
... | ... | @@ -880,7 +882,7 @@ |
880 | 882 | skip_class: |
881 | 883 | if (cl->q->q.qlen == 0 || prio != cl->cpriority) { |
882 | 884 | /* Class is empty or penalized. |
883 | - Unlink it from active chain. | |
885 | + * Unlink it from active chain. | |
884 | 886 | */ |
885 | 887 | cl_prev->next_alive = cl->next_alive; |
886 | 888 | cl->next_alive = NULL; |
887 | 889 | |
888 | 890 | |
... | ... | @@ -919,14 +921,14 @@ |
919 | 921 | return NULL; |
920 | 922 | } |
921 | 923 | |
922 | -static __inline__ struct sk_buff * | |
924 | +static inline struct sk_buff * | |
923 | 925 | cbq_dequeue_1(struct Qdisc *sch) |
924 | 926 | { |
925 | 927 | struct cbq_sched_data *q = qdisc_priv(sch); |
926 | 928 | struct sk_buff *skb; |
927 | - unsigned activemask; | |
929 | + unsigned int activemask; | |
928 | 930 | |
929 | - activemask = q->activemask&0xFF; | |
931 | + activemask = q->activemask & 0xFF; | |
930 | 932 | while (activemask) { |
931 | 933 | int prio = ffz(~activemask); |
932 | 934 | activemask &= ~(1<<prio); |
... | ... | @@ -951,11 +953,11 @@ |
951 | 953 | if (q->tx_class) { |
952 | 954 | psched_tdiff_t incr2; |
953 | 955 | /* Time integrator. We calculate EOS time |
954 | - by adding expected packet transmission time. | |
955 | - If real time is greater, we warp artificial clock, | |
956 | - so that: | |
957 | - | |
958 | - cbq_time = max(real_time, work); | |
956 | + * by adding expected packet transmission time. | |
957 | + * If real time is greater, we warp artificial clock, | |
958 | + * so that: | |
959 | + * | |
960 | + * cbq_time = max(real_time, work); | |
959 | 961 | */ |
960 | 962 | incr2 = L2T(&q->link, q->tx_len); |
961 | 963 | q->now += incr2; |
962 | 964 | |
... | ... | @@ -977,23 +979,23 @@ |
977 | 979 | } |
978 | 980 | |
979 | 981 | /* All the classes are overlimit. |
982 | + * | |
983 | + * It is possible, if: | |
984 | + * | |
985 | + * 1. Scheduler is empty. | |
986 | + * 2. Toplevel cutoff inhibited borrowing. | |
987 | + * 3. Root class is overlimit. | |
988 | + * | |
989 | + * Reset 2d and 3d conditions and retry. | |
990 | + * | |
991 | + * Note, that NS and cbq-2.0 are buggy, peeking | |
992 | + * an arbitrary class is appropriate for ancestor-only | |
993 | + * sharing, but not for toplevel algorithm. | |
994 | + * | |
995 | + * Our version is better, but slower, because it requires | |
996 | + * two passes, but it is unavoidable with top-level sharing. | |
997 | + */ | |
980 | 998 | |
981 | - It is possible, if: | |
982 | - | |
983 | - 1. Scheduler is empty. | |
984 | - 2. Toplevel cutoff inhibited borrowing. | |
985 | - 3. Root class is overlimit. | |
986 | - | |
987 | - Reset 2d and 3d conditions and retry. | |
988 | - | |
989 | - Note, that NS and cbq-2.0 are buggy, peeking | |
990 | - an arbitrary class is appropriate for ancestor-only | |
991 | - sharing, but not for toplevel algorithm. | |
992 | - | |
993 | - Our version is better, but slower, because it requires | |
994 | - two passes, but it is unavoidable with top-level sharing. | |
995 | - */ | |
996 | - | |
997 | 999 | if (q->toplevel == TC_CBQ_MAXLEVEL && |
998 | 1000 | q->link.undertime == PSCHED_PASTPERFECT) |
999 | 1001 | break; |
... | ... | @@ -1003,7 +1005,8 @@ |
1003 | 1005 | } |
1004 | 1006 | |
1005 | 1007 | /* No packets in scheduler or nobody wants to give them to us :-( |
1006 | - Sigh... start watchdog timer in the last case. */ | |
1008 | + * Sigh... start watchdog timer in the last case. | |
1009 | + */ | |
1007 | 1010 | |
1008 | 1011 | if (sch->q.qlen) { |
1009 | 1012 | sch->qstats.overlimits++; |
1010 | 1013 | |
... | ... | @@ -1025,13 +1028,14 @@ |
1025 | 1028 | int level = 0; |
1026 | 1029 | struct cbq_class *cl; |
1027 | 1030 | |
1028 | - if ((cl = this->children) != NULL) { | |
1031 | + cl = this->children; | |
1032 | + if (cl) { | |
1029 | 1033 | do { |
1030 | 1034 | if (cl->level > level) |
1031 | 1035 | level = cl->level; |
1032 | 1036 | } while ((cl = cl->sibling) != this->children); |
1033 | 1037 | } |
1034 | - this->level = level+1; | |
1038 | + this->level = level + 1; | |
1035 | 1039 | } while ((this = this->tparent) != NULL); |
1036 | 1040 | } |
1037 | 1041 | |
1038 | 1042 | |
... | ... | @@ -1047,14 +1051,15 @@ |
1047 | 1051 | for (h = 0; h < q->clhash.hashsize; h++) { |
1048 | 1052 | hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) { |
1049 | 1053 | /* BUGGGG... Beware! This expression suffer of |
1050 | - arithmetic overflows! | |
1054 | + * arithmetic overflows! | |
1051 | 1055 | */ |
1052 | 1056 | if (cl->priority == prio) { |
1053 | 1057 | cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/ |
1054 | 1058 | q->quanta[prio]; |
1055 | 1059 | } |
1056 | 1060 | if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) { |
1057 | - printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum); | |
1061 | + pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n", | |
1062 | + cl->common.classid, cl->quantum); | |
1058 | 1063 | cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1; |
1059 | 1064 | } |
1060 | 1065 | } |
1061 | 1066 | |
1062 | 1067 | |
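
With q->quanta[prio] the sum of the weights and q->nclasses[prio] the class count at that priority, the expression hands each class a per-round byte quantum proportional to its weight, averaging out at allot; the triple product is also what the overflow warning above is about. A worked example with invented numbers:

	/* Two classes at one priority, weights 1 and 3, allot 1514:
	 * quanta = 1 + 3 = 4, nclasses = 2, so
	 *   quantum(w=1) = 1 * 1514 * 2 / 4 =  757
	 *   quantum(w=3) = 3 * 1514 * 2 / 4 = 2271
	 * and the sum, 3028, equals nclasses * allot.
	 */
	static long wrr_quantum(long weight, long allot, int nclasses, long quanta)
	{
		return weight * allot * nclasses / quanta;
	}
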
... | ... | @@ -1065,18 +1070,18 @@ |
1065 | 1070 | { |
1066 | 1071 | struct cbq_sched_data *q = qdisc_priv(cl->qdisc); |
1067 | 1072 | struct cbq_class *split = cl->split; |
1068 | - unsigned h; | |
1073 | + unsigned int h; | |
1069 | 1074 | int i; |
1070 | 1075 | |
1071 | 1076 | if (split == NULL) |
1072 | 1077 | return; |
1073 | 1078 | |
1074 | - for (i=0; i<=TC_PRIO_MAX; i++) { | |
1075 | - if (split->defaults[i] == cl && !(cl->defmap&(1<<i))) | |
1079 | + for (i = 0; i <= TC_PRIO_MAX; i++) { | |
1080 | + if (split->defaults[i] == cl && !(cl->defmap & (1<<i))) | |
1076 | 1081 | split->defaults[i] = NULL; |
1077 | 1082 | } |
1078 | 1083 | |
1079 | - for (i=0; i<=TC_PRIO_MAX; i++) { | |
1084 | + for (i = 0; i <= TC_PRIO_MAX; i++) { | |
1080 | 1085 | int level = split->level; |
1081 | 1086 | |
1082 | 1087 | if (split->defaults[i]) |
... | ... | @@ -1089,7 +1094,7 @@ |
1089 | 1094 | hlist_for_each_entry(c, n, &q->clhash.hash[h], |
1090 | 1095 | common.hnode) { |
1091 | 1096 | if (c->split == split && c->level < level && |
1092 | - c->defmap&(1<<i)) { | |
1097 | + c->defmap & (1<<i)) { | |
1093 | 1098 | split->defaults[i] = c; |
1094 | 1099 | level = c->level; |
1095 | 1100 | } |
... | ... | @@ -1103,7 +1108,8 @@ |
1103 | 1108 | struct cbq_class *split = NULL; |
1104 | 1109 | |
1105 | 1110 | if (splitid == 0) { |
1106 | - if ((split = cl->split) == NULL) | |
1111 | + split = cl->split; | |
1112 | + if (!split) | |
1107 | 1113 | return; |
1108 | 1114 | splitid = split->common.classid; |
1109 | 1115 | } |
1110 | 1116 | |
... | ... | @@ -1121,9 +1127,9 @@ |
1121 | 1127 | cl->defmap = 0; |
1122 | 1128 | cbq_sync_defmap(cl); |
1123 | 1129 | cl->split = split; |
1124 | - cl->defmap = def&mask; | |
1130 | + cl->defmap = def & mask; | |
1125 | 1131 | } else |
1126 | - cl->defmap = (cl->defmap&~mask)|(def&mask); | |
1132 | + cl->defmap = (cl->defmap & ~mask) | (def & mask); | |
1127 | 1133 | |
1128 | 1134 | cbq_sync_defmap(cl); |
1129 | 1135 | } |
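
The else branch is the standard read-modify-write bit merge: bits selected by mask come from def, everything else keeps its current value. Isolated, with a sample result:

	/* merge_bits(0xF0, 0x0A, 0x0F) == 0xFA */
	static unsigned int merge_bits(unsigned int cur, unsigned int def,
				       unsigned int mask)
	{
		return (cur & ~mask) | (def & mask);
	}
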
... | ... | @@ -1136,7 +1142,7 @@ |
1136 | 1142 | qdisc_class_hash_remove(&q->clhash, &this->common); |
1137 | 1143 | |
1138 | 1144 | if (this->tparent) { |
1139 | - clp=&this->sibling; | |
1145 | + clp = &this->sibling; | |
1140 | 1146 | cl = *clp; |
1141 | 1147 | do { |
1142 | 1148 | if (cl == this) { |
... | ... | @@ -1175,7 +1181,7 @@ |
1175 | 1181 | } |
1176 | 1182 | } |
1177 | 1183 | |
1178 | -static unsigned int cbq_drop(struct Qdisc* sch) | |
1184 | +static unsigned int cbq_drop(struct Qdisc *sch) | |
1179 | 1185 | { |
1180 | 1186 | struct cbq_sched_data *q = qdisc_priv(sch); |
1181 | 1187 | struct cbq_class *cl, *cl_head; |
... | ... | @@ -1183,7 +1189,8 @@ |
1183 | 1189 | unsigned int len; |
1184 | 1190 | |
1185 | 1191 | for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) { |
1186 | - if ((cl_head = q->active[prio]) == NULL) | |
1192 | + cl_head = q->active[prio]; | |
1193 | + if (!cl_head) | |
1187 | 1194 | continue; |
1188 | 1195 | |
1189 | 1196 | cl = cl_head; |
1190 | 1197 | |
... | ... | @@ -1200,13 +1207,13 @@ |
1200 | 1207 | } |
1201 | 1208 | |
1202 | 1209 | static void |
1203 | -cbq_reset(struct Qdisc* sch) | |
1210 | +cbq_reset(struct Qdisc *sch) | |
1204 | 1211 | { |
1205 | 1212 | struct cbq_sched_data *q = qdisc_priv(sch); |
1206 | 1213 | struct cbq_class *cl; |
1207 | 1214 | struct hlist_node *n; |
1208 | 1215 | int prio; |
1209 | - unsigned h; | |
1216 | + unsigned int h; | |
1210 | 1217 | |
1211 | 1218 | q->activemask = 0; |
1212 | 1219 | q->pmask = 0; |
1213 | 1220 | |
1214 | 1221 | |
1215 | 1222 | |
1216 | 1223 | |
1217 | 1224 | |
... | ... | @@ -1238,21 +1245,21 @@ |
1238 | 1245 | |
1239 | 1246 | static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss) |
1240 | 1247 | { |
1241 | - if (lss->change&TCF_CBQ_LSS_FLAGS) { | |
1242 | - cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent; | |
1243 | - cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent; | |
1248 | + if (lss->change & TCF_CBQ_LSS_FLAGS) { | |
1249 | + cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent; | |
1250 | + cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent; | |
1244 | 1251 | } |
1245 | - if (lss->change&TCF_CBQ_LSS_EWMA) | |
1252 | + if (lss->change & TCF_CBQ_LSS_EWMA) | |
1246 | 1253 | cl->ewma_log = lss->ewma_log; |
1247 | - if (lss->change&TCF_CBQ_LSS_AVPKT) | |
1254 | + if (lss->change & TCF_CBQ_LSS_AVPKT) | |
1248 | 1255 | cl->avpkt = lss->avpkt; |
1249 | - if (lss->change&TCF_CBQ_LSS_MINIDLE) | |
1256 | + if (lss->change & TCF_CBQ_LSS_MINIDLE) | |
1250 | 1257 | cl->minidle = -(long)lss->minidle; |
1251 | - if (lss->change&TCF_CBQ_LSS_MAXIDLE) { | |
1258 | + if (lss->change & TCF_CBQ_LSS_MAXIDLE) { | |
1252 | 1259 | cl->maxidle = lss->maxidle; |
1253 | 1260 | cl->avgidle = lss->maxidle; |
1254 | 1261 | } |
1255 | - if (lss->change&TCF_CBQ_LSS_OFFTIME) | |
1262 | + if (lss->change & TCF_CBQ_LSS_OFFTIME) | |
1256 | 1263 | cl->offtime = lss->offtime; |
1257 | 1264 | return 0; |
1258 | 1265 | } |
1259 | 1266 | |
... | ... | @@ -1280,10 +1287,10 @@ |
1280 | 1287 | if (wrr->weight) |
1281 | 1288 | cl->weight = wrr->weight; |
1282 | 1289 | if (wrr->priority) { |
1283 | - cl->priority = wrr->priority-1; | |
1290 | + cl->priority = wrr->priority - 1; | |
1284 | 1291 | cl->cpriority = cl->priority; |
1285 | 1292 | if (cl->priority >= cl->priority2) |
1286 | - cl->priority2 = TC_CBQ_MAXPRIO-1; | |
1293 | + cl->priority2 = TC_CBQ_MAXPRIO - 1; | |
1287 | 1294 | } |
1288 | 1295 | |
1289 | 1296 | cbq_addprio(q, cl); |
1290 | 1297 | |
... | ... | @@ -1300,10 +1307,10 @@ |
1300 | 1307 | cl->overlimit = cbq_ovl_delay; |
1301 | 1308 | break; |
1302 | 1309 | case TC_CBQ_OVL_LOWPRIO: |
1303 | - if (ovl->priority2-1 >= TC_CBQ_MAXPRIO || | |
1304 | - ovl->priority2-1 <= cl->priority) | |
1310 | + if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO || | |
1311 | + ovl->priority2 - 1 <= cl->priority) | |
1305 | 1312 | return -EINVAL; |
1306 | - cl->priority2 = ovl->priority2-1; | |
1313 | + cl->priority2 = ovl->priority2 - 1; | |
1307 | 1314 | cl->overlimit = cbq_ovl_lowprio; |
1308 | 1315 | break; |
1309 | 1316 | case TC_CBQ_OVL_DROP: |
... | ... | @@ -1382,9 +1389,9 @@ |
1382 | 1389 | if (!q->link.q) |
1383 | 1390 | q->link.q = &noop_qdisc; |
1384 | 1391 | |
1385 | - q->link.priority = TC_CBQ_MAXPRIO-1; | |
1386 | - q->link.priority2 = TC_CBQ_MAXPRIO-1; | |
1387 | - q->link.cpriority = TC_CBQ_MAXPRIO-1; | |
1392 | + q->link.priority = TC_CBQ_MAXPRIO - 1; | |
1393 | + q->link.priority2 = TC_CBQ_MAXPRIO - 1; | |
1394 | + q->link.cpriority = TC_CBQ_MAXPRIO - 1; | |
1388 | 1395 | q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC; |
1389 | 1396 | q->link.overlimit = cbq_ovl_classic; |
1390 | 1397 | q->link.allot = psched_mtu(qdisc_dev(sch)); |
... | ... | @@ -1415,7 +1422,7 @@ |
1415 | 1422 | return err; |
1416 | 1423 | } |
1417 | 1424 | |
1418 | -static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) | |
1425 | +static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) | |
1419 | 1426 | { |
1420 | 1427 | unsigned char *b = skb_tail_pointer(skb); |
1421 | 1428 | |
... | ... | @@ -1427,7 +1434,7 @@ |
1427 | 1434 | return -1; |
1428 | 1435 | } |
1429 | 1436 | |
1430 | -static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) | |
1437 | +static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) | |
1431 | 1438 | { |
1432 | 1439 | unsigned char *b = skb_tail_pointer(skb); |
1433 | 1440 | struct tc_cbq_lssopt opt; |
1434 | 1441 | |
... | ... | @@ -1452,15 +1459,15 @@ |
1452 | 1459 | return -1; |
1453 | 1460 | } |
1454 | 1461 | |
1455 | -static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) | |
1462 | +static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) | |
1456 | 1463 | { |
1457 | 1464 | unsigned char *b = skb_tail_pointer(skb); |
1458 | 1465 | struct tc_cbq_wrropt opt; |
1459 | 1466 | |
1460 | 1467 | opt.flags = 0; |
1461 | 1468 | opt.allot = cl->allot; |
1462 | - opt.priority = cl->priority+1; | |
1463 | - opt.cpriority = cl->cpriority+1; | |
1469 | + opt.priority = cl->priority + 1; | |
1470 | + opt.cpriority = cl->cpriority + 1; | |
1464 | 1471 | opt.weight = cl->weight; |
1465 | 1472 | NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt); |
1466 | 1473 | return skb->len; |
1467 | 1474 | |
... | ... | @@ -1470,13 +1477,13 @@ |
1470 | 1477 | return -1; |
1471 | 1478 | } |
1472 | 1479 | |
1473 | -static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) | |
1480 | +static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) | |
1474 | 1481 | { |
1475 | 1482 | unsigned char *b = skb_tail_pointer(skb); |
1476 | 1483 | struct tc_cbq_ovl opt; |
1477 | 1484 | |
1478 | 1485 | opt.strategy = cl->ovl_strategy; |
1479 | - opt.priority2 = cl->priority2+1; | |
1486 | + opt.priority2 = cl->priority2 + 1; | |
1480 | 1487 | opt.pad = 0; |
1481 | 1488 | opt.penalty = cl->penalty; |
1482 | 1489 | NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt); |
... | ... | @@ -1487,7 +1494,7 @@ |
1487 | 1494 | return -1; |
1488 | 1495 | } |
1489 | 1496 | |
1490 | -static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) | |
1497 | +static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) | |
1491 | 1498 | { |
1492 | 1499 | unsigned char *b = skb_tail_pointer(skb); |
1493 | 1500 | struct tc_cbq_fopt opt; |
... | ... | @@ -1506,7 +1513,7 @@ |
1506 | 1513 | } |
1507 | 1514 | |
1508 | 1515 | #ifdef CONFIG_NET_CLS_ACT |
1509 | -static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) | |
1516 | +static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) | |
1510 | 1517 | { |
1511 | 1518 | unsigned char *b = skb_tail_pointer(skb); |
1512 | 1519 | struct tc_cbq_police opt; |
... | ... | @@ -1570,7 +1577,7 @@ |
1570 | 1577 | cbq_dump_class(struct Qdisc *sch, unsigned long arg, |
1571 | 1578 | struct sk_buff *skb, struct tcmsg *tcm) |
1572 | 1579 | { |
1573 | - struct cbq_class *cl = (struct cbq_class*)arg; | |
1580 | + struct cbq_class *cl = (struct cbq_class *)arg; | |
1574 | 1581 | struct nlattr *nest; |
1575 | 1582 | |
1576 | 1583 | if (cl->tparent) |
... | ... | @@ -1598,7 +1605,7 @@ |
1598 | 1605 | struct gnet_dump *d) |
1599 | 1606 | { |
1600 | 1607 | struct cbq_sched_data *q = qdisc_priv(sch); |
1601 | - struct cbq_class *cl = (struct cbq_class*)arg; | |
1608 | + struct cbq_class *cl = (struct cbq_class *)arg; | |
1602 | 1609 | |
1603 | 1610 | cl->qstats.qlen = cl->q->q.qlen; |
1604 | 1611 | cl->xstats.avgidle = cl->avgidle; |
... | ... | @@ -1618,7 +1625,7 @@ |
1618 | 1625 | static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, |
1619 | 1626 | struct Qdisc **old) |
1620 | 1627 | { |
1621 | - struct cbq_class *cl = (struct cbq_class*)arg; | |
1628 | + struct cbq_class *cl = (struct cbq_class *)arg; | |
1622 | 1629 | |
1623 | 1630 | if (new == NULL) { |
1624 | 1631 | new = qdisc_create_dflt(sch->dev_queue, |
1625 | 1632 | |
... | ... | @@ -1641,10 +1648,9 @@ |
1641 | 1648 | return 0; |
1642 | 1649 | } |
1643 | 1650 | |
1644 | -static struct Qdisc * | |
1645 | -cbq_leaf(struct Qdisc *sch, unsigned long arg) | |
1651 | +static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg) | |
1646 | 1652 | { |
1647 | - struct cbq_class *cl = (struct cbq_class*)arg; | |
1653 | + struct cbq_class *cl = (struct cbq_class *)arg; | |
1648 | 1654 | |
1649 | 1655 | return cl->q; |
1650 | 1656 | } |
1651 | 1657 | |
... | ... | @@ -1683,13 +1689,12 @@ |
1683 | 1689 | kfree(cl); |
1684 | 1690 | } |
1685 | 1691 | |
1686 | -static void | |
1687 | -cbq_destroy(struct Qdisc* sch) | |
1692 | +static void cbq_destroy(struct Qdisc *sch) | |
1688 | 1693 | { |
1689 | 1694 | struct cbq_sched_data *q = qdisc_priv(sch); |
1690 | 1695 | struct hlist_node *n, *next; |
1691 | 1696 | struct cbq_class *cl; |
1692 | - unsigned h; | |
1697 | + unsigned int h; | |
1693 | 1698 | |
1694 | 1699 | #ifdef CONFIG_NET_CLS_ACT |
1695 | 1700 | q->rx_class = NULL; |
... | ... | @@ -1713,7 +1718,7 @@ |
1713 | 1718 | |
1714 | 1719 | static void cbq_put(struct Qdisc *sch, unsigned long arg) |
1715 | 1720 | { |
1716 | - struct cbq_class *cl = (struct cbq_class*)arg; | |
1721 | + struct cbq_class *cl = (struct cbq_class *)arg; | |
1717 | 1722 | |
1718 | 1723 | if (--cl->refcnt == 0) { |
1719 | 1724 | #ifdef CONFIG_NET_CLS_ACT |
... | ... | @@ -1736,7 +1741,7 @@ |
1736 | 1741 | { |
1737 | 1742 | int err; |
1738 | 1743 | struct cbq_sched_data *q = qdisc_priv(sch); |
1739 | - struct cbq_class *cl = (struct cbq_class*)*arg; | |
1744 | + struct cbq_class *cl = (struct cbq_class *)*arg; | |
1740 | 1745 | struct nlattr *opt = tca[TCA_OPTIONS]; |
1741 | 1746 | struct nlattr *tb[TCA_CBQ_MAX + 1]; |
1742 | 1747 | struct cbq_class *parent; |
1743 | 1748 | |
1744 | 1749 | |
... | ... | @@ -1828,13 +1833,14 @@ |
1828 | 1833 | |
1829 | 1834 | if (classid) { |
1830 | 1835 | err = -EINVAL; |
1831 | - if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid)) | |
1836 | + if (TC_H_MAJ(classid ^ sch->handle) || | |
1837 | + cbq_class_lookup(q, classid)) | |
1832 | 1838 | goto failure; |
1833 | 1839 | } else { |
1834 | 1840 | int i; |
1835 | - classid = TC_H_MAKE(sch->handle,0x8000); | |
1841 | + classid = TC_H_MAKE(sch->handle, 0x8000); | |
1836 | 1842 | |
1837 | - for (i=0; i<0x8000; i++) { | |
1843 | + for (i = 0; i < 0x8000; i++) { | |
1838 | 1844 | if (++q->hgenerator >= 0x8000) |
1839 | 1845 | q->hgenerator = 1; |
1840 | 1846 | if (cbq_class_lookup(q, classid|q->hgenerator) == NULL) |
1841 | 1847 | |
1842 | 1848 | |
... | ... | @@ -1891,11 +1897,11 @@ |
1891 | 1897 | cl->minidle = -0x7FFFFFFF; |
1892 | 1898 | cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT])); |
1893 | 1899 | cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT])); |
1894 | - if (cl->ewma_log==0) | |
1900 | + if (cl->ewma_log == 0) | |
1895 | 1901 | cl->ewma_log = q->link.ewma_log; |
1896 | - if (cl->maxidle==0) | |
1902 | + if (cl->maxidle == 0) | |
1897 | 1903 | cl->maxidle = q->link.maxidle; |
1898 | - if (cl->avpkt==0) | |
1904 | + if (cl->avpkt == 0) | |
1899 | 1905 | cl->avpkt = q->link.avpkt; |
1900 | 1906 | cl->overlimit = cbq_ovl_classic; |
1901 | 1907 | if (tb[TCA_CBQ_OVL_STRATEGY]) |
... | ... | @@ -1921,7 +1927,7 @@ |
1921 | 1927 | static int cbq_delete(struct Qdisc *sch, unsigned long arg) |
1922 | 1928 | { |
1923 | 1929 | struct cbq_sched_data *q = qdisc_priv(sch); |
1924 | - struct cbq_class *cl = (struct cbq_class*)arg; | |
1930 | + struct cbq_class *cl = (struct cbq_class *)arg; | |
1925 | 1931 | unsigned int qlen; |
1926 | 1932 | |
1927 | 1933 | if (cl->filters || cl->children || cl == &q->link) |
... | ... | @@ -1979,7 +1985,7 @@ |
1979 | 1985 | u32 classid) |
1980 | 1986 | { |
1981 | 1987 | struct cbq_sched_data *q = qdisc_priv(sch); |
1982 | - struct cbq_class *p = (struct cbq_class*)parent; | |
1988 | + struct cbq_class *p = (struct cbq_class *)parent; | |
1983 | 1989 | struct cbq_class *cl = cbq_class_lookup(q, classid); |
1984 | 1990 | |
1985 | 1991 | if (cl) { |
... | ... | @@ -1993,7 +1999,7 @@ |
1993 | 1999 | |
1994 | 2000 | static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg) |
1995 | 2001 | { |
1996 | - struct cbq_class *cl = (struct cbq_class*)arg; | |
2002 | + struct cbq_class *cl = (struct cbq_class *)arg; | |
1997 | 2003 | |
1998 | 2004 | cl->filters--; |
1999 | 2005 | } |
... | ... | @@ -2003,7 +2009,7 @@ |
2003 | 2009 | struct cbq_sched_data *q = qdisc_priv(sch); |
2004 | 2010 | struct cbq_class *cl; |
2005 | 2011 | struct hlist_node *n; |
2006 | - unsigned h; | |
2012 | + unsigned int h; | |
2007 | 2013 | |
2008 | 2014 | if (arg->stop) |
2009 | 2015 | return; |
net/sched/sch_dsmark.c
... | ... | @@ -137,10 +137,10 @@ |
137 | 137 | mask = nla_get_u8(tb[TCA_DSMARK_MASK]); |
138 | 138 | |
139 | 139 | if (tb[TCA_DSMARK_VALUE]) |
140 | - p->value[*arg-1] = nla_get_u8(tb[TCA_DSMARK_VALUE]); | |
140 | + p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]); | |
141 | 141 | |
142 | 142 | if (tb[TCA_DSMARK_MASK]) |
143 | - p->mask[*arg-1] = mask; | |
143 | + p->mask[*arg - 1] = mask; | |
144 | 144 | |
145 | 145 | err = 0; |
146 | 146 | |
... | ... | @@ -155,8 +155,8 @@ |
155 | 155 | if (!dsmark_valid_index(p, arg)) |
156 | 156 | return -EINVAL; |
157 | 157 | |
158 | - p->mask[arg-1] = 0xff; | |
159 | - p->value[arg-1] = 0; | |
158 | + p->mask[arg - 1] = 0xff; | |
159 | + p->value[arg - 1] = 0; | |
160 | 160 | |
161 | 161 | return 0; |
162 | 162 | } |
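For context on what these mask/value pairs mean: on dequeue, dsmark rewrites the DS field as (old & mask) | value, so deleting a class resets its pair to the pass-through values mask 0xff / value 0. A sketch of the rewrite with sample numbers (userspace, not the kernel helper):

    #include <stdio.h>

    static unsigned char remark(unsigned char dsfield, unsigned char mask,
                                unsigned char value)
    {
            return (dsfield & mask) | value;
    }

    int main(void)
    {
            /* keep the ECN bits (mask 0x03), force DSCP EF (0xb8) */
            printf("0x%02x\n", remark(0x2e << 2 | 0x1, 0x03, 0xb8));
            return 0;
    }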
... | ... | @@ -175,7 +175,7 @@ |
175 | 175 | if (p->mask[i] == 0xff && !p->value[i]) |
176 | 176 | goto ignore; |
177 | 177 | if (walker->count >= walker->skip) { |
178 | - if (walker->fn(sch, i+1, walker) < 0) { | |
178 | + if (walker->fn(sch, i + 1, walker) < 0) { | |
179 | 179 | walker->stop = 1; |
180 | 180 | break; |
181 | 181 | } |
... | ... | @@ -304,9 +304,8 @@ |
304 | 304 | * and don't need yet another qdisc as a bypass. |
305 | 305 | */ |
306 | 306 | if (p->mask[index] != 0xff || p->value[index]) |
307 | - printk(KERN_WARNING | |
308 | - "dsmark_dequeue: unsupported protocol %d\n", | |
309 | - ntohs(skb->protocol)); | |
307 | + pr_warning("dsmark_dequeue: unsupported protocol %d\n", | |
308 | + ntohs(skb->protocol)); | |
310 | 309 | break; |
311 | 310 | } |
312 | 311 | |
313 | 312 | |
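This is the printk(KERN_WARNING ...) to pr_warning(...) conversion the commit applies throughout the tree. In the kernel both are macros over printk; a rough userspace stand-in showing the layering (illustrative only, the real macros also honor a pr_fmt() prefix):

    #include <stdio.h>
    #include <stdarg.h>

    #define KERN_WARNING "<4>"    /* log level travels as a string prefix */

    static void pr_warning(const char *fmt, ...)
    {
            va_list ap;

            fputs(KERN_WARNING, stderr);
            va_start(ap, fmt);
            vfprintf(stderr, fmt, ap);
            va_end(ap);
    }

    int main(void)
    {
            pr_warning("dsmark_dequeue: unsupported protocol %d\n", 42);
            return 0;
    }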
... | ... | @@ -424,14 +423,14 @@ |
424 | 423 | if (!dsmark_valid_index(p, cl)) |
425 | 424 | return -EINVAL; |
426 | 425 | |
427 | - tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1); | |
426 | + tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1); | |
428 | 427 | tcm->tcm_info = p->q->handle; |
429 | 428 | |
430 | 429 | opts = nla_nest_start(skb, TCA_OPTIONS); |
431 | 430 | if (opts == NULL) |
432 | 431 | goto nla_put_failure; |
433 | - NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]); | |
434 | - NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]); | |
432 | + NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]); | |
433 | + NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]); | |
435 | 434 | |
436 | 435 | return nla_nest_end(skb, opts); |
437 | 436 |
net/sched/sch_fifo.c
... | ... | @@ -19,12 +19,11 @@ |
19 | 19 | |
20 | 20 | /* 1 band FIFO pseudo-"scheduler" */ |
21 | 21 | |
22 | -struct fifo_sched_data | |
23 | -{ | |
22 | +struct fifo_sched_data { | |
24 | 23 | u32 limit; |
25 | 24 | }; |
26 | 25 | |
27 | -static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |
26 | +static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |
28 | 27 | { |
29 | 28 | struct fifo_sched_data *q = qdisc_priv(sch); |
30 | 29 | |
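The qdisc_priv(sch) idiom visible in this hunk hands back the per-qdisc private area that lives immediately after struct Qdisc in the same allocation. A stand-in with plain structs (the real helper also rounds the offset up for cache alignment):

    #include <stdio.h>
    #include <stdlib.h>

    struct Qdisc_stub { int handle; /* ...the generic fields... */ };

    struct fifo_sched_data { unsigned int limit; };

    static void *qdisc_priv(struct Qdisc_stub *q)
    {
            /* private data sits right past the generic header */
            return (char *)q + sizeof(struct Qdisc_stub);
    }

    int main(void)
    {
            struct Qdisc_stub *sch =
                    calloc(1, sizeof(*sch) + sizeof(struct fifo_sched_data));
            struct fifo_sched_data *q = qdisc_priv(sch);

            q->limit = 1000;
            printf("limit = %u\n", q->limit);
            free(sch);
            return 0;
    }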
... | ... | @@ -34,7 +33,7 @@ |
34 | 33 | return qdisc_reshape_fail(skb, sch); |
35 | 34 | } |
36 | 35 | |
37 | -static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |
36 | +static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |
38 | 37 | { |
39 | 38 | struct fifo_sched_data *q = qdisc_priv(sch); |
40 | 39 | |
... | ... | @@ -44,7 +43,7 @@ |
44 | 43 | return qdisc_reshape_fail(skb, sch); |
45 | 44 | } |
46 | 45 | |
47 | -static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |
46 | +static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |
48 | 47 | { |
49 | 48 | struct sk_buff *skb_head; |
50 | 49 | struct fifo_sched_data *q = qdisc_priv(sch); |
net/sched/sch_generic.c
... | ... | @@ -87,8 +87,8 @@ |
87 | 87 | */ |
88 | 88 | kfree_skb(skb); |
89 | 89 | if (net_ratelimit()) |
90 | - printk(KERN_WARNING "Dead loop on netdevice %s, " | |
91 | - "fix it urgently!\n", dev_queue->dev->name); | |
90 | + pr_warning("Dead loop on netdevice %s, fix it urgently!\n", | |
91 | + dev_queue->dev->name); | |
92 | 92 | ret = qdisc_qlen(q); |
93 | 93 | } else { |
94 | 94 | /* |
... | ... | @@ -137,8 +137,8 @@ |
137 | 137 | } else { |
138 | 138 | /* Driver returned NETDEV_TX_BUSY - requeue skb */ |
139 | 139 | if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit())) |
140 | - printk(KERN_WARNING "BUG %s code %d qlen %d\n", | |
141 | - dev->name, ret, q->q.qlen); | |
140 | + pr_warning("BUG %s code %d qlen %d\n", | |
141 | + dev->name, ret, q->q.qlen); | |
142 | 142 | |
143 | 143 | ret = dev_requeue_skb(skb, q); |
144 | 144 | } |
... | ... | @@ -412,8 +412,9 @@ |
412 | 412 | }; |
413 | 413 | |
414 | 414 | |
415 | -static const u8 prio2band[TC_PRIO_MAX+1] = | |
416 | - { 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 }; | |
415 | +static const u8 prio2band[TC_PRIO_MAX + 1] = { | |
416 | + 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 | |
417 | +}; | |
417 | 418 | |
418 | 419 | /* 3-band FIFO queue: old style, but should be a bit faster than |
419 | 420 | generic prio+fifo combination. |
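The reformatted table is essentially all of pfifo_fast's classification: the skb priority (0..15, derived from the TOS byte) indexes straight into it, and band 0 always drains before band 1 before band 2. The lookup in isolation (userspace sketch):

    #include <stdio.h>

    #define TC_PRIO_MAX 15

    static const unsigned char prio2band[TC_PRIO_MAX + 1] = {
            1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
    };

    int main(void)
    {
            unsigned int priority = 6;      /* interactive traffic */

            /* the '& TC_PRIO_MAX' keeps out-of-range priorities in bounds */
            printf("priority %u -> band %u\n", priority,
                   prio2band[priority & TC_PRIO_MAX]);
            return 0;
    }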
... | ... | @@ -445,7 +446,7 @@ |
445 | 446 | return priv->q + band; |
446 | 447 | } |
447 | 448 | |
448 | -static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) | |
449 | +static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc) | |
449 | 450 | { |
450 | 451 | if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) { |
451 | 452 | int band = prio2band[skb->priority & TC_PRIO_MAX]; |
... | ... | @@ -460,7 +461,7 @@ |
460 | 461 | return qdisc_drop(skb, qdisc); |
461 | 462 | } |
462 | 463 | |
463 | -static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) | |
464 | +static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc) | |
464 | 465 | { |
465 | 466 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
466 | 467 | int band = bitmap2band[priv->bitmap]; |
... | ... | @@ -479,7 +480,7 @@ |
479 | 480 | return NULL; |
480 | 481 | } |
481 | 482 | |
482 | -static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc) | |
483 | +static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc) | |
483 | 484 | { |
484 | 485 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
485 | 486 | int band = bitmap2band[priv->bitmap]; |
... | ... | @@ -493,7 +494,7 @@ |
493 | 494 | return NULL; |
494 | 495 | } |
495 | 496 | |
496 | -static void pfifo_fast_reset(struct Qdisc* qdisc) | |
497 | +static void pfifo_fast_reset(struct Qdisc *qdisc) | |
497 | 498 | { |
498 | 499 | int prio; |
499 | 500 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
... | ... | @@ -510,7 +511,7 @@ |
510 | 511 | { |
511 | 512 | struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; |
512 | 513 | |
513 | - memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1); | |
514 | + memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1); | |
514 | 515 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); |
515 | 516 | return skb->len; |
516 | 517 | |
517 | 518 | |
518 | 519 | |
... | ... | @@ -681,20 +682,18 @@ |
681 | 682 | struct netdev_queue *dev_queue, |
682 | 683 | void *_unused) |
683 | 684 | { |
684 | - struct Qdisc *qdisc; | |
685 | + struct Qdisc *qdisc = &noqueue_qdisc; | |
685 | 686 | |
686 | 687 | if (dev->tx_queue_len) { |
687 | 688 | qdisc = qdisc_create_dflt(dev_queue, |
688 | 689 | &pfifo_fast_ops, TC_H_ROOT); |
689 | 690 | if (!qdisc) { |
690 | - printk(KERN_INFO "%s: activation failed\n", dev->name); | |
691 | + netdev_info(dev, "activation failed\n"); | |
691 | 692 | return; |
692 | 693 | } |
693 | 694 | |
694 | 695 | /* Can by-pass the queue discipline for default qdisc */ |
695 | 696 | qdisc->flags |= TCQ_F_CAN_BYPASS; |
696 | - } else { | |
697 | - qdisc = &noqueue_qdisc; | |
698 | 697 | } |
699 | 698 | dev_queue->qdisc_sleeping = qdisc; |
700 | 699 | } |
net/sched/sch_gred.c
... | ... | @@ -32,8 +32,7 @@ |
32 | 32 | struct gred_sched_data; |
33 | 33 | struct gred_sched; |
34 | 34 | |
35 | -struct gred_sched_data | |
36 | -{ | |
35 | +struct gred_sched_data { | |
37 | 36 | u32 limit; /* HARD maximal queue length */ |
38 | 37 | u32 DP; /* the drop pramaters */ |
39 | 38 | u32 bytesin; /* bytes seen on virtualQ so far*/ |
... | ... | @@ -50,8 +49,7 @@ |
50 | 49 | GRED_RIO_MODE, |
51 | 50 | }; |
52 | 51 | |
53 | -struct gred_sched | |
54 | -{ | |
52 | +struct gred_sched { | |
55 | 53 | struct gred_sched_data *tab[MAX_DPs]; |
56 | 54 | unsigned long flags; |
57 | 55 | u32 red_flags; |
58 | 56 | |
59 | 57 | |
60 | 58 | |
... | ... | @@ -150,17 +148,18 @@ |
150 | 148 | return t->red_flags & TC_RED_HARDDROP; |
151 | 149 | } |
152 | 150 | |
153 | -static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |
151 | +static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |
154 | 152 | { |
155 | - struct gred_sched_data *q=NULL; | |
156 | - struct gred_sched *t= qdisc_priv(sch); | |
153 | + struct gred_sched_data *q = NULL; | |
154 | + struct gred_sched *t = qdisc_priv(sch); | |
157 | 155 | unsigned long qavg = 0; |
158 | 156 | u16 dp = tc_index_to_dp(skb); |
159 | 157 | |
160 | - if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { | |
158 | + if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { | |
161 | 159 | dp = t->def; |
162 | 160 | |
163 | - if ((q = t->tab[dp]) == NULL) { | |
161 | + q = t->tab[dp]; | |
162 | + if (!q) { | |
164 | 163 | /* Pass through packets not assigned to a DP |
165 | 164 | * if no default DP has been configured. This |
166 | 165 | * allows for DP flows to be left untouched. |
... | ... | @@ -183,7 +182,7 @@ |
183 | 182 | for (i = 0; i < t->DPs; i++) { |
184 | 183 | if (t->tab[i] && t->tab[i]->prio < q->prio && |
185 | 184 | !red_is_idling(&t->tab[i]->parms)) |
186 | - qavg +=t->tab[i]->parms.qavg; | |
185 | + qavg += t->tab[i]->parms.qavg; | |
187 | 186 | } |
188 | 187 | |
189 | 188 | } |
190 | 189 | |
191 | 190 | |
192 | 191 | |
... | ... | @@ -203,28 +202,28 @@ |
203 | 202 | gred_store_wred_set(t, q); |
204 | 203 | |
205 | 204 | switch (red_action(&q->parms, q->parms.qavg + qavg)) { |
206 | - case RED_DONT_MARK: | |
207 | - break; | |
205 | + case RED_DONT_MARK: | |
206 | + break; | |
208 | 207 | |
209 | - case RED_PROB_MARK: | |
210 | - sch->qstats.overlimits++; | |
211 | - if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) { | |
212 | - q->stats.prob_drop++; | |
213 | - goto congestion_drop; | |
214 | - } | |
208 | + case RED_PROB_MARK: | |
209 | + sch->qstats.overlimits++; | |
210 | + if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) { | |
211 | + q->stats.prob_drop++; | |
212 | + goto congestion_drop; | |
213 | + } | |
215 | 214 | |
216 | - q->stats.prob_mark++; | |
217 | - break; | |
215 | + q->stats.prob_mark++; | |
216 | + break; | |
218 | 217 | |
219 | - case RED_HARD_MARK: | |
220 | - sch->qstats.overlimits++; | |
221 | - if (gred_use_harddrop(t) || !gred_use_ecn(t) || | |
222 | - !INET_ECN_set_ce(skb)) { | |
223 | - q->stats.forced_drop++; | |
224 | - goto congestion_drop; | |
225 | - } | |
226 | - q->stats.forced_mark++; | |
227 | - break; | |
218 | + case RED_HARD_MARK: | |
219 | + sch->qstats.overlimits++; | |
220 | + if (gred_use_harddrop(t) || !gred_use_ecn(t) || | |
221 | + !INET_ECN_set_ce(skb)) { | |
222 | + q->stats.forced_drop++; | |
223 | + goto congestion_drop; | |
224 | + } | |
225 | + q->stats.forced_mark++; | |
226 | + break; | |
228 | 227 | } |
229 | 228 | |
230 | 229 | if (q->backlog + qdisc_pkt_len(skb) <= q->limit) { |
... | ... | @@ -241,7 +240,7 @@ |
241 | 240 | return NET_XMIT_CN; |
242 | 241 | } |
243 | 242 | |
244 | -static struct sk_buff *gred_dequeue(struct Qdisc* sch) | |
243 | +static struct sk_buff *gred_dequeue(struct Qdisc *sch) | |
245 | 244 | { |
246 | 245 | struct sk_buff *skb; |
247 | 246 | struct gred_sched *t = qdisc_priv(sch); |
... | ... | @@ -254,9 +253,9 @@ |
254 | 253 | |
255 | 254 | if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { |
256 | 255 | if (net_ratelimit()) |
257 | - printk(KERN_WARNING "GRED: Unable to relocate " | |
258 | - "VQ 0x%x after dequeue, screwing up " | |
259 | - "backlog.\n", tc_index_to_dp(skb)); | |
256 | + pr_warning("GRED: Unable to relocate VQ 0x%x " | |
257 | + "after dequeue, screwing up " | |
258 | + "backlog.\n", tc_index_to_dp(skb)); | |
260 | 259 | } else { |
261 | 260 | q->backlog -= qdisc_pkt_len(skb); |
262 | 261 | |
... | ... | @@ -273,7 +272,7 @@ |
273 | 272 | return NULL; |
274 | 273 | } |
275 | 274 | |
276 | -static unsigned int gred_drop(struct Qdisc* sch) | |
275 | +static unsigned int gred_drop(struct Qdisc *sch) | |
277 | 276 | { |
278 | 277 | struct sk_buff *skb; |
279 | 278 | struct gred_sched *t = qdisc_priv(sch); |
... | ... | @@ -286,9 +285,9 @@ |
286 | 285 | |
287 | 286 | if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { |
288 | 287 | if (net_ratelimit()) |
289 | - printk(KERN_WARNING "GRED: Unable to relocate " | |
290 | - "VQ 0x%x while dropping, screwing up " | |
291 | - "backlog.\n", tc_index_to_dp(skb)); | |
288 | + pr_warning("GRED: Unable to relocate VQ 0x%x " | |
289 | + "while dropping, screwing up " | |
290 | + "backlog.\n", tc_index_to_dp(skb)); | |
292 | 291 | } else { |
293 | 292 | q->backlog -= len; |
294 | 293 | q->stats.other++; |
... | ... | @@ -308,7 +307,7 @@ |
308 | 307 | |
309 | 308 | } |
310 | 309 | |
311 | -static void gred_reset(struct Qdisc* sch) | |
310 | +static void gred_reset(struct Qdisc *sch) | |
312 | 311 | { |
313 | 312 | int i; |
314 | 313 | struct gred_sched *t = qdisc_priv(sch); |
... | ... | @@ -369,8 +368,8 @@ |
369 | 368 | |
370 | 369 | for (i = table->DPs; i < MAX_DPs; i++) { |
371 | 370 | if (table->tab[i]) { |
372 | - printk(KERN_WARNING "GRED: Warning: Destroying " | |
373 | - "shadowed VQ 0x%x\n", i); | |
371 | + pr_warning("GRED: Warning: Destroying " | |
372 | + "shadowed VQ 0x%x\n", i); | |
374 | 373 | gred_destroy_vq(table->tab[i]); |
375 | 374 | table->tab[i] = NULL; |
376 | 375 | } |
net/sched/sch_hfsc.c
... | ... | @@ -81,8 +81,7 @@ |
81 | 81 | * that are expensive on 32-bit architectures. |
82 | 82 | */ |
83 | 83 | |
84 | -struct internal_sc | |
85 | -{ | |
84 | +struct internal_sc { | |
86 | 85 | u64 sm1; /* scaled slope of the 1st segment */ |
87 | 86 | u64 ism1; /* scaled inverse-slope of the 1st segment */ |
88 | 87 | u64 dx; /* the x-projection of the 1st segment */ |
... | ... | @@ -92,8 +91,7 @@ |
92 | 91 | }; |
93 | 92 | |
94 | 93 | /* runtime service curve */ |
95 | -struct runtime_sc | |
96 | -{ | |
94 | +struct runtime_sc { | |
97 | 95 | u64 x; /* current starting position on x-axis */ |
98 | 96 | u64 y; /* current starting position on y-axis */ |
99 | 97 | u64 sm1; /* scaled slope of the 1st segment */ |
100 | 98 | |
... | ... | @@ -104,15 +102,13 @@ |
104 | 102 | u64 ism2; /* scaled inverse-slope of the 2nd segment */ |
105 | 103 | }; |
106 | 104 | |
107 | -enum hfsc_class_flags | |
108 | -{ | |
105 | +enum hfsc_class_flags { | |
109 | 106 | HFSC_RSC = 0x1, |
110 | 107 | HFSC_FSC = 0x2, |
111 | 108 | HFSC_USC = 0x4 |
112 | 109 | }; |
113 | 110 | |
114 | -struct hfsc_class | |
115 | -{ | |
111 | +struct hfsc_class { | |
116 | 112 | struct Qdisc_class_common cl_common; |
117 | 113 | unsigned int refcnt; /* usage count */ |
118 | 114 | |
... | ... | @@ -140,8 +136,8 @@ |
140 | 136 | u64 cl_cumul; /* cumulative work in bytes done by |
141 | 137 | real-time criteria */ |
142 | 138 | |
143 | - u64 cl_d; /* deadline*/ | |
144 | - u64 cl_e; /* eligible time */ | |
139 | + u64 cl_d; /* deadline*/ | |
140 | + u64 cl_e; /* eligible time */ | |
145 | 141 | u64 cl_vt; /* virtual time */ |
146 | 142 | u64 cl_f; /* time when this class will fit for |
147 | 143 | link-sharing, max(myf, cfmin) */ |
... | ... | @@ -176,8 +172,7 @@ |
176 | 172 | unsigned long cl_nactive; /* number of active children */ |
177 | 173 | }; |
178 | 174 | |
179 | -struct hfsc_sched | |
180 | -{ | |
175 | +struct hfsc_sched { | |
181 | 176 | u16 defcls; /* default class id */ |
182 | 177 | struct hfsc_class root; /* root class */ |
183 | 178 | struct Qdisc_class_hash clhash; /* class hash */ |
... | ... | @@ -693,7 +688,7 @@ |
693 | 688 | if (go_active) { |
694 | 689 | n = rb_last(&cl->cl_parent->vt_tree); |
695 | 690 | if (n != NULL) { |
696 | - max_cl = rb_entry(n, struct hfsc_class,vt_node); | |
691 | + max_cl = rb_entry(n, struct hfsc_class, vt_node); | |
697 | 692 | /* |
698 | 693 | * set vt to the average of the min and max |
699 | 694 | * classes. if the parent's period didn't |
... | ... | @@ -1177,8 +1172,10 @@ |
1177 | 1172 | return NULL; |
1178 | 1173 | } |
1179 | 1174 | #endif |
1180 | - if ((cl = (struct hfsc_class *)res.class) == NULL) { | |
1181 | - if ((cl = hfsc_find_class(res.classid, sch)) == NULL) | |
1175 | + cl = (struct hfsc_class *)res.class; | |
1176 | + if (!cl) { | |
1177 | + cl = hfsc_find_class(res.classid, sch); | |
1178 | + if (!cl) | |
1182 | 1179 | break; /* filter selected invalid classid */ |
1183 | 1180 | if (cl->level >= head->level) |
1184 | 1181 | break; /* filter may only point downwards */ |
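This transformation recurs through the whole commit: if ((cl = lookup()) == NULL) becomes a plain assignment followed by if (!cl). Behaviour is identical; checkpatch.pl simply flags assignments buried inside conditions. Side by side, with find_class() as a made-up stand-in:

    #include <stdio.h>

    struct obj { int id; };

    static struct obj pool[1] = { { 1 } };

    static struct obj *find_class(int id)
    {
            return id == pool[0].id ? &pool[0] : NULL;  /* stand-in lookup */
    }

    int main(void)
    {
            struct obj *cl;

            /* old style: if ((cl = find_class(1)) == NULL) return 1; */
            cl = find_class(1);
            if (!cl)
                    return 1;
            printf("found class %d\n", cl->id);
            return 0;
    }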
... | ... | @@ -1316,7 +1313,7 @@ |
1316 | 1313 | return -1; |
1317 | 1314 | } |
1318 | 1315 | |
1319 | -static inline int | |
1316 | +static int | |
1320 | 1317 | hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl) |
1321 | 1318 | { |
1322 | 1319 | if ((cl->cl_flags & HFSC_RSC) && |
... | ... | @@ -1420,7 +1417,8 @@ |
1420 | 1417 | struct hfsc_class *cl; |
1421 | 1418 | u64 next_time = 0; |
1422 | 1419 | |
1423 | - if ((cl = eltree_get_minel(q)) != NULL) | |
1420 | + cl = eltree_get_minel(q); | |
1421 | + if (cl) | |
1424 | 1422 | next_time = cl->cl_e; |
1425 | 1423 | if (q->root.cl_cfmin != 0) { |
1426 | 1424 | if (next_time == 0 || next_time > q->root.cl_cfmin) |
... | ... | @@ -1626,7 +1624,8 @@ |
1626 | 1624 | * find the class with the minimum deadline among |
1627 | 1625 | * the eligible classes. |
1628 | 1626 | */ |
1629 | - if ((cl = eltree_get_mindl(q, cur_time)) != NULL) { | |
1627 | + cl = eltree_get_mindl(q, cur_time); | |
1628 | + if (cl) { | |
1630 | 1629 | realtime = 1; |
1631 | 1630 | } else { |
1632 | 1631 | /* |
net/sched/sch_htb.c
... | ... | @@ -99,9 +99,10 @@ |
99 | 99 | struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */ |
100 | 100 | struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */ |
101 | 101 | /* When class changes from state 1->2 and disconnects from |
102 | - parent's feed then we lost ptr value and start from the | |
103 | - first child again. Here we store classid of the | |
104 | - last valid ptr (used when ptr is NULL). */ | |
102 | + * parent's feed then we lost ptr value and start from the | |
103 | + * first child again. Here we store classid of the | |
104 | + * last valid ptr (used when ptr is NULL). | |
105 | + */ | |
105 | 106 | u32 last_ptr_id[TC_HTB_NUMPRIO]; |
106 | 107 | } inner; |
107 | 108 | } un; |
... | ... | @@ -185,7 +186,7 @@ |
185 | 186 | * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull |
186 | 187 | * then finish and return direct queue. |
187 | 188 | */ |
188 | -#define HTB_DIRECT (struct htb_class*)-1 | |
189 | +#define HTB_DIRECT ((struct htb_class *)-1L) | |
189 | 190 | |
190 | 191 | static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, |
191 | 192 | int *qerr) |
192 | 193 | |
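Two small hardenings hide in that one-line macro change: the cast expression gains outer parentheses so the macro expands as a single term wherever it is substituted, and -1 becomes -1L so the constant is pointer-width before conversion. The sentinel still means "skip classification, use the direct queue":

    #include <stdio.h>

    struct htb_class { int level; };

    #define HTB_DIRECT ((struct htb_class *)-1L)

    int main(void)
    {
            struct htb_class *cl = HTB_DIRECT;

            /* safe to compare or assign anywhere, thanks to the parens */
            if (cl == HTB_DIRECT)
                    printf("direct queue selected\n");
            return 0;
    }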
... | ... | @@ -197,11 +198,13 @@ |
197 | 198 | int result; |
198 | 199 | |
199 | 200 | /* allow to select class by setting skb->priority to valid classid; |
200 | - note that nfmark can be used too by attaching filter fw with no | |
201 | - rules in it */ | |
201 | + * note that nfmark can be used too by attaching filter fw with no | |
202 | + * rules in it | |
203 | + */ | |
202 | 204 | if (skb->priority == sch->handle) |
203 | 205 | return HTB_DIRECT; /* X:0 (direct flow) selected */ |
204 | - if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0) | |
206 | + cl = htb_find(skb->priority, sch); | |
207 | + if (cl && cl->level == 0) | |
205 | 208 | return cl; |
206 | 209 | |
207 | 210 | *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; |
208 | 211 | |
... | ... | @@ -216,10 +219,12 @@ |
216 | 219 | return NULL; |
217 | 220 | } |
218 | 221 | #endif |
219 | - if ((cl = (void *)res.class) == NULL) { | |
222 | + cl = (void *)res.class; | |
223 | + if (!cl) { | |
220 | 224 | if (res.classid == sch->handle) |
221 | 225 | return HTB_DIRECT; /* X:0 (direct flow) */ |
222 | - if ((cl = htb_find(res.classid, sch)) == NULL) | |
226 | + cl = htb_find(res.classid, sch); | |
227 | + if (!cl) | |
223 | 228 | break; /* filter selected invalid classid */ |
224 | 229 | } |
225 | 230 | if (!cl->level) |
... | ... | @@ -378,7 +383,8 @@ |
378 | 383 | |
379 | 384 | if (p->un.inner.feed[prio].rb_node) |
380 | 385 | /* parent already has its feed in use so that |
381 | - reset bit in mask as parent is already ok */ | |
386 | + * reset bit in mask as parent is already ok | |
387 | + */ | |
382 | 388 | mask &= ~(1 << prio); |
383 | 389 | |
384 | 390 | htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio); |
... | ... | @@ -413,8 +419,9 @@ |
413 | 419 | |
414 | 420 | if (p->un.inner.ptr[prio] == cl->node + prio) { |
415 | 421 | /* we are removing child which is pointed to from |
416 | - parent feed - forget the pointer but remember | |
417 | - classid */ | |
422 | + * parent feed - forget the pointer but remember | |
423 | + * classid | |
424 | + */ | |
418 | 425 | p->un.inner.last_ptr_id[prio] = cl->common.classid; |
419 | 426 | p->un.inner.ptr[prio] = NULL; |
420 | 427 | } |
... | ... | @@ -664,8 +671,9 @@ |
664 | 671 | unsigned long start) |
665 | 672 | { |
666 | 673 | /* don't run for longer than 2 jiffies; 2 is used instead of |
667 | - 1 to simplify things when jiffy is going to be incremented | |
668 | - too soon */ | |
674 | + * 1 to simplify things when jiffy is going to be incremented | |
675 | + * too soon | |
676 | + */ | |
669 | 677 | unsigned long stop_at = start + 2; |
670 | 678 | while (time_before(jiffies, stop_at)) { |
671 | 679 | struct htb_class *cl; |
... | ... | @@ -688,7 +696,7 @@ |
688 | 696 | |
689 | 697 | /* too much load - let's continue after a break for scheduling */ |
690 | 698 | if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { |
691 | - printk(KERN_WARNING "htb: too many events!\n"); | |
699 | + pr_warning("htb: too many events!\n"); | |
692 | 700 | q->warned |= HTB_WARN_TOOMANYEVENTS; |
693 | 701 | } |
694 | 702 | |
... | ... | @@ -696,7 +704,8 @@ |
696 | 704 | } |
697 | 705 | |
698 | 706 | /* Returns class->node+prio from id-tree where classe's id is >= id. NULL |
699 | - is no such one exists. */ | |
707 | + * is no such one exists. | |
708 | + */ | |
700 | 709 | static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n, |
701 | 710 | u32 id) |
702 | 711 | { |
703 | 712 | |
... | ... | @@ -740,12 +749,14 @@ |
740 | 749 | for (i = 0; i < 65535; i++) { |
741 | 750 | if (!*sp->pptr && *sp->pid) { |
742 | 751 | /* ptr was invalidated but id is valid - try to recover |
743 | - the original or next ptr */ | |
752 | + * the original or next ptr | |
753 | + */ | |
744 | 754 | *sp->pptr = |
745 | 755 | htb_id_find_next_upper(prio, sp->root, *sp->pid); |
746 | 756 | } |
747 | 757 | *sp->pid = 0; /* ptr is valid now so that remove this hint as it |
748 | - can become out of date quickly */ | |
758 | + * can become out of date quickly | |
759 | + */ | |
749 | 760 | if (!*sp->pptr) { /* we are at right end; rewind & go up */ |
750 | 761 | *sp->pptr = sp->root; |
751 | 762 | while ((*sp->pptr)->rb_left) |
... | ... | @@ -773,7 +784,8 @@ |
773 | 784 | } |
774 | 785 | |
775 | 786 | /* dequeues packet at given priority and level; call only if |
776 | - you are sure that there is active class at prio/level */ | |
787 | + * you are sure that there is active class at prio/level | |
788 | + */ | |
777 | 789 | static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio, |
778 | 790 | int level) |
779 | 791 | { |
... | ... | @@ -790,9 +802,10 @@ |
790 | 802 | return NULL; |
791 | 803 | |
792 | 804 | /* class can be empty - it is unlikely but can be true if leaf |
793 | - qdisc drops packets in enqueue routine or if someone used | |
794 | - graft operation on the leaf since last dequeue; | |
795 | - simply deactivate and skip such class */ | |
805 | + * qdisc drops packets in enqueue routine or if someone used | |
806 | + * graft operation on the leaf since last dequeue; | |
807 | + * simply deactivate and skip such class | |
808 | + */ | |
796 | 809 | if (unlikely(cl->un.leaf.q->q.qlen == 0)) { |
797 | 810 | struct htb_class *next; |
798 | 811 | htb_deactivate(q, cl); |
... | ... | @@ -832,7 +845,8 @@ |
832 | 845 | ptr[0]) + prio); |
833 | 846 | } |
834 | 847 | /* this used to be after charge_class but this constelation |
835 | - gives us slightly better performance */ | |
848 | + * gives us slightly better performance | |
849 | + */ | |
836 | 850 | if (!cl->un.leaf.q->q.qlen) |
837 | 851 | htb_deactivate(q, cl); |
838 | 852 | htb_charge_class(q, cl, level, skb); |
... | ... | @@ -882,6 +896,7 @@ |
882 | 896 | m = ~q->row_mask[level]; |
883 | 897 | while (m != (int)(-1)) { |
884 | 898 | int prio = ffz(m); |
899 | + | |
885 | 900 | m |= 1 << prio; |
886 | 901 | skb = htb_dequeue_tree(q, prio, level); |
887 | 902 | if (likely(skb != NULL)) { |
888 | 903 | |
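The dequeue scan above walks priorities through a bitmask: row_mask carries one set bit per priority with backlog, and complementing it then repeatedly taking ffz() (find first zero) visits the active priorities from the most urgent upward. A userspace rendering, with ffz emulated via GCC's __builtin_ctz on the complement:

    #include <stdio.h>

    static int ffz(unsigned int x)
    {
            return __builtin_ctz(~x);   /* first zero bit of x */
    }

    int main(void)
    {
            unsigned int row_mask = 0x05;   /* prios 0 and 2 have packets */
            int m = ~row_mask;

            while (m != -1) {
                    int prio = ffz(m);

                    m |= 1 << prio;         /* mark this prio visited */
                    printf("dequeue from prio %d\n", prio);
            }
            return 0;
    }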
... | ... | @@ -989,13 +1004,12 @@ |
989 | 1004 | return err; |
990 | 1005 | |
991 | 1006 | if (tb[TCA_HTB_INIT] == NULL) { |
992 | - printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n"); | |
1007 | + pr_err("HTB: hey probably you have bad tc tool ?\n"); | |
993 | 1008 | return -EINVAL; |
994 | 1009 | } |
995 | 1010 | gopt = nla_data(tb[TCA_HTB_INIT]); |
996 | 1011 | if (gopt->version != HTB_VER >> 16) { |
997 | - printk(KERN_ERR | |
998 | - "HTB: need tc/htb version %d (minor is %d), you have %d\n", | |
1012 | + pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n", | |
999 | 1013 | HTB_VER >> 16, HTB_VER & 0xffff, gopt->version); |
1000 | 1014 | return -EINVAL; |
1001 | 1015 | } |
... | ... | @@ -1208,9 +1222,10 @@ |
1208 | 1222 | cancel_work_sync(&q->work); |
1209 | 1223 | qdisc_watchdog_cancel(&q->watchdog); |
1210 | 1224 | /* This line used to be after htb_destroy_class call below |
1211 | - and surprisingly it worked in 2.4. But it must precede it | |
1212 | - because filter need its target class alive to be able to call | |
1213 | - unbind_filter on it (without Oops). */ | |
1225 | + * and surprisingly it worked in 2.4. But it must precede it | |
1226 | + * because filter need its target class alive to be able to call | |
1227 | + * unbind_filter on it (without Oops). | |
1228 | + */ | |
1214 | 1229 | tcf_destroy_chain(&q->filter_list); |
1215 | 1230 | |
1216 | 1231 | for (i = 0; i < q->clhash.hashsize; i++) { |
1217 | 1232 | |
... | ... | @@ -1344,11 +1359,12 @@ |
1344 | 1359 | |
1345 | 1360 | /* check maximal depth */ |
1346 | 1361 | if (parent && parent->parent && parent->parent->level < 2) { |
1347 | - printk(KERN_ERR "htb: tree is too deep\n"); | |
1362 | + pr_err("htb: tree is too deep\n"); | |
1348 | 1363 | goto failure; |
1349 | 1364 | } |
1350 | 1365 | err = -ENOBUFS; |
1351 | - if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL) | |
1366 | + cl = kzalloc(sizeof(*cl), GFP_KERNEL); | |
1367 | + if (!cl) | |
1352 | 1368 | goto failure; |
1353 | 1369 | |
1354 | 1370 | err = gen_new_estimator(&cl->bstats, &cl->rate_est, |
... | ... | @@ -1368,8 +1384,9 @@ |
1368 | 1384 | RB_CLEAR_NODE(&cl->node[prio]); |
1369 | 1385 | |
1370 | 1386 | /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) |
1371 | - so that can't be used inside of sch_tree_lock | |
1372 | - -- thanks to Karlis Peisenieks */ | |
1387 | + * so that can't be used inside of sch_tree_lock | |
1388 | + * -- thanks to Karlis Peisenieks | |
1389 | + */ | |
1373 | 1390 | new_q = qdisc_create_dflt(sch->dev_queue, |
1374 | 1391 | &pfifo_qdisc_ops, classid); |
1375 | 1392 | sch_tree_lock(sch); |
1376 | 1393 | |
1377 | 1394 | |
... | ... | @@ -1421,17 +1438,18 @@ |
1421 | 1438 | } |
1422 | 1439 | |
1423 | 1440 | /* it used to be a nasty bug here, we have to check that node |
1424 | - is really leaf before changing cl->un.leaf ! */ | |
1441 | + * is really leaf before changing cl->un.leaf ! | |
1442 | + */ | |
1425 | 1443 | if (!cl->level) { |
1426 | 1444 | cl->quantum = rtab->rate.rate / q->rate2quantum; |
1427 | 1445 | if (!hopt->quantum && cl->quantum < 1000) { |
1428 | - printk(KERN_WARNING | |
1446 | + pr_warning( | |
1429 | 1447 | "HTB: quantum of class %X is small. Consider r2q change.\n", |
1430 | 1448 | cl->common.classid); |
1431 | 1449 | cl->quantum = 1000; |
1432 | 1450 | } |
1433 | 1451 | if (!hopt->quantum && cl->quantum > 200000) { |
1434 | - printk(KERN_WARNING | |
1452 | + pr_warning( | |
1435 | 1453 | "HTB: quantum of class %X is big. Consider r2q change.\n", |
1436 | 1454 | cl->common.classid); |
1437 | 1455 | cl->quantum = 200000; |
... | ... | @@ -1480,13 +1498,13 @@ |
1480 | 1498 | struct htb_class *cl = htb_find(classid, sch); |
1481 | 1499 | |
1482 | 1500 | /*if (cl && !cl->level) return 0; |
1483 | - The line above used to be there to prevent attaching filters to | |
1484 | - leaves. But at least tc_index filter uses this just to get class | |
1485 | - for other reasons so that we have to allow for it. | |
1486 | - ---- | |
1487 | - 19.6.2002 As Werner explained it is ok - bind filter is just | |
1488 | - another way to "lock" the class - unlike "get" this lock can | |
1489 | - be broken by class during destroy IIUC. | |
1501 | + * The line above used to be there to prevent attaching filters to | |
1502 | + * leaves. But at least tc_index filter uses this just to get class | |
1503 | + * for other reasons so that we have to allow for it. | |
1504 | + * ---- | |
1505 | + * 19.6.2002 As Werner explained it is ok - bind filter is just | |
1506 | + * another way to "lock" the class - unlike "get" this lock can | |
1507 | + * be broken by class during destroy IIUC. | |
1490 | 1508 | */ |
1491 | 1509 | if (cl) |
1492 | 1510 | cl->filter_cnt++; |
net/sched/sch_multiq.c
... | ... | @@ -156,7 +156,7 @@ |
156 | 156 | unsigned int len; |
157 | 157 | struct Qdisc *qdisc; |
158 | 158 | |
159 | - for (band = q->bands-1; band >= 0; band--) { | |
159 | + for (band = q->bands - 1; band >= 0; band--) { | |
160 | 160 | qdisc = q->queues[band]; |
161 | 161 | if (qdisc->ops->drop) { |
162 | 162 | len = qdisc->ops->drop(qdisc); |
... | ... | @@ -265,7 +265,7 @@ |
265 | 265 | for (i = 0; i < q->max_bands; i++) |
266 | 266 | q->queues[i] = &noop_qdisc; |
267 | 267 | |
268 | - err = multiq_tune(sch,opt); | |
268 | + err = multiq_tune(sch, opt); | |
269 | 269 | |
270 | 270 | if (err) |
271 | 271 | kfree(q->queues); |
... | ... | @@ -346,7 +346,7 @@ |
346 | 346 | struct multiq_sched_data *q = qdisc_priv(sch); |
347 | 347 | |
348 | 348 | tcm->tcm_handle |= TC_H_MIN(cl); |
349 | - tcm->tcm_info = q->queues[cl-1]->handle; | |
349 | + tcm->tcm_info = q->queues[cl - 1]->handle; | |
350 | 350 | return 0; |
351 | 351 | } |
352 | 352 | |
... | ... | @@ -378,7 +378,7 @@ |
378 | 378 | arg->count++; |
379 | 379 | continue; |
380 | 380 | } |
381 | - if (arg->fn(sch, band+1, arg) < 0) { | |
381 | + if (arg->fn(sch, band + 1, arg) < 0) { | |
382 | 382 | arg->stop = 1; |
383 | 383 | break; |
384 | 384 | } |
net/sched/sch_netem.c
... | ... | @@ -211,8 +211,8 @@ |
211 | 211 | } |
212 | 212 | |
213 | 213 | cb = netem_skb_cb(skb); |
214 | - if (q->gap == 0 || /* not doing reordering */ | |
215 | - q->counter < q->gap || /* inside last reordering gap */ | |
214 | + if (q->gap == 0 || /* not doing reordering */ | |
215 | + q->counter < q->gap || /* inside last reordering gap */ | |
216 | 216 | q->reorder < get_crandom(&q->reorder_cor)) { |
217 | 217 | psched_time_t now; |
218 | 218 | psched_tdiff_t delay; |
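The three-way condition kept as context here is netem's whole reordering policy: a packet is delayed normally while inside the configured gap, or if it wins the reorder coin flip; otherwise it jumps to the head of the queue, overtaking its delayed peers. A userspace sketch of just that decision (probability shown as a percentage for readability; the kernel uses a fixed-point value with optional correlation):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            unsigned int gap = 4, counter = 0, reorder_pct = 50;
            int pkt;

            srand(1);
            for (pkt = 0; pkt < 12; pkt++) {
                    if (gap == 0 ||             /* not doing reordering */
                        counter < gap ||        /* inside last reordering gap */
                        (unsigned int)(rand() % 100) >= reorder_pct) {
                            counter++;
                            printf("pkt %2d: delayed normally\n", pkt);
                    } else {
                            counter = 0;
                            printf("pkt %2d: sent to head (reordered)\n", pkt);
                    }
            }
            return 0;
    }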
... | ... | @@ -249,7 +249,7 @@ |
249 | 249 | return ret; |
250 | 250 | } |
251 | 251 | |
252 | -static unsigned int netem_drop(struct Qdisc* sch) | |
252 | +static unsigned int netem_drop(struct Qdisc *sch) | |
253 | 253 | { |
254 | 254 | struct netem_sched_data *q = qdisc_priv(sch); |
255 | 255 | unsigned int len = 0; |
net/sched/sch_prio.c
... | ... | @@ -22,8 +22,7 @@ |
22 | 22 | #include <net/pkt_sched.h> |
23 | 23 | |
24 | 24 | |
25 | -struct prio_sched_data | |
26 | -{ | |
25 | +struct prio_sched_data { | |
27 | 26 | int bands; |
28 | 27 | struct tcf_proto *filter_list; |
29 | 28 | u8 prio2band[TC_PRIO_MAX+1]; |
... | ... | @@ -54,7 +53,7 @@ |
54 | 53 | if (!q->filter_list || err < 0) { |
55 | 54 | if (TC_H_MAJ(band)) |
56 | 55 | band = 0; |
57 | - return q->queues[q->prio2band[band&TC_PRIO_MAX]]; | |
56 | + return q->queues[q->prio2band[band & TC_PRIO_MAX]]; | |
58 | 57 | } |
59 | 58 | band = res.classid; |
60 | 59 | } |
... | ... | @@ -107,7 +106,7 @@ |
107 | 106 | return NULL; |
108 | 107 | } |
109 | 108 | |
110 | -static struct sk_buff *prio_dequeue(struct Qdisc* sch) | |
109 | +static struct sk_buff *prio_dequeue(struct Qdisc *sch) | |
111 | 110 | { |
112 | 111 | struct prio_sched_data *q = qdisc_priv(sch); |
113 | 112 | int prio; |
... | ... | @@ -124,7 +123,7 @@ |
124 | 123 | |
125 | 124 | } |
126 | 125 | |
127 | -static unsigned int prio_drop(struct Qdisc* sch) | |
126 | +static unsigned int prio_drop(struct Qdisc *sch) | |
128 | 127 | { |
129 | 128 | struct prio_sched_data *q = qdisc_priv(sch); |
130 | 129 | int prio; |
131 | 130 | |
132 | 131 | |
133 | 132 | |
... | ... | @@ -143,24 +142,24 @@ |
143 | 142 | |
144 | 143 | |
145 | 144 | static void |
146 | -prio_reset(struct Qdisc* sch) | |
145 | +prio_reset(struct Qdisc *sch) | |
147 | 146 | { |
148 | 147 | int prio; |
149 | 148 | struct prio_sched_data *q = qdisc_priv(sch); |
150 | 149 | |
151 | - for (prio=0; prio<q->bands; prio++) | |
150 | + for (prio = 0; prio < q->bands; prio++) | |
152 | 151 | qdisc_reset(q->queues[prio]); |
153 | 152 | sch->q.qlen = 0; |
154 | 153 | } |
155 | 154 | |
156 | 155 | static void |
157 | -prio_destroy(struct Qdisc* sch) | |
156 | +prio_destroy(struct Qdisc *sch) | |
158 | 157 | { |
159 | 158 | int prio; |
160 | 159 | struct prio_sched_data *q = qdisc_priv(sch); |
161 | 160 | |
162 | 161 | tcf_destroy_chain(&q->filter_list); |
163 | - for (prio=0; prio<q->bands; prio++) | |
162 | + for (prio = 0; prio < q->bands; prio++) | |
164 | 163 | qdisc_destroy(q->queues[prio]); |
165 | 164 | } |
166 | 165 | |
... | ... | @@ -177,7 +176,7 @@ |
177 | 176 | if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2) |
178 | 177 | return -EINVAL; |
179 | 178 | |
180 | - for (i=0; i<=TC_PRIO_MAX; i++) { | |
179 | + for (i = 0; i <= TC_PRIO_MAX; i++) { | |
181 | 180 | if (qopt->priomap[i] >= qopt->bands) |
182 | 181 | return -EINVAL; |
183 | 182 | } |
... | ... | @@ -186,7 +185,7 @@ |
186 | 185 | q->bands = qopt->bands; |
187 | 186 | memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); |
188 | 187 | |
189 | - for (i=q->bands; i<TCQ_PRIO_BANDS; i++) { | |
188 | + for (i = q->bands; i < TCQ_PRIO_BANDS; i++) { | |
190 | 189 | struct Qdisc *child = q->queues[i]; |
191 | 190 | q->queues[i] = &noop_qdisc; |
192 | 191 | if (child != &noop_qdisc) { |
193 | 192 | |
... | ... | @@ -196,9 +195,10 @@ |
196 | 195 | } |
197 | 196 | sch_tree_unlock(sch); |
198 | 197 | |
199 | - for (i=0; i<q->bands; i++) { | |
198 | + for (i = 0; i < q->bands; i++) { | |
200 | 199 | if (q->queues[i] == &noop_qdisc) { |
201 | 200 | struct Qdisc *child, *old; |
201 | + | |
202 | 202 | child = qdisc_create_dflt(sch->dev_queue, |
203 | 203 | &pfifo_qdisc_ops, |
204 | 204 | TC_H_MAKE(sch->handle, i + 1)); |
... | ... | @@ -224,7 +224,7 @@ |
224 | 224 | struct prio_sched_data *q = qdisc_priv(sch); |
225 | 225 | int i; |
226 | 226 | |
227 | - for (i=0; i<TCQ_PRIO_BANDS; i++) | |
227 | + for (i = 0; i < TCQ_PRIO_BANDS; i++) | |
228 | 228 | q->queues[i] = &noop_qdisc; |
229 | 229 | |
230 | 230 | if (opt == NULL) { |
... | ... | @@ -232,7 +232,7 @@ |
232 | 232 | } else { |
233 | 233 | int err; |
234 | 234 | |
235 | - if ((err= prio_tune(sch, opt)) != 0) | |
235 | + if ((err = prio_tune(sch, opt)) != 0) | |
236 | 236 | return err; |
237 | 237 | } |
238 | 238 | return 0; |
... | ... | @@ -245,7 +245,7 @@ |
245 | 245 | struct tc_prio_qopt opt; |
246 | 246 | |
247 | 247 | opt.bands = q->bands; |
248 | - memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); | |
248 | + memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1); | |
249 | 249 | |
250 | 250 | NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); |
251 | 251 | |
... | ... | @@ -342,7 +342,7 @@ |
342 | 342 | arg->count++; |
343 | 343 | continue; |
344 | 344 | } |
345 | - if (arg->fn(sch, prio+1, arg) < 0) { | |
345 | + if (arg->fn(sch, prio + 1, arg) < 0) { | |
346 | 346 | arg->stop = 1; |
347 | 347 | break; |
348 | 348 | } |
... | ... | @@ -350,7 +350,7 @@ |
350 | 350 | } |
351 | 351 | } |
352 | 352 | |
353 | -static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl) | |
353 | +static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl) | |
354 | 354 | { |
355 | 355 | struct prio_sched_data *q = qdisc_priv(sch); |
356 | 356 |
net/sched/sch_red.c
... | ... | @@ -36,8 +36,7 @@ |
36 | 36 | if RED works correctly. |
37 | 37 | */ |
38 | 38 | |
39 | -struct red_sched_data | |
40 | -{ | |
39 | +struct red_sched_data { | |
41 | 40 | u32 limit; /* HARD maximal queue length */ |
42 | 41 | unsigned char flags; |
43 | 42 | struct red_parms parms; |
... | ... | @@ -55,7 +54,7 @@ |
55 | 54 | return q->flags & TC_RED_HARDDROP; |
56 | 55 | } |
57 | 56 | |
58 | -static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |
57 | +static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |
59 | 58 | { |
60 | 59 | struct red_sched_data *q = qdisc_priv(sch); |
61 | 60 | struct Qdisc *child = q->qdisc; |
62 | 61 | |
63 | 62 | |
64 | 63 | |
65 | 64 | |
... | ... | @@ -67,29 +66,29 @@ |
67 | 66 | red_end_of_idle_period(&q->parms); |
68 | 67 | |
69 | 68 | switch (red_action(&q->parms, q->parms.qavg)) { |
70 | - case RED_DONT_MARK: | |
71 | - break; | |
69 | + case RED_DONT_MARK: | |
70 | + break; | |
72 | 71 | |
73 | - case RED_PROB_MARK: | |
74 | - sch->qstats.overlimits++; | |
75 | - if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) { | |
76 | - q->stats.prob_drop++; | |
77 | - goto congestion_drop; | |
78 | - } | |
72 | + case RED_PROB_MARK: | |
73 | + sch->qstats.overlimits++; | |
74 | + if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) { | |
75 | + q->stats.prob_drop++; | |
76 | + goto congestion_drop; | |
77 | + } | |
79 | 78 | |
80 | - q->stats.prob_mark++; | |
81 | - break; | |
79 | + q->stats.prob_mark++; | |
80 | + break; | |
82 | 81 | |
83 | - case RED_HARD_MARK: | |
84 | - sch->qstats.overlimits++; | |
85 | - if (red_use_harddrop(q) || !red_use_ecn(q) || | |
86 | - !INET_ECN_set_ce(skb)) { | |
87 | - q->stats.forced_drop++; | |
88 | - goto congestion_drop; | |
89 | - } | |
82 | + case RED_HARD_MARK: | |
83 | + sch->qstats.overlimits++; | |
84 | + if (red_use_harddrop(q) || !red_use_ecn(q) || | |
85 | + !INET_ECN_set_ce(skb)) { | |
86 | + q->stats.forced_drop++; | |
87 | + goto congestion_drop; | |
88 | + } | |
90 | 89 | |
91 | - q->stats.forced_mark++; | |
92 | - break; | |
90 | + q->stats.forced_mark++; | |
91 | + break; | |
93 | 92 | } |
94 | 93 | |
95 | 94 | ret = qdisc_enqueue(skb, child); |
... | ... | @@ -107,7 +106,7 @@ |
107 | 106 | return NET_XMIT_CN; |
108 | 107 | } |
109 | 108 | |
110 | -static struct sk_buff * red_dequeue(struct Qdisc* sch) | |
109 | +static struct sk_buff *red_dequeue(struct Qdisc *sch) | |
111 | 110 | { |
112 | 111 | struct sk_buff *skb; |
113 | 112 | struct red_sched_data *q = qdisc_priv(sch); |
... | ... | @@ -122,7 +121,7 @@ |
122 | 121 | return skb; |
123 | 122 | } |
124 | 123 | |
125 | -static struct sk_buff * red_peek(struct Qdisc* sch) | |
124 | +static struct sk_buff *red_peek(struct Qdisc *sch) | |
126 | 125 | { |
127 | 126 | struct red_sched_data *q = qdisc_priv(sch); |
128 | 127 | struct Qdisc *child = q->qdisc; |
... | ... | @@ -130,7 +129,7 @@ |
130 | 129 | return child->ops->peek(child); |
131 | 130 | } |
132 | 131 | |
133 | -static unsigned int red_drop(struct Qdisc* sch) | |
132 | +static unsigned int red_drop(struct Qdisc *sch) | |
134 | 133 | { |
135 | 134 | struct red_sched_data *q = qdisc_priv(sch); |
136 | 135 | struct Qdisc *child = q->qdisc; |
... | ... | @@ -149,7 +148,7 @@ |
149 | 148 | return 0; |
150 | 149 | } |
151 | 150 | |
152 | -static void red_reset(struct Qdisc* sch) | |
151 | +static void red_reset(struct Qdisc *sch) | |
153 | 152 | { |
154 | 153 | struct red_sched_data *q = qdisc_priv(sch); |
155 | 154 | |
... | ... | @@ -216,7 +215,7 @@ |
216 | 215 | return 0; |
217 | 216 | } |
218 | 217 | |
219 | -static int red_init(struct Qdisc* sch, struct nlattr *opt) | |
218 | +static int red_init(struct Qdisc *sch, struct nlattr *opt) | |
220 | 219 | { |
221 | 220 | struct red_sched_data *q = qdisc_priv(sch); |
222 | 221 |
net/sched/sch_sfq.c
... | ... | @@ -92,8 +92,7 @@ |
92 | 92 | * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1] |
93 | 93 | * are 'pointers' to dep[] array |
94 | 94 | */ |
95 | -struct sfq_head | |
96 | -{ | |
95 | +struct sfq_head { | |
97 | 96 | sfq_index next; |
98 | 97 | sfq_index prev; |
99 | 98 | }; |
100 | 99 | |
... | ... | @@ -108,11 +107,10 @@ |
108 | 107 | short allot; /* credit for this slot */ |
109 | 108 | }; |
110 | 109 | |
111 | -struct sfq_sched_data | |
112 | -{ | |
110 | +struct sfq_sched_data { | |
113 | 111 | /* Parameters */ |
114 | 112 | int perturb_period; |
115 | - unsigned quantum; /* Allotment per round: MUST BE >= MTU */ | |
113 | + unsigned int quantum; /* Allotment per round: MUST BE >= MTU */ | |
116 | 114 | int limit; |
117 | 115 | |
118 | 116 | /* Variables */ |
119 | 117 | |
... | ... | @@ -137,12 +135,12 @@ |
137 | 135 | return &q->dep[val - SFQ_SLOTS]; |
138 | 136 | } |
139 | 137 | |
140 | -static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) | |
138 | +static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) | |
141 | 139 | { |
142 | 140 | return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1); |
143 | 141 | } |
144 | 142 | |
145 | -static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |
143 | +static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) | |
146 | 144 | { |
147 | 145 | u32 h, h2; |
148 | 146 | |
149 | 147 | |
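sfq_fold_hash(), de-inlined above, mixes the two flow keys with a perturbation value that a timer periodically re-randomizes, defeating flows that try to collide deliberately; because SFQ_HASH_DIVISOR is a power of two (1024 in this code), the final AND is a cheap modulo. In the sketch below mix32() is only a stand-in for the kernel's jhash_2words():

    #include <stdio.h>

    #define SFQ_HASH_DIVISOR 1024

    static unsigned int mix32(unsigned int a, unsigned int b, unsigned int seed)
    {
            unsigned int h = a * 0x9e3779b1u ^ b * 0x85ebca6bu ^ seed;

            h ^= h >> 16;
            h *= 0x7feb352du;
            h ^= h >> 15;
            return h;
    }

    int main(void)
    {
            unsigned int daddr = 0x0a000001, ports = 0x1f90d431;
            unsigned int perturbation = 0xdeadbeef;

            printf("bucket %u\n",
                   mix32(daddr, ports, perturbation) & (SFQ_HASH_DIVISOR - 1));
            return 0;
    }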
... | ... | @@ -157,13 +155,13 @@ |
157 | 155 | iph = ip_hdr(skb); |
158 | 156 | h = (__force u32)iph->daddr; |
159 | 157 | h2 = (__force u32)iph->saddr ^ iph->protocol; |
160 | - if (iph->frag_off & htons(IP_MF|IP_OFFSET)) | |
158 | + if (iph->frag_off & htons(IP_MF | IP_OFFSET)) | |
161 | 159 | break; |
162 | 160 | poff = proto_ports_offset(iph->protocol); |
163 | 161 | if (poff >= 0 && |
164 | 162 | pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) { |
165 | 163 | iph = ip_hdr(skb); |
166 | - h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff); | |
164 | + h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff); | |
167 | 165 | } |
168 | 166 | break; |
169 | 167 | } |
... | ... | @@ -181,7 +179,7 @@ |
181 | 179 | if (poff >= 0 && |
182 | 180 | pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) { |
183 | 181 | iph = ipv6_hdr(skb); |
184 | - h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff); | |
182 | + h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff); | |
185 | 183 | } |
186 | 184 | break; |
187 | 185 | } |
net/sched/sch_tbf.c
... | ... | @@ -97,8 +97,7 @@ |
97 | 97 | changed the limit is not effective anymore. |
98 | 98 | */ |
99 | 99 | |
100 | -struct tbf_sched_data | |
101 | -{ | |
100 | +struct tbf_sched_data { | |
102 | 101 | /* Parameters */ |
103 | 102 | u32 limit; /* Maximal length of backlog: bytes */ |
104 | 103 | u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */ |
105 | 104 | |
... | ... | @@ -115,10 +114,10 @@ |
115 | 114 | struct qdisc_watchdog watchdog; /* Watchdog timer */ |
116 | 115 | }; |
117 | 116 | |
118 | -#define L2T(q,L) qdisc_l2t((q)->R_tab,L) | |
119 | -#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L) | |
117 | +#define L2T(q, L) qdisc_l2t((q)->R_tab, L) | |
118 | +#define L2T_P(q, L) qdisc_l2t((q)->P_tab, L) | |
120 | 119 | |
121 | -static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |
120 | +static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |
122 | 121 | { |
123 | 122 | struct tbf_sched_data *q = qdisc_priv(sch); |
124 | 123 | int ret; |
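The tidied L2T ("length to time") macros consult a 256-slot rate table: the packet length shifted down by cell_log picks a slot holding that size's transmission cost. A stand-in for qdisc_l2t() with a made-up linear table (in reality tc fills the table from the configured rate):

    #include <stdio.h>

    struct rate_table { unsigned int data[256]; unsigned int cell_log; };

    /* sketch of qdisc_l2t(); the real helper also clamps the slot to 255 */
    #define L2T(tab, L) ((tab)->data[(L) >> (tab)->cell_log])

    int main(void)
    {
            struct rate_table rtab = { .cell_log = 3 };
            int n;

            for (n = 0; n < 256; n++)
                    rtab.data[n] = n * 40;  /* toy: 40 ticks per 8-byte cell */

            printf("cost of a 1500-byte packet: %u ticks\n", L2T(&rtab, 1500));
            return 0;
    }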
... | ... | @@ -138,7 +137,7 @@ |
138 | 137 | return NET_XMIT_SUCCESS; |
139 | 138 | } |
140 | 139 | |
141 | -static unsigned int tbf_drop(struct Qdisc* sch) | |
140 | +static unsigned int tbf_drop(struct Qdisc *sch) | |
142 | 141 | { |
143 | 142 | struct tbf_sched_data *q = qdisc_priv(sch); |
144 | 143 | unsigned int len = 0; |
... | ... | @@ -150,7 +149,7 @@ |
150 | 149 | return len; |
151 | 150 | } |
152 | 151 | |
153 | -static struct sk_buff *tbf_dequeue(struct Qdisc* sch) | |
152 | +static struct sk_buff *tbf_dequeue(struct Qdisc *sch) | |
154 | 153 | { |
155 | 154 | struct tbf_sched_data *q = qdisc_priv(sch); |
156 | 155 | struct sk_buff *skb; |
... | ... | @@ -209,7 +208,7 @@ |
209 | 208 | return NULL; |
210 | 209 | } |
211 | 210 | |
212 | -static void tbf_reset(struct Qdisc* sch) | |
211 | +static void tbf_reset(struct Qdisc *sch) | |
213 | 212 | { |
214 | 213 | struct tbf_sched_data *q = qdisc_priv(sch); |
215 | 214 | |
... | ... | @@ -227,7 +226,7 @@ |
227 | 226 | [TCA_TBF_PTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, |
228 | 227 | }; |
229 | 228 | |
230 | -static int tbf_change(struct Qdisc* sch, struct nlattr *opt) | |
229 | +static int tbf_change(struct Qdisc *sch, struct nlattr *opt) | |
231 | 230 | { |
232 | 231 | int err; |
233 | 232 | struct tbf_sched_data *q = qdisc_priv(sch); |
... | ... | @@ -236,7 +235,7 @@ |
236 | 235 | struct qdisc_rate_table *rtab = NULL; |
237 | 236 | struct qdisc_rate_table *ptab = NULL; |
238 | 237 | struct Qdisc *child = NULL; |
239 | - int max_size,n; | |
238 | + int max_size, n; | |
240 | 239 | |
241 | 240 | err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy); |
242 | 241 | if (err < 0) |
243 | 242 | return err;
... | ... | @@ -259,15 +258,18 @@ |
259 | 258 | } |
260 | 259 | |
261 | 260 | for (n = 0; n < 256; n++) |
262 | - if (rtab->data[n] > qopt->buffer) break; | |
263 | - max_size = (n << qopt->rate.cell_log)-1; | |
261 | + if (rtab->data[n] > qopt->buffer) | |
262 | + break; | |
263 | + max_size = (n << qopt->rate.cell_log) - 1; | |
264 | 264 | if (ptab) { |
265 | 265 | int size; |
266 | 266 | |
267 | 267 | for (n = 0; n < 256; n++) |
268 | - if (ptab->data[n] > qopt->mtu) break; | |
269 | - size = (n << qopt->peakrate.cell_log)-1; | |
270 | - if (size < max_size) max_size = size; | |
268 | + if (ptab->data[n] > qopt->mtu) | |
269 | + break; | |
270 | + size = (n << qopt->peakrate.cell_log) - 1; | |
271 | + if (size < max_size) | |
272 | + max_size = size; | |
271 | 273 | } |
272 | 274 | if (max_size < 0) |
273 | 275 | goto done; |
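The reindented loops above compute max_size: scan the 256-slot rate table for the first slot whose token cost exceeds the bucket budget (qopt->buffer for the rate table, qopt->mtu for the peak table); the largest packet TBF can ever pass is then one byte short of that slot boundary, (n << cell_log) - 1, taking the smaller answer when both tables exist. The same scan in a self-contained sketch with toy numbers:

#include <stdint.h>
#include <stdio.h>

/* Find the largest packet size whose cost still fits in the budget,
 * mirroring the tbf_change() scan: stop at the first table slot that
 * costs more than the budget, then convert the slot index to bytes. */
static int max_fitting_size(const uint32_t cost[256], int cell_log,
                            uint32_t budget)
{
        int n;

        for (n = 0; n < 256; n++)
                if (cost[n] > budget)
                        break;
        return (n << cell_log) - 1;     /* -1 if even slot 0 is too dear */
}

int main(void)
{
        uint32_t cost[256];
        int i;

        for (i = 0; i < 256; i++)
                cost[i] = i * 10;       /* toy: 10 time units per 8-byte cell */

        /* budget 500 -> first slot costing more is n = 51 -> 51*8 - 1 = 407 */
        printf("max size = %d bytes\n", max_fitting_size(cost, 3, 500));
        return 0;
}

When the table's first slot already exceeds the budget the sketch returns -1, which corresponds to the max_size < 0 case that tbf_change() rejects just above.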
... | ... | @@ -310,7 +312,7 @@ |
310 | 312 | return err; |
311 | 313 | } |
312 | 314 | |
313 | -static int tbf_init(struct Qdisc* sch, struct nlattr *opt) | |
315 | +static int tbf_init(struct Qdisc *sch, struct nlattr *opt) | |
314 | 316 | { |
315 | 317 | struct tbf_sched_data *q = qdisc_priv(sch); |
316 | 318 | |
... | ... | @@ -422,8 +424,7 @@ |
422 | 424 | } |
423 | 425 | } |
424 | 426 | |
425 | -static const struct Qdisc_class_ops tbf_class_ops = | |
426 | -{ | |
427 | +static const struct Qdisc_class_ops tbf_class_ops = { | |
427 | 428 | .graft = tbf_graft, |
428 | 429 | .leaf = tbf_leaf, |
429 | 430 | .get = tbf_get, |
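The tbf_class_ops hunk shows the commit's recurring pattern: per current CodingStyle, the opening brace of a struct declaration or initializer moves up onto the declaration line. A hypothetical before/after pair, not from the tree, for illustration:

/* Before: brace on its own line, the older net/sched habit. */
struct old_style_ops
{
        int (*enqueue)(void);
};

/* After: brace last on the declaration line, as CodingStyle asks
 * for everything except function definitions. */
struct new_style_ops {
        int (*enqueue)(void);
};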
net/sched/sch_teql.c
... | ... | @@ -53,8 +53,7 @@ |
53 | 53 | which will not break load balancing, though native slave |
54 | 54 | traffic will have the highest priority. */ |
55 | 55 | |
56 | -struct teql_master | |
57 | -{ | |
56 | +struct teql_master { | |
58 | 57 | struct Qdisc_ops qops; |
59 | 58 | struct net_device *dev; |
60 | 59 | struct Qdisc *slaves; |
61 | 60 | |
62 | 61 | |
63 | 62 | |
... | ... | @@ -65,22 +64,21 @@ |
65 | 64 | unsigned long tx_dropped; |
66 | 65 | }; |
67 | 66 | |
68 | -struct teql_sched_data | |
69 | -{ | |
67 | +struct teql_sched_data { | |
70 | 68 | struct Qdisc *next; |
71 | 69 | struct teql_master *m; |
72 | 70 | struct neighbour *ncache; |
73 | 71 | struct sk_buff_head q; |
74 | 72 | }; |
75 | 73 | |
76 | -#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next) | |
74 | +#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next) | |
77 | 75 | |
78 | -#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT) | |
76 | +#define FMASK (IFF_BROADCAST | IFF_POINTOPOINT) | |
79 | 77 | |
80 | 78 | /* "teql*" qdisc routines */ |
81 | 79 | |
82 | 80 | static int |
83 | -teql_enqueue(struct sk_buff *skb, struct Qdisc* sch) | |
81 | +teql_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |
84 | 82 | { |
85 | 83 | struct net_device *dev = qdisc_dev(sch); |
86 | 84 | struct teql_sched_data *q = qdisc_priv(sch); |
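NEXT_SLAVE() above (now spaced per style) reads the next pointer out of a slave qdisc's private data; teql keeps its slaves on a circular singly linked ring and walks it round-robin. A self-contained model of that ring and of the one-full-lap do/while used by teql_master_xmit() and teql_master_mtu() in the hunks below; the names here are illustrative, not the kernel's:

#include <stdio.h>

/* Model of the teql slave ring: each slave's private data holds the
 * next slave, and the master keeps one entry point into the ring. */
struct slave {
        const char *name;
        struct slave *next;
};

#define NEXT_SLAVE(q) ((q)->next)

int main(void)
{
        struct slave a = { "eth0" }, b = { "eth1" }, c = { "eth2" };
        struct slave *slaves = &a, *q;

        /* Link into a circle, as teql does when grafting slaves. */
        a.next = &b;
        b.next = &c;
        c.next = &a;

        /* One full round-robin lap: same do/while shape as the
         * teql_master_xmit() and teql_master_mtu() loops. */
        q = slaves;
        do {
                printf("try slave %s\n", q->name);
        } while ((q = NEXT_SLAVE(q)) != slaves);

        return 0;
}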
... | ... | @@ -97,7 +95,7 @@ |
97 | 95 | } |
98 | 96 | |
99 | 97 | static struct sk_buff * |
100 | -teql_dequeue(struct Qdisc* sch) | |
98 | +teql_dequeue(struct Qdisc *sch) | |
101 | 99 | { |
102 | 100 | struct teql_sched_data *dat = qdisc_priv(sch); |
103 | 101 | struct netdev_queue *dat_queue; |
104 | 102 | |
... | ... | @@ -117,13 +115,13 @@ |
117 | 115 | } |
118 | 116 | |
119 | 117 | static struct sk_buff * |
120 | -teql_peek(struct Qdisc* sch) | |
118 | +teql_peek(struct Qdisc *sch) | |
121 | 119 | { |
122 | 120 | /* teql is meant to be used as root qdisc */ |
123 | 121 | return NULL; |
124 | 122 | } |
125 | 123 | |
126 | -static __inline__ void | |
124 | +static inline void | |
127 | 125 | teql_neigh_release(struct neighbour *n) |
128 | 126 | { |
129 | 127 | if (n) |
... | ... | @@ -131,7 +129,7 @@ |
131 | 129 | } |
132 | 130 | |
133 | 131 | static void |
134 | -teql_reset(struct Qdisc* sch) | |
132 | +teql_reset(struct Qdisc *sch) | |
135 | 133 | { |
136 | 134 | struct teql_sched_data *dat = qdisc_priv(sch); |
137 | 135 | |
138 | 136 | |
... | ... | @@ -141,13 +139,14 @@ |
141 | 139 | } |
142 | 140 | |
143 | 141 | static void |
144 | -teql_destroy(struct Qdisc* sch) | |
142 | +teql_destroy(struct Qdisc *sch) | |
145 | 143 | { |
146 | 144 | struct Qdisc *q, *prev; |
147 | 145 | struct teql_sched_data *dat = qdisc_priv(sch); |
148 | 146 | struct teql_master *master = dat->m; |
149 | 147 | |
150 | - if ((prev = master->slaves) != NULL) { | |
148 | + prev = master->slaves; | |
149 | + if (prev) { | |
151 | 150 | do { |
152 | 151 | q = NEXT_SLAVE(prev); |
153 | 152 | if (q == sch) { |
... | ... | @@ -179,7 +178,7 @@ |
179 | 178 | static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt) |
180 | 179 | { |
181 | 180 | struct net_device *dev = qdisc_dev(sch); |
182 | - struct teql_master *m = (struct teql_master*)sch->ops; | |
181 | + struct teql_master *m = (struct teql_master *)sch->ops; | |
183 | 182 | struct teql_sched_data *q = qdisc_priv(sch); |
184 | 183 | |
185 | 184 | if (dev->hard_header_len > m->dev->hard_header_len) |
... | ... | @@ -290,7 +289,8 @@ |
290 | 289 | nores = 0; |
291 | 290 | busy = 0; |
292 | 291 | |
293 | - if ((q = start) == NULL) | |
292 | + q = start; | |
293 | + if (!q) | |
294 | 294 | goto drop; |
295 | 295 | |
296 | 296 | do { |
297 | 297 | |
... | ... | @@ -355,10 +355,10 @@ |
355 | 355 | |
356 | 356 | static int teql_master_open(struct net_device *dev) |
357 | 357 | { |
358 | - struct Qdisc * q; | |
358 | + struct Qdisc *q; | |
359 | 359 | struct teql_master *m = netdev_priv(dev); |
360 | 360 | int mtu = 0xFFFE; |
361 | - unsigned flags = IFF_NOARP|IFF_MULTICAST; | |
361 | + unsigned int flags = IFF_NOARP | IFF_MULTICAST; | |
362 | 362 | |
363 | 363 | if (m->slaves == NULL) |
364 | 364 | return -EUNATCH; |
... | ... | @@ -426,7 +426,7 @@ |
426 | 426 | do { |
427 | 427 | if (new_mtu > qdisc_dev(q)->mtu) |
428 | 428 | return -EINVAL; |
429 | - } while ((q=NEXT_SLAVE(q)) != m->slaves); | |
429 | + } while ((q = NEXT_SLAVE(q)) != m->slaves); | |
430 | 430 | } |
431 | 431 | |
432 | 432 | dev->mtu = new_mtu; |