Commit c4106aa88a440430d387e022f2ad6dc1e0d52e98

Authored by Harvey Harrison
Committed by David S. Miller
1 parent 5b9ab2ec04

decnet: remove private wrappers of endian helpers

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Reviewed-by: Steven Whitehouse <swhiteho@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 10 changed files with 94 additions and 94 deletions Inline Diff

1 #ifndef _NET_DN_H 1 #ifndef _NET_DN_H
2 #define _NET_DN_H 2 #define _NET_DN_H
3 3
4 #include <linux/dn.h> 4 #include <linux/dn.h>
5 #include <net/sock.h> 5 #include <net/sock.h>
6 #include <asm/byteorder.h> 6 #include <asm/byteorder.h>
7 #include <asm/unaligned.h>
7 8
8 #define dn_ntohs(x) le16_to_cpu(x)
9 #define dn_htons(x) cpu_to_le16(x)
10
11 struct dn_scp /* Session Control Port */ 9 struct dn_scp /* Session Control Port */
12 { 10 {
13 unsigned char state; 11 unsigned char state;
14 #define DN_O 1 /* Open */ 12 #define DN_O 1 /* Open */
15 #define DN_CR 2 /* Connect Receive */ 13 #define DN_CR 2 /* Connect Receive */
16 #define DN_DR 3 /* Disconnect Reject */ 14 #define DN_DR 3 /* Disconnect Reject */
17 #define DN_DRC 4 /* Discon. Rej. Complete*/ 15 #define DN_DRC 4 /* Discon. Rej. Complete*/
18 #define DN_CC 5 /* Connect Confirm */ 16 #define DN_CC 5 /* Connect Confirm */
19 #define DN_CI 6 /* Connect Initiate */ 17 #define DN_CI 6 /* Connect Initiate */
20 #define DN_NR 7 /* No resources */ 18 #define DN_NR 7 /* No resources */
21 #define DN_NC 8 /* No communication */ 19 #define DN_NC 8 /* No communication */
22 #define DN_CD 9 /* Connect Delivery */ 20 #define DN_CD 9 /* Connect Delivery */
23 #define DN_RJ 10 /* Rejected */ 21 #define DN_RJ 10 /* Rejected */
24 #define DN_RUN 11 /* Running */ 22 #define DN_RUN 11 /* Running */
25 #define DN_DI 12 /* Disconnect Initiate */ 23 #define DN_DI 12 /* Disconnect Initiate */
26 #define DN_DIC 13 /* Disconnect Complete */ 24 #define DN_DIC 13 /* Disconnect Complete */
27 #define DN_DN 14 /* Disconnect Notificat */ 25 #define DN_DN 14 /* Disconnect Notificat */
28 #define DN_CL 15 /* Closed */ 26 #define DN_CL 15 /* Closed */
29 #define DN_CN 16 /* Closed Notification */ 27 #define DN_CN 16 /* Closed Notification */
30 28
31 __le16 addrloc; 29 __le16 addrloc;
32 __le16 addrrem; 30 __le16 addrrem;
33 __u16 numdat; 31 __u16 numdat;
34 __u16 numoth; 32 __u16 numoth;
35 __u16 numoth_rcv; 33 __u16 numoth_rcv;
36 __u16 numdat_rcv; 34 __u16 numdat_rcv;
37 __u16 ackxmt_dat; 35 __u16 ackxmt_dat;
38 __u16 ackxmt_oth; 36 __u16 ackxmt_oth;
39 __u16 ackrcv_dat; 37 __u16 ackrcv_dat;
40 __u16 ackrcv_oth; 38 __u16 ackrcv_oth;
41 __u8 flowrem_sw; 39 __u8 flowrem_sw;
42 __u8 flowloc_sw; 40 __u8 flowloc_sw;
43 #define DN_SEND 2 41 #define DN_SEND 2
44 #define DN_DONTSEND 1 42 #define DN_DONTSEND 1
45 #define DN_NOCHANGE 0 43 #define DN_NOCHANGE 0
46 __u16 flowrem_dat; 44 __u16 flowrem_dat;
47 __u16 flowrem_oth; 45 __u16 flowrem_oth;
48 __u16 flowloc_dat; 46 __u16 flowloc_dat;
49 __u16 flowloc_oth; 47 __u16 flowloc_oth;
50 __u8 services_rem; 48 __u8 services_rem;
51 __u8 services_loc; 49 __u8 services_loc;
52 __u8 info_rem; 50 __u8 info_rem;
53 __u8 info_loc; 51 __u8 info_loc;
54 52
55 __u16 segsize_rem; 53 __u16 segsize_rem;
56 __u16 segsize_loc; 54 __u16 segsize_loc;
57 55
58 __u8 nonagle; 56 __u8 nonagle;
59 __u8 multi_ireq; 57 __u8 multi_ireq;
60 __u8 accept_mode; 58 __u8 accept_mode;
61 unsigned long seg_total; /* Running total of current segment */ 59 unsigned long seg_total; /* Running total of current segment */
62 60
63 struct optdata_dn conndata_in; 61 struct optdata_dn conndata_in;
64 struct optdata_dn conndata_out; 62 struct optdata_dn conndata_out;
65 struct optdata_dn discdata_in; 63 struct optdata_dn discdata_in;
66 struct optdata_dn discdata_out; 64 struct optdata_dn discdata_out;
67 struct accessdata_dn accessdata; 65 struct accessdata_dn accessdata;
68 66
69 struct sockaddr_dn addr; /* Local address */ 67 struct sockaddr_dn addr; /* Local address */
70 struct sockaddr_dn peer; /* Remote address */ 68 struct sockaddr_dn peer; /* Remote address */
71 69
72 /* 70 /*
73 * In this case the RTT estimation is not specified in the 71 * In this case the RTT estimation is not specified in the
74 * docs, nor is any back off algorithm. Here we follow well 72 * docs, nor is any back off algorithm. Here we follow well
75 * known tcp algorithms with a few small variations. 73 * known tcp algorithms with a few small variations.
76 * 74 *
77 * snd_window: Max number of packets we send before we wait for 75 * snd_window: Max number of packets we send before we wait for
78 * an ack to come back. This will become part of a 76 * an ack to come back. This will become part of a
79 * more complicated scheme when we support flow 77 * more complicated scheme when we support flow
80 * control. 78 * control.
81 * 79 *
82 * nsp_srtt: Round-Trip-Time (x8) in jiffies. This is a rolling 80 * nsp_srtt: Round-Trip-Time (x8) in jiffies. This is a rolling
83 * average. 81 * average.
84 * nsp_rttvar: Round-Trip-Time-Variance (x4) in jiffies. This is the 82 * nsp_rttvar: Round-Trip-Time-Variance (x4) in jiffies. This is the
85 * variance of the smoothed average (but calculated in 83 * variance of the smoothed average (but calculated in
86 * a simpler way than for normal statistical variance 84 * a simpler way than for normal statistical variance
87 * calculations). 85 * calculations).
88 * 86 *
89 * nsp_rxtshift: Backoff counter. Value is zero normally, each time 87 * nsp_rxtshift: Backoff counter. Value is zero normally, each time
90 * a packet is lost it increases by one until an ack 88 * a packet is lost it increases by one until an ack
91 * is received. It's used to index an array of backoff 89 * is received. It's used to index an array of backoff
92 * multipliers. 90 * multipliers.
93 */ 91 */
94 #define NSP_MIN_WINDOW 1 92 #define NSP_MIN_WINDOW 1
95 #define NSP_MAX_WINDOW (0x07fe) 93 #define NSP_MAX_WINDOW (0x07fe)
96 unsigned long max_window; 94 unsigned long max_window;
97 unsigned long snd_window; 95 unsigned long snd_window;
98 #define NSP_INITIAL_SRTT (HZ) 96 #define NSP_INITIAL_SRTT (HZ)
99 unsigned long nsp_srtt; 97 unsigned long nsp_srtt;
100 #define NSP_INITIAL_RTTVAR (HZ*3) 98 #define NSP_INITIAL_RTTVAR (HZ*3)
101 unsigned long nsp_rttvar; 99 unsigned long nsp_rttvar;
102 #define NSP_MAXRXTSHIFT 12 100 #define NSP_MAXRXTSHIFT 12
103 unsigned long nsp_rxtshift; 101 unsigned long nsp_rxtshift;
104 102
105 /* 103 /*
106 * Output queues, one for data, one for otherdata/linkservice 104 * Output queues, one for data, one for otherdata/linkservice
107 */ 105 */
108 struct sk_buff_head data_xmit_queue; 106 struct sk_buff_head data_xmit_queue;
109 struct sk_buff_head other_xmit_queue; 107 struct sk_buff_head other_xmit_queue;
110 108
111 /* 109 /*
112 * Input queue for other data 110 * Input queue for other data
113 */ 111 */
114 struct sk_buff_head other_receive_queue; 112 struct sk_buff_head other_receive_queue;
115 int other_report; 113 int other_report;
116 114
117 /* 115 /*
118 * Stuff to do with the slow timer 116 * Stuff to do with the slow timer
119 */ 117 */
120 unsigned long stamp; /* time of last transmit */ 118 unsigned long stamp; /* time of last transmit */
121 unsigned long persist; 119 unsigned long persist;
122 int (*persist_fxn)(struct sock *sk); 120 int (*persist_fxn)(struct sock *sk);
123 unsigned long keepalive; 121 unsigned long keepalive;
124 void (*keepalive_fxn)(struct sock *sk); 122 void (*keepalive_fxn)(struct sock *sk);
125 123
126 /* 124 /*
127 * This stuff is for the fast timer for delayed acks 125 * This stuff is for the fast timer for delayed acks
128 */ 126 */
129 struct timer_list delack_timer; 127 struct timer_list delack_timer;
130 int delack_pending; 128 int delack_pending;
131 void (*delack_fxn)(struct sock *sk); 129 void (*delack_fxn)(struct sock *sk);
132 130
133 }; 131 };
134 132
135 static inline struct dn_scp *DN_SK(struct sock *sk) 133 static inline struct dn_scp *DN_SK(struct sock *sk)
136 { 134 {
137 return (struct dn_scp *)(sk + 1); 135 return (struct dn_scp *)(sk + 1);
138 } 136 }
139 137
140 /* 138 /*
141 * src,dst : Source and Destination DECnet addresses 139 * src,dst : Source and Destination DECnet addresses
142 * hops : Number of hops through the network 140 * hops : Number of hops through the network
143 * dst_port, src_port : NSP port numbers 141 * dst_port, src_port : NSP port numbers
144 * services, info : Useful data extracted from conninit messages 142 * services, info : Useful data extracted from conninit messages
145 * rt_flags : Routing flags byte 143 * rt_flags : Routing flags byte
146 * nsp_flags : NSP layer flags byte 144 * nsp_flags : NSP layer flags byte
147 * segsize : Size of segment 145 * segsize : Size of segment
148 * segnum : Number, for data, otherdata and linkservice 146 * segnum : Number, for data, otherdata and linkservice
149 * xmit_count : Number of times we've transmitted this skb 147 * xmit_count : Number of times we've transmitted this skb
150 * stamp : Time stamp of most recent transmission, used in RTT calculations 148 * stamp : Time stamp of most recent transmission, used in RTT calculations
151 * iif: Input interface number 149 * iif: Input interface number
152 * 150 *
153 * As a general policy, this structure keeps all addresses in network 151 * As a general policy, this structure keeps all addresses in network
154 * byte order, and all else in host byte order. Thus dst, src, dst_port 152 * byte order, and all else in host byte order. Thus dst, src, dst_port
155 * and src_port are in network order. All else is in host order. 153 * and src_port are in network order. All else is in host order.
156 * 154 *
157 */ 155 */
158 #define DN_SKB_CB(skb) ((struct dn_skb_cb *)(skb)->cb) 156 #define DN_SKB_CB(skb) ((struct dn_skb_cb *)(skb)->cb)
159 struct dn_skb_cb { 157 struct dn_skb_cb {
160 __le16 dst; 158 __le16 dst;
161 __le16 src; 159 __le16 src;
162 __u16 hops; 160 __u16 hops;
163 __le16 dst_port; 161 __le16 dst_port;
164 __le16 src_port; 162 __le16 src_port;
165 __u8 services; 163 __u8 services;
166 __u8 info; 164 __u8 info;
167 __u8 rt_flags; 165 __u8 rt_flags;
168 __u8 nsp_flags; 166 __u8 nsp_flags;
169 __u16 segsize; 167 __u16 segsize;
170 __u16 segnum; 168 __u16 segnum;
171 __u16 xmit_count; 169 __u16 xmit_count;
172 unsigned long stamp; 170 unsigned long stamp;
173 int iif; 171 int iif;
174 }; 172 };
175 173
176 static inline __le16 dn_eth2dn(unsigned char *ethaddr) 174 static inline __le16 dn_eth2dn(unsigned char *ethaddr)
177 { 175 {
178 return dn_htons(ethaddr[4] | (ethaddr[5] << 8)); 176 return get_unaligned((__le16 *)(ethaddr + 4));
179 } 177 }
180 178
181 static inline __le16 dn_saddr2dn(struct sockaddr_dn *saddr) 179 static inline __le16 dn_saddr2dn(struct sockaddr_dn *saddr)
182 { 180 {
183 return *(__le16 *)saddr->sdn_nodeaddr; 181 return *(__le16 *)saddr->sdn_nodeaddr;
184 } 182 }
185 183
186 static inline void dn_dn2eth(unsigned char *ethaddr, __le16 addr) 184 static inline void dn_dn2eth(unsigned char *ethaddr, __le16 addr)
187 { 185 {
188 __u16 a = dn_ntohs(addr); 186 __u16 a = le16_to_cpu(addr);
189 ethaddr[0] = 0xAA; 187 ethaddr[0] = 0xAA;
190 ethaddr[1] = 0x00; 188 ethaddr[1] = 0x00;
191 ethaddr[2] = 0x04; 189 ethaddr[2] = 0x04;
192 ethaddr[3] = 0x00; 190 ethaddr[3] = 0x00;
193 ethaddr[4] = (__u8)(a & 0xff); 191 ethaddr[4] = (__u8)(a & 0xff);
194 ethaddr[5] = (__u8)(a >> 8); 192 ethaddr[5] = (__u8)(a >> 8);
195 } 193 }
196 194
197 static inline void dn_sk_ports_copy(struct flowi *fl, struct dn_scp *scp) 195 static inline void dn_sk_ports_copy(struct flowi *fl, struct dn_scp *scp)
198 { 196 {
199 fl->uli_u.dnports.sport = scp->addrloc; 197 fl->uli_u.dnports.sport = scp->addrloc;
200 fl->uli_u.dnports.dport = scp->addrrem; 198 fl->uli_u.dnports.dport = scp->addrrem;
201 } 199 }
202 200
203 extern unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu); 201 extern unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu);
204 202
205 #define DN_MENUVER_ACC 0x01 203 #define DN_MENUVER_ACC 0x01
206 #define DN_MENUVER_USR 0x02 204 #define DN_MENUVER_USR 0x02
207 #define DN_MENUVER_PRX 0x04 205 #define DN_MENUVER_PRX 0x04
208 #define DN_MENUVER_UIC 0x08 206 #define DN_MENUVER_UIC 0x08
209 207
210 extern struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr); 208 extern struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
211 extern struct sock *dn_find_by_skb(struct sk_buff *skb); 209 extern struct sock *dn_find_by_skb(struct sk_buff *skb);
212 #define DN_ASCBUF_LEN 9 210 #define DN_ASCBUF_LEN 9
213 extern char *dn_addr2asc(__u16, char *); 211 extern char *dn_addr2asc(__u16, char *);
214 extern int dn_destroy_timer(struct sock *sk); 212 extern int dn_destroy_timer(struct sock *sk);
215 213
216 extern int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf, unsigned char type); 214 extern int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf, unsigned char type);
217 extern int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr, unsigned char *type); 215 extern int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr, unsigned char *type);
218 216
219 extern void dn_start_slow_timer(struct sock *sk); 217 extern void dn_start_slow_timer(struct sock *sk);
220 extern void dn_stop_slow_timer(struct sock *sk); 218 extern void dn_stop_slow_timer(struct sock *sk);
221 219
222 extern __le16 decnet_address; 220 extern __le16 decnet_address;
223 extern int decnet_debug_level; 221 extern int decnet_debug_level;
224 extern int decnet_time_wait; 222 extern int decnet_time_wait;
225 extern int decnet_dn_count; 223 extern int decnet_dn_count;
226 extern int decnet_di_count; 224 extern int decnet_di_count;
227 extern int decnet_dr_count; 225 extern int decnet_dr_count;
228 extern int decnet_no_fc_max_cwnd; 226 extern int decnet_no_fc_max_cwnd;
229 227
230 extern int sysctl_decnet_mem[3]; 228 extern int sysctl_decnet_mem[3];
231 extern int sysctl_decnet_wmem[3]; 229 extern int sysctl_decnet_wmem[3];
232 extern int sysctl_decnet_rmem[3]; 230 extern int sysctl_decnet_rmem[3];
233 231
234 #endif /* _NET_DN_H */ 232 #endif /* _NET_DN_H */
include/net/dn_fib.h
1 #ifndef _NET_DN_FIB_H 1 #ifndef _NET_DN_FIB_H
2 #define _NET_DN_FIB_H 2 #define _NET_DN_FIB_H
3 3
4 /* WARNING: The ordering of these elements must match ordering 4 /* WARNING: The ordering of these elements must match ordering
5 * of RTA_* rtnetlink attribute numbers. 5 * of RTA_* rtnetlink attribute numbers.
6 */ 6 */
7 struct dn_kern_rta 7 struct dn_kern_rta
8 { 8 {
9 void *rta_dst; 9 void *rta_dst;
10 void *rta_src; 10 void *rta_src;
11 int *rta_iif; 11 int *rta_iif;
12 int *rta_oif; 12 int *rta_oif;
13 void *rta_gw; 13 void *rta_gw;
14 u32 *rta_priority; 14 u32 *rta_priority;
15 void *rta_prefsrc; 15 void *rta_prefsrc;
16 struct rtattr *rta_mx; 16 struct rtattr *rta_mx;
17 struct rtattr *rta_mp; 17 struct rtattr *rta_mp;
18 unsigned char *rta_protoinfo; 18 unsigned char *rta_protoinfo;
19 u32 *rta_flow; 19 u32 *rta_flow;
20 struct rta_cacheinfo *rta_ci; 20 struct rta_cacheinfo *rta_ci;
21 struct rta_session *rta_sess; 21 struct rta_session *rta_sess;
22 }; 22 };
23 23
24 struct dn_fib_res { 24 struct dn_fib_res {
25 struct fib_rule *r; 25 struct fib_rule *r;
26 struct dn_fib_info *fi; 26 struct dn_fib_info *fi;
27 unsigned char prefixlen; 27 unsigned char prefixlen;
28 unsigned char nh_sel; 28 unsigned char nh_sel;
29 unsigned char type; 29 unsigned char type;
30 unsigned char scope; 30 unsigned char scope;
31 }; 31 };
32 32
33 struct dn_fib_nh { 33 struct dn_fib_nh {
34 struct net_device *nh_dev; 34 struct net_device *nh_dev;
35 unsigned nh_flags; 35 unsigned nh_flags;
36 unsigned char nh_scope; 36 unsigned char nh_scope;
37 int nh_weight; 37 int nh_weight;
38 int nh_power; 38 int nh_power;
39 int nh_oif; 39 int nh_oif;
40 __le16 nh_gw; 40 __le16 nh_gw;
41 }; 41 };
42 42
43 struct dn_fib_info { 43 struct dn_fib_info {
44 struct dn_fib_info *fib_next; 44 struct dn_fib_info *fib_next;
45 struct dn_fib_info *fib_prev; 45 struct dn_fib_info *fib_prev;
46 int fib_treeref; 46 int fib_treeref;
47 atomic_t fib_clntref; 47 atomic_t fib_clntref;
48 int fib_dead; 48 int fib_dead;
49 unsigned fib_flags; 49 unsigned fib_flags;
50 int fib_protocol; 50 int fib_protocol;
51 __le16 fib_prefsrc; 51 __le16 fib_prefsrc;
52 __u32 fib_priority; 52 __u32 fib_priority;
53 __u32 fib_metrics[RTAX_MAX]; 53 __u32 fib_metrics[RTAX_MAX];
54 #define dn_fib_mtu fib_metrics[RTAX_MTU-1] 54 #define dn_fib_mtu fib_metrics[RTAX_MTU-1]
55 #define dn_fib_window fib_metrics[RTAX_WINDOW-1] 55 #define dn_fib_window fib_metrics[RTAX_WINDOW-1]
56 #define dn_fib_rtt fib_metrics[RTAX_RTT-1] 56 #define dn_fib_rtt fib_metrics[RTAX_RTT-1]
57 #define dn_fib_advmss fib_metrics[RTAX_ADVMSS-1] 57 #define dn_fib_advmss fib_metrics[RTAX_ADVMSS-1]
58 int fib_nhs; 58 int fib_nhs;
59 int fib_power; 59 int fib_power;
60 struct dn_fib_nh fib_nh[0]; 60 struct dn_fib_nh fib_nh[0];
61 #define dn_fib_dev fib_nh[0].nh_dev 61 #define dn_fib_dev fib_nh[0].nh_dev
62 }; 62 };
63 63
64 64
65 #define DN_FIB_RES_RESET(res) ((res).nh_sel = 0) 65 #define DN_FIB_RES_RESET(res) ((res).nh_sel = 0)
66 #define DN_FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel]) 66 #define DN_FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
67 67
68 #define DN_FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : __dn_fib_res_prefsrc(&res)) 68 #define DN_FIB_RES_PREFSRC(res) ((res).fi->fib_prefsrc ? : __dn_fib_res_prefsrc(&res))
69 #define DN_FIB_RES_GW(res) (DN_FIB_RES_NH(res).nh_gw) 69 #define DN_FIB_RES_GW(res) (DN_FIB_RES_NH(res).nh_gw)
70 #define DN_FIB_RES_DEV(res) (DN_FIB_RES_NH(res).nh_dev) 70 #define DN_FIB_RES_DEV(res) (DN_FIB_RES_NH(res).nh_dev)
71 #define DN_FIB_RES_OIF(res) (DN_FIB_RES_NH(res).nh_oif) 71 #define DN_FIB_RES_OIF(res) (DN_FIB_RES_NH(res).nh_oif)
72 72
73 typedef struct { 73 typedef struct {
74 __le16 datum; 74 __le16 datum;
75 } dn_fib_key_t; 75 } dn_fib_key_t;
76 76
77 typedef struct { 77 typedef struct {
78 __le16 datum; 78 __le16 datum;
79 } dn_fib_hash_t; 79 } dn_fib_hash_t;
80 80
81 typedef struct { 81 typedef struct {
82 __u16 datum; 82 __u16 datum;
83 } dn_fib_idx_t; 83 } dn_fib_idx_t;
84 84
85 struct dn_fib_node { 85 struct dn_fib_node {
86 struct dn_fib_node *fn_next; 86 struct dn_fib_node *fn_next;
87 struct dn_fib_info *fn_info; 87 struct dn_fib_info *fn_info;
88 #define DN_FIB_INFO(f) ((f)->fn_info) 88 #define DN_FIB_INFO(f) ((f)->fn_info)
89 dn_fib_key_t fn_key; 89 dn_fib_key_t fn_key;
90 u8 fn_type; 90 u8 fn_type;
91 u8 fn_scope; 91 u8 fn_scope;
92 u8 fn_state; 92 u8 fn_state;
93 }; 93 };
94 94
95 95
96 struct dn_fib_table { 96 struct dn_fib_table {
97 struct hlist_node hlist; 97 struct hlist_node hlist;
98 u32 n; 98 u32 n;
99 99
100 int (*insert)(struct dn_fib_table *t, struct rtmsg *r, 100 int (*insert)(struct dn_fib_table *t, struct rtmsg *r,
101 struct dn_kern_rta *rta, struct nlmsghdr *n, 101 struct dn_kern_rta *rta, struct nlmsghdr *n,
102 struct netlink_skb_parms *req); 102 struct netlink_skb_parms *req);
103 int (*delete)(struct dn_fib_table *t, struct rtmsg *r, 103 int (*delete)(struct dn_fib_table *t, struct rtmsg *r,
104 struct dn_kern_rta *rta, struct nlmsghdr *n, 104 struct dn_kern_rta *rta, struct nlmsghdr *n,
105 struct netlink_skb_parms *req); 105 struct netlink_skb_parms *req);
106 int (*lookup)(struct dn_fib_table *t, const struct flowi *fl, 106 int (*lookup)(struct dn_fib_table *t, const struct flowi *fl,
107 struct dn_fib_res *res); 107 struct dn_fib_res *res);
108 int (*flush)(struct dn_fib_table *t); 108 int (*flush)(struct dn_fib_table *t);
109 int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb); 109 int (*dump)(struct dn_fib_table *t, struct sk_buff *skb, struct netlink_callback *cb);
110 110
111 unsigned char data[0]; 111 unsigned char data[0];
112 }; 112 };
113 113
114 #ifdef CONFIG_DECNET_ROUTER 114 #ifdef CONFIG_DECNET_ROUTER
115 /* 115 /*
116 * dn_fib.c 116 * dn_fib.c
117 */ 117 */
118 extern void dn_fib_init(void); 118 extern void dn_fib_init(void);
119 extern void dn_fib_cleanup(void); 119 extern void dn_fib_cleanup(void);
120 120
121 extern int dn_fib_ioctl(struct socket *sock, unsigned int cmd, 121 extern int dn_fib_ioctl(struct socket *sock, unsigned int cmd,
122 unsigned long arg); 122 unsigned long arg);
123 extern struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, 123 extern struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
124 struct dn_kern_rta *rta, 124 struct dn_kern_rta *rta,
125 const struct nlmsghdr *nlh, int *errp); 125 const struct nlmsghdr *nlh, int *errp);
126 extern int dn_fib_semantic_match(int type, struct dn_fib_info *fi, 126 extern int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
127 const struct flowi *fl, 127 const struct flowi *fl,
128 struct dn_fib_res *res); 128 struct dn_fib_res *res);
129 extern void dn_fib_release_info(struct dn_fib_info *fi); 129 extern void dn_fib_release_info(struct dn_fib_info *fi);
130 extern __le16 dn_fib_get_attr16(struct rtattr *attr, int attrlen, int type); 130 extern __le16 dn_fib_get_attr16(struct rtattr *attr, int attrlen, int type);
131 extern void dn_fib_flush(void); 131 extern void dn_fib_flush(void);
132 extern void dn_fib_select_multipath(const struct flowi *fl, 132 extern void dn_fib_select_multipath(const struct flowi *fl,
133 struct dn_fib_res *res); 133 struct dn_fib_res *res);
134 134
135 /* 135 /*
136 * dn_tables.c 136 * dn_tables.c
137 */ 137 */
138 extern struct dn_fib_table *dn_fib_get_table(u32 n, int creat); 138 extern struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
139 extern struct dn_fib_table *dn_fib_empty_table(void); 139 extern struct dn_fib_table *dn_fib_empty_table(void);
140 extern void dn_fib_table_init(void); 140 extern void dn_fib_table_init(void);
141 extern void dn_fib_table_cleanup(void); 141 extern void dn_fib_table_cleanup(void);
142 142
143 /* 143 /*
144 * dn_rules.c 144 * dn_rules.c
145 */ 145 */
146 extern void dn_fib_rules_init(void); 146 extern void dn_fib_rules_init(void);
147 extern void dn_fib_rules_cleanup(void); 147 extern void dn_fib_rules_cleanup(void);
148 extern unsigned dnet_addr_type(__le16 addr); 148 extern unsigned dnet_addr_type(__le16 addr);
149 extern int dn_fib_lookup(struct flowi *fl, struct dn_fib_res *res); 149 extern int dn_fib_lookup(struct flowi *fl, struct dn_fib_res *res);
150 150
151 extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb); 151 extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
152 152
153 extern void dn_fib_free_info(struct dn_fib_info *fi); 153 extern void dn_fib_free_info(struct dn_fib_info *fi);
154 154
155 static inline void dn_fib_info_put(struct dn_fib_info *fi) 155 static inline void dn_fib_info_put(struct dn_fib_info *fi)
156 { 156 {
157 if (atomic_dec_and_test(&fi->fib_clntref)) 157 if (atomic_dec_and_test(&fi->fib_clntref))
158 dn_fib_free_info(fi); 158 dn_fib_free_info(fi);
159 } 159 }
160 160
161 static inline void dn_fib_res_put(struct dn_fib_res *res) 161 static inline void dn_fib_res_put(struct dn_fib_res *res)
162 { 162 {
163 if (res->fi) 163 if (res->fi)
164 dn_fib_info_put(res->fi); 164 dn_fib_info_put(res->fi);
165 if (res->r) 165 if (res->r)
166 fib_rule_put(res->r); 166 fib_rule_put(res->r);
167 } 167 }
168 168
169 #else /* Endnode */ 169 #else /* Endnode */
170 170
171 #define dn_fib_init() do { } while(0) 171 #define dn_fib_init() do { } while(0)
172 #define dn_fib_cleanup() do { } while(0) 172 #define dn_fib_cleanup() do { } while(0)
173 173
174 #define dn_fib_lookup(fl, res) (-ESRCH) 174 #define dn_fib_lookup(fl, res) (-ESRCH)
175 #define dn_fib_info_put(fi) do { } while(0) 175 #define dn_fib_info_put(fi) do { } while(0)
176 #define dn_fib_select_multipath(fl, res) do { } while(0) 176 #define dn_fib_select_multipath(fl, res) do { } while(0)
177 #define dn_fib_rules_policy(saddr,res,flags) (0) 177 #define dn_fib_rules_policy(saddr,res,flags) (0)
178 #define dn_fib_res_put(res) do { } while(0) 178 #define dn_fib_res_put(res) do { } while(0)
179 179
180 #endif /* CONFIG_DECNET_ROUTER */ 180 #endif /* CONFIG_DECNET_ROUTER */
181 181
182 static inline __le16 dnet_make_mask(int n) 182 static inline __le16 dnet_make_mask(int n)
183 { 183 {
184 if (n) 184 if (n)
185 return dn_htons(~((1<<(16-n))-1)); 185 return cpu_to_le16(~((1 << (16 - n)) - 1));
186 return 0; 186 return cpu_to_le16(0);
187 } 187 }
188 188
189 #endif /* _NET_DN_FIB_H */ 189 #endif /* _NET_DN_FIB_H */
190 190
net/decnet/af_decnet.c
1 1
2 /* 2 /*
3 * DECnet An implementation of the DECnet protocol suite for the LINUX 3 * DECnet An implementation of the DECnet protocol suite for the LINUX
4 * operating system. DECnet is implemented using the BSD Socket 4 * operating system. DECnet is implemented using the BSD Socket
5 * interface as the means of communication with the user level. 5 * interface as the means of communication with the user level.
6 * 6 *
7 * DECnet Socket Layer Interface 7 * DECnet Socket Layer Interface
8 * 8 *
9 * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com> 9 * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com>
10 * Patrick Caulfield <patrick@pandh.demon.co.uk> 10 * Patrick Caulfield <patrick@pandh.demon.co.uk>
11 * 11 *
12 * Changes: 12 * Changes:
13 * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's 13 * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's
14 * version of the code. Original copyright preserved 14 * version of the code. Original copyright preserved
15 * below. 15 * below.
16 * Steve Whitehouse: Some bug fixes, cleaning up some code to make it 16 * Steve Whitehouse: Some bug fixes, cleaning up some code to make it
17 * compatible with my routing layer. 17 * compatible with my routing layer.
18 * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick 18 * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick
19 * Caulfield. 19 * Caulfield.
20 * Steve Whitehouse: Further bug fixes, checking module code still works 20 * Steve Whitehouse: Further bug fixes, checking module code still works
21 * with new routing layer. 21 * with new routing layer.
22 * Steve Whitehouse: Additional set/get_sockopt() calls. 22 * Steve Whitehouse: Additional set/get_sockopt() calls.
23 * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new 23 * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new
24 * code. 24 * code.
25 * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX like 25 * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX like
26 * way. Didn't manage it entirely, but its better. 26 * way. Didn't manage it entirely, but its better.
27 * Steve Whitehouse: ditto for sendmsg(). 27 * Steve Whitehouse: ditto for sendmsg().
28 * Steve Whitehouse: A selection of bug fixes to various things. 28 * Steve Whitehouse: A selection of bug fixes to various things.
29 * Steve Whitehouse: Added TIOCOUTQ ioctl. 29 * Steve Whitehouse: Added TIOCOUTQ ioctl.
30 * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username. 30 * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username.
31 * Steve Whitehouse: Fixes to connect() error returns. 31 * Steve Whitehouse: Fixes to connect() error returns.
32 * Patrick Caulfield: Fixes to delayed acceptance logic. 32 * Patrick Caulfield: Fixes to delayed acceptance logic.
33 * David S. Miller: New socket locking 33 * David S. Miller: New socket locking
34 * Steve Whitehouse: Socket list hashing/locking 34 * Steve Whitehouse: Socket list hashing/locking
35 * Arnaldo C. Melo: use capable, not suser 35 * Arnaldo C. Melo: use capable, not suser
36 * Steve Whitehouse: Removed unused code. Fix to use sk->allocation 36 * Steve Whitehouse: Removed unused code. Fix to use sk->allocation
37 * when required. 37 * when required.
38 * Patrick Caulfield: /proc/net/decnet now has object name/number 38 * Patrick Caulfield: /proc/net/decnet now has object name/number
39 * Steve Whitehouse: Fixed local port allocation, hashed sk list 39 * Steve Whitehouse: Fixed local port allocation, hashed sk list
40 * Matthew Wilcox: Fixes for dn_ioctl() 40 * Matthew Wilcox: Fixes for dn_ioctl()
41 * Steve Whitehouse: New connect/accept logic to allow timeouts and 41 * Steve Whitehouse: New connect/accept logic to allow timeouts and
42 * prepare for sendpage etc. 42 * prepare for sendpage etc.
43 */ 43 */
44 44
45 45
46 /****************************************************************************** 46 /******************************************************************************
47 (c) 1995-1998 E.M. Serrat emserrat@geocities.com 47 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
48 48
49 This program is free software; you can redistribute it and/or modify 49 This program is free software; you can redistribute it and/or modify
50 it under the terms of the GNU General Public License as published by 50 it under the terms of the GNU General Public License as published by
51 the Free Software Foundation; either version 2 of the License, or 51 the Free Software Foundation; either version 2 of the License, or
52 any later version. 52 any later version.
53 53
54 This program is distributed in the hope that it will be useful, 54 This program is distributed in the hope that it will be useful,
55 but WITHOUT ANY WARRANTY; without even the implied warranty of 55 but WITHOUT ANY WARRANTY; without even the implied warranty of
56 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 56 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
57 GNU General Public License for more details. 57 GNU General Public License for more details.
58 58
59 HISTORY: 59 HISTORY:
60 60
61 Version Kernel Date Author/Comments 61 Version Kernel Date Author/Comments
62 ------- ------ ---- --------------- 62 ------- ------ ---- ---------------
63 Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat 63 Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat
64 (emserrat@geocities.com) 64 (emserrat@geocities.com)
65 65
66 First Development of DECnet Socket La- 66 First Development of DECnet Socket La-
67 yer for Linux. Only supports outgoing 67 yer for Linux. Only supports outgoing
68 connections. 68 connections.
69 69
70 Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield 70 Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield
71 (patrick@pandh.demon.co.uk) 71 (patrick@pandh.demon.co.uk)
72 72
73 Port to new kernel development version. 73 Port to new kernel development version.
74 74
75 Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat 75 Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat
76 (emserrat@geocities.com) 76 (emserrat@geocities.com)
77 _ 77 _
78 Added support for incoming connections 78 Added support for incoming connections
79 so we can start developing server apps 79 so we can start developing server apps
80 on Linux. 80 on Linux.
81 - 81 -
82 Module Support 82 Module Support
83 Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat 83 Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat
84 (emserrat@geocities.com) 84 (emserrat@geocities.com)
85 _ 85 _
86 Added support for X11R6.4. Now we can 86 Added support for X11R6.4. Now we can
87 use DECnet transport for X on Linux!!! 87 use DECnet transport for X on Linux!!!
88 - 88 -
89 Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat 89 Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat
90 (emserrat@geocities.com) 90 (emserrat@geocities.com)
91 Removed bugs on flow control 91 Removed bugs on flow control
92 Removed bugs on incoming accessdata 92 Removed bugs on incoming accessdata
93 order 93 order
94 - 94 -
95 Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat 95 Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
96 dn_recvmsg fixes 96 dn_recvmsg fixes
97 97
98 Patrick J. Caulfield 98 Patrick J. Caulfield
99 dn_bind fixes 99 dn_bind fixes
100 *******************************************************************************/ 100 *******************************************************************************/
101 101
102 #include <linux/module.h> 102 #include <linux/module.h>
103 #include <linux/errno.h> 103 #include <linux/errno.h>
104 #include <linux/types.h> 104 #include <linux/types.h>
105 #include <linux/slab.h> 105 #include <linux/slab.h>
106 #include <linux/socket.h> 106 #include <linux/socket.h>
107 #include <linux/in.h> 107 #include <linux/in.h>
108 #include <linux/kernel.h> 108 #include <linux/kernel.h>
109 #include <linux/sched.h> 109 #include <linux/sched.h>
110 #include <linux/timer.h> 110 #include <linux/timer.h>
111 #include <linux/string.h> 111 #include <linux/string.h>
112 #include <linux/sockios.h> 112 #include <linux/sockios.h>
113 #include <linux/net.h> 113 #include <linux/net.h>
114 #include <linux/netdevice.h> 114 #include <linux/netdevice.h>
115 #include <linux/inet.h> 115 #include <linux/inet.h>
116 #include <linux/route.h> 116 #include <linux/route.h>
117 #include <linux/netfilter.h> 117 #include <linux/netfilter.h>
118 #include <linux/seq_file.h> 118 #include <linux/seq_file.h>
119 #include <net/sock.h> 119 #include <net/sock.h>
120 #include <net/tcp_states.h> 120 #include <net/tcp_states.h>
121 #include <net/flow.h> 121 #include <net/flow.h>
122 #include <asm/system.h> 122 #include <asm/system.h>
123 #include <asm/ioctls.h> 123 #include <asm/ioctls.h>
124 #include <linux/capability.h> 124 #include <linux/capability.h>
125 #include <linux/mm.h> 125 #include <linux/mm.h>
126 #include <linux/interrupt.h> 126 #include <linux/interrupt.h>
127 #include <linux/proc_fs.h> 127 #include <linux/proc_fs.h>
128 #include <linux/stat.h> 128 #include <linux/stat.h>
129 #include <linux/init.h> 129 #include <linux/init.h>
130 #include <linux/poll.h> 130 #include <linux/poll.h>
131 #include <net/net_namespace.h> 131 #include <net/net_namespace.h>
132 #include <net/neighbour.h> 132 #include <net/neighbour.h>
133 #include <net/dst.h> 133 #include <net/dst.h>
134 #include <net/fib_rules.h> 134 #include <net/fib_rules.h>
135 #include <net/dn.h> 135 #include <net/dn.h>
136 #include <net/dn_nsp.h> 136 #include <net/dn_nsp.h>
137 #include <net/dn_dev.h> 137 #include <net/dn_dev.h>
138 #include <net/dn_route.h> 138 #include <net/dn_route.h>
139 #include <net/dn_fib.h> 139 #include <net/dn_fib.h>
140 #include <net/dn_neigh.h> 140 #include <net/dn_neigh.h>
141 141
/*
 * A DECnet socket: the generic struct sock immediately followed by the
 * DECnet Session Control Port state, so DN_SK() can derive the scp
 * pointer from the sock pointer.
 */
struct dn_sock {
	struct sock sk;
	struct dn_scp scp;
};
146 146
static void dn_keepalive(struct sock *sk);

/*
 * Socket hash table: 256 chains selected by the low bits of the local
 * port number, plus a single-entry list (dn_wild_sk) for the at most
 * one wildcard listener.
 */
#define DN_SK_HASH_SHIFT 8
#define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
#define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)


static const struct proto_ops dn_proto_ops;
static DEFINE_RWLOCK(dn_hash_lock);	/* protects dn_sk_hash and dn_wild_sk */
static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
static struct hlist_head dn_wild_sk;
static atomic_t decnet_memory_allocated;

static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen, int flags);
static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
162 162
163 static struct hlist_head *dn_find_list(struct sock *sk) 163 static struct hlist_head *dn_find_list(struct sock *sk)
164 { 164 {
165 struct dn_scp *scp = DN_SK(sk); 165 struct dn_scp *scp = DN_SK(sk);
166 166
167 if (scp->addr.sdn_flags & SDF_WILD) 167 if (scp->addr.sdn_flags & SDF_WILD)
168 return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL; 168 return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;
169 169
170 return &dn_sk_hash[dn_ntohs(scp->addrloc) & DN_SK_HASH_MASK]; 170 return &dn_sk_hash[le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK];
171 } 171 }
172 172
173 /* 173 /*
174 * Valid ports are those greater than zero and not already in use. 174 * Valid ports are those greater than zero and not already in use.
175 */ 175 */
176 static int check_port(__le16 port) 176 static int check_port(__le16 port)
177 { 177 {
178 struct sock *sk; 178 struct sock *sk;
179 struct hlist_node *node; 179 struct hlist_node *node;
180 180
181 if (port == 0) 181 if (port == 0)
182 return -1; 182 return -1;
183 183
184 sk_for_each(sk, node, &dn_sk_hash[dn_ntohs(port) & DN_SK_HASH_MASK]) { 184 sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
185 struct dn_scp *scp = DN_SK(sk); 185 struct dn_scp *scp = DN_SK(sk);
186 if (scp->addrloc == port) 186 if (scp->addrloc == port)
187 return -1; 187 return -1;
188 } 188 }
189 return 0; 189 return 0;
190 } 190 }
191 191
192 static unsigned short port_alloc(struct sock *sk) 192 static unsigned short port_alloc(struct sock *sk)
193 { 193 {
194 struct dn_scp *scp = DN_SK(sk); 194 struct dn_scp *scp = DN_SK(sk);
195 static unsigned short port = 0x2000; 195 static unsigned short port = 0x2000;
196 unsigned short i_port = port; 196 unsigned short i_port = port;
197 197
198 while(check_port(dn_htons(++port)) != 0) { 198 while(check_port(cpu_to_le16(++port)) != 0) {
199 if (port == i_port) 199 if (port == i_port)
200 return 0; 200 return 0;
201 } 201 }
202 202
203 scp->addrloc = dn_htons(port); 203 scp->addrloc = cpu_to_le16(port);
204 204
205 return 1; 205 return 1;
206 } 206 }
207 207
/*
 * Since this is only ever called from user
 * level, we don't need a write_lock() version
 * of this.
 *
 * Insert @sk into the socket hash under dn_hash_lock, allocating a
 * local port first if the socket does not yet have one.
 * Returns 0 on success, -EUSERS if no free port could be found, or
 * -EADDRINUSE if dn_find_list() refused the slot (second wildcard).
 */
static int dn_hash_sock(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);
	struct hlist_head *list;
	int rv = -EUSERS;

	BUG_ON(sk_hashed(sk));

	write_lock_bh(&dn_hash_lock);

	/* No local port yet: try to auto-allocate one. */
	if (!scp->addrloc && !port_alloc(sk))
		goto out;

	rv = -EADDRINUSE;
	if ((list = dn_find_list(sk)) == NULL)
		goto out;

	sk_add_node(sk, list);
	rv = 0;
out:
	write_unlock_bh(&dn_hash_lock);
	return rv;
}
236 236
/*
 * Remove @sk from the socket hash.  Plain write_lock() variant for
 * callers that already run with BHs disabled (see dn_destroy_timer).
 */
static void dn_unhash_sock(struct sock *sk)
{
	write_lock(&dn_hash_lock);
	sk_del_node_init(sk);
	write_unlock(&dn_hash_lock);
}
243 243
/* As dn_unhash_sock(), but for process context: disables BHs itself. */
static void dn_unhash_sock_bh(struct sock *sk)
{
	write_lock_bh(&dn_hash_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&dn_hash_lock);
}
250 250
251 static struct hlist_head *listen_hash(struct sockaddr_dn *addr) 251 static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
252 { 252 {
253 int i; 253 int i;
254 unsigned hash = addr->sdn_objnum; 254 unsigned hash = addr->sdn_objnum;
255 255
256 if (hash == 0) { 256 if (hash == 0) {
257 hash = addr->sdn_objnamel; 257 hash = addr->sdn_objnamel;
258 for(i = 0; i < dn_ntohs(addr->sdn_objnamel); i++) { 258 for(i = 0; i < le16_to_cpu(addr->sdn_objnamel); i++) {
259 hash ^= addr->sdn_objname[i]; 259 hash ^= addr->sdn_objname[i];
260 hash ^= (hash << 3); 260 hash ^= (hash << 3);
261 } 261 }
262 } 262 }
263 263
264 return &dn_sk_hash[hash & DN_SK_HASH_MASK]; 264 return &dn_sk_hash[hash & DN_SK_HASH_MASK];
265 } 265 }
266 266
267 /* 267 /*
268 * Called to transform a socket from bound (i.e. with a local address) 268 * Called to transform a socket from bound (i.e. with a local address)
269 * into a listening socket (doesn't need a local port number) and rehashes 269 * into a listening socket (doesn't need a local port number) and rehashes
270 * based upon the object name/number. 270 * based upon the object name/number.
271 */ 271 */
272 static void dn_rehash_sock(struct sock *sk) 272 static void dn_rehash_sock(struct sock *sk)
273 { 273 {
274 struct hlist_head *list; 274 struct hlist_head *list;
275 struct dn_scp *scp = DN_SK(sk); 275 struct dn_scp *scp = DN_SK(sk);
276 276
277 if (scp->addr.sdn_flags & SDF_WILD) 277 if (scp->addr.sdn_flags & SDF_WILD)
278 return; 278 return;
279 279
280 write_lock_bh(&dn_hash_lock); 280 write_lock_bh(&dn_hash_lock);
281 sk_del_node_init(sk); 281 sk_del_node_init(sk);
282 DN_SK(sk)->addrloc = 0; 282 DN_SK(sk)->addrloc = 0;
283 list = listen_hash(&DN_SK(sk)->addr); 283 list = listen_hash(&DN_SK(sk)->addr);
284 sk_add_node(sk, list); 284 sk_add_node(sk, list);
285 write_unlock_bh(&dn_hash_lock); 285 write_unlock_bh(&dn_hash_lock);
286 } 286 }
287 287
288 int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type) 288 int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type)
289 { 289 {
290 int len = 2; 290 int len = 2;
291 291
292 *buf++ = type; 292 *buf++ = type;
293 293
294 switch(type) { 294 switch(type) {
295 case 0: 295 case 0:
296 *buf++ = sdn->sdn_objnum; 296 *buf++ = sdn->sdn_objnum;
297 break; 297 break;
298 case 1: 298 case 1:
299 *buf++ = 0; 299 *buf++ = 0;
300 *buf++ = dn_ntohs(sdn->sdn_objnamel); 300 *buf++ = le16_to_cpu(sdn->sdn_objnamel);
301 memcpy(buf, sdn->sdn_objname, dn_ntohs(sdn->sdn_objnamel)); 301 memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
302 len = 3 + dn_ntohs(sdn->sdn_objnamel); 302 len = 3 + le16_to_cpu(sdn->sdn_objnamel);
303 break; 303 break;
304 case 2: 304 case 2:
305 memset(buf, 0, 5); 305 memset(buf, 0, 5);
306 buf += 5; 306 buf += 5;
307 *buf++ = dn_ntohs(sdn->sdn_objnamel); 307 *buf++ = le16_to_cpu(sdn->sdn_objnamel);
308 memcpy(buf, sdn->sdn_objname, dn_ntohs(sdn->sdn_objnamel)); 308 memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
309 len = 7 + dn_ntohs(sdn->sdn_objnamel); 309 len = 7 + le16_to_cpu(sdn->sdn_objnamel);
310 break; 310 break;
311 } 311 }
312 312
313 return len; 313 return len;
314 } 314 }
315 315
/*
 * On reception of usernames, we handle types 1 and 0 for destination
 * addresses only. Types 2 and 4 are used for source addresses, but the
 * UIC, GIC are ignored and they are both treated the same way. Type 3
 * is never used as I've no idea what its purpose might be or what its
 * format is.
 *
 * Parses @len bytes at @data into @sdn, storing the raw format byte in
 * *fmt.  Returns the number of bytes consumed, or -1 if the buffer is
 * too short, the format is unknown, or the object name is too long.
 */
int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt)
{
	unsigned char type;
	int size = len;
	int namel = 12;		/* max name length; raised to 16 for format 1 */

	/* Start from a cleared address so short formats leave no stale data. */
	sdn->sdn_objnum = 0;
	sdn->sdn_objnamel = cpu_to_le16(0);
	memset(sdn->sdn_objname, 0, DN_MAXOBJL);

	if (len < 2)
		return -1;

	len -= 2;
	*fmt = *data++;
	type = *data++;

	switch(*fmt) {
		case 0:
			/* Format 0: the second byte is the object number itself. */
			sdn->sdn_objnum = type;
			return 2;
		case 1:
			namel = 16;
			break;
		case 2:
			/* Skip the 4-byte UIC, which is ignored. */
			len -= 4;
			data += 4;
			break;
		case 4:
			/* Skip the 8-byte GIC, which is ignored. */
			len -= 8;
			data += 8;
			break;
		default:
			return -1;
	}

	/* Account for the name-length byte we read below. */
	len -= 1;

	if (len < 0)
		return -1;

	sdn->sdn_objnamel = cpu_to_le16(*data++);
	len -= le16_to_cpu(sdn->sdn_objnamel);

	if ((len < 0) || (le16_to_cpu(sdn->sdn_objnamel) > namel))
		return -1;

	memcpy(sdn->sdn_objname, data, le16_to_cpu(sdn->sdn_objnamel));

	return size - len;
}
374 374
/*
 * Find a listening socket that matches @addr, either by object number
 * or (when both sides use names) by object name.  Falls back to the
 * wildcard listener if no specific match exists.  On success the
 * returned socket carries an extra reference (sock_hold) which the
 * caller must drop.  Returns NULL when nothing is listening.
 */
struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
{
	struct hlist_head *list = listen_hash(addr);
	struct hlist_node *node;
	struct sock *sk;

	read_lock(&dn_hash_lock);
	sk_for_each(sk, node, list) {
		struct dn_scp *scp = DN_SK(sk);
		if (sk->sk_state != TCP_LISTEN)
			continue;
		if (scp->addr.sdn_objnum) {
			/* Listener bound by object number: must match exactly. */
			if (scp->addr.sdn_objnum != addr->sdn_objnum)
				continue;
		} else {
			/* Listener bound by name: the request must also be by name. */
			if (addr->sdn_objnum)
				continue;
			if (scp->addr.sdn_objnamel != addr->sdn_objnamel)
				continue;
			if (memcmp(scp->addr.sdn_objname, addr->sdn_objname, le16_to_cpu(addr->sdn_objnamel)) != 0)
				continue;
		}
		sock_hold(sk);
		read_unlock(&dn_hash_lock);
		return sk;
	}

	/* No specific listener: try the single wildcard slot. */
	sk = sk_head(&dn_wild_sk);
	if (sk) {
		if (sk->sk_state == TCP_LISTEN)
			sock_hold(sk);
		else
			sk = NULL;
	}

	read_unlock(&dn_hash_lock);
	return sk;
}
413 413
/*
 * Locate the established socket an incoming skb belongs to, matching
 * on (remote node, local port, remote port) taken from the skb's
 * control block.  The returned socket carries an extra reference
 * which the caller must drop; NULL if no socket matches.
 */
struct sock *dn_find_by_skb(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct sock *sk;
	struct hlist_node *node;
	struct dn_scp *scp;

	read_lock(&dn_hash_lock);
	sk_for_each(sk, node, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
		scp = DN_SK(sk);
		if (cb->src != dn_saddr2dn(&scp->peer))
			continue;
		if (cb->dst_port != scp->addrloc)
			continue;
		/* addrrem may still be 0 during connection setup. */
		if (scp->addrrem && (cb->src_port != scp->addrrem))
			continue;
		sock_hold(sk);
		goto found;
	}
	sk = NULL;
found:
	read_unlock(&dn_hash_lock);
	return sk;
}
438 438
439 439
440 440
/*
 * Socket destructor: free any queued skbs and drop the cached route
 * before the sock itself is freed.
 */
static void dn_destruct(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	skb_queue_purge(&scp->data_xmit_queue);
	skb_queue_purge(&scp->other_xmit_queue);
	skb_queue_purge(&scp->other_receive_queue);

	dst_release(xchg(&sk->sk_dst_cache, NULL));
}
451 451
static int dn_memory_pressure;

/* Latch the protocol-wide memory pressure flag (never cleared here). */
static void dn_enter_memory_pressure(struct sock *sk)
{
	if (dn_memory_pressure == 0)
		dn_memory_pressure = 1;
}
460 460
/* Protocol descriptor registered with the socket layer for NSP. */
static struct proto dn_proto = {
	.name			= "NSP",
	.owner			= THIS_MODULE,
	.enter_memory_pressure	= dn_enter_memory_pressure,
	.memory_pressure	= &dn_memory_pressure,
	.memory_allocated	= &decnet_memory_allocated,
	.sysctl_mem		= sysctl_decnet_mem,
	.sysctl_wmem		= sysctl_decnet_wmem,
	.sysctl_rmem		= sysctl_decnet_rmem,
	.max_header		= DN_MAX_NSP_DATA_HEADER + 64,
	.obj_size		= sizeof(struct dn_sock),
};
473 473
/*
 * Allocate a new DECnet socket and initialise its Session Control
 * Port state to sane defaults.  @sock may be NULL (for sockets not
 * attached to a userspace socket, e.g. incoming connections before
 * accept).  Returns the new sock, or NULL on allocation failure.
 */
static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
{
	struct dn_scp *scp;
	struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto);

	if (!sk)
		goto out;

	if (sock)
		sock->ops = &dn_proto_ops;
	sock_init_data(sock, sk);

	sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
	sk->sk_destruct    = dn_destruct;
	sk->sk_no_check    = 1;
	sk->sk_family      = PF_DECnet;
	sk->sk_protocol    = 0;
	sk->sk_allocation  = gfp;
	sk->sk_sndbuf	   = sysctl_decnet_wmem[1];
	sk->sk_rcvbuf	   = sysctl_decnet_rmem[1];

	/* Initialization of DECnet Session Control Port		*/
	scp = DN_SK(sk);
	scp->state	= DN_O;		/* Open			*/
	scp->numdat	= 1;		/* Next data seg to tx	*/
	scp->numoth	= 1;		/* Next oth data to tx  */
	scp->ackxmt_dat = 0;		/* Last data seg ack'ed */
	scp->ackxmt_oth = 0;		/* Last oth data ack'ed */
	scp->ackrcv_dat = 0;		/* Highest data ack recv*/
	scp->ackrcv_oth = 0;		/* Last oth data ack rec*/
	scp->flowrem_sw = DN_SEND;
	scp->flowloc_sw = DN_SEND;
	scp->flowrem_dat = 0;
	scp->flowrem_oth = 1;
	scp->flowloc_dat = 0;
	scp->flowloc_oth = 1;
	scp->services_rem = 0;
	scp->services_loc = 1 | NSP_FC_NONE;
	scp->info_rem = 0;
	scp->info_loc = 0x03; /* NSP version 4.1 */
	scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by remote segsize */
	scp->nonagle = 0;
	scp->multi_ireq = 1;
	scp->accept_mode = ACC_IMMED;
	scp->addr.sdn_family    = AF_DECnet;
	scp->peer.sdn_family    = AF_DECnet;
	scp->accessdata.acc_accl = 5;
	memcpy(scp->accessdata.acc_acc, "LINUX", 5);

	/* Window / round-trip-time estimator defaults. */
	scp->max_window   = NSP_MAX_WINDOW;
	scp->snd_window   = NSP_MIN_WINDOW;
	scp->nsp_srtt     = NSP_INITIAL_SRTT;
	scp->nsp_rttvar   = NSP_INITIAL_RTTVAR;
	scp->nsp_rxtshift = 0;

	skb_queue_head_init(&scp->data_xmit_queue);
	skb_queue_head_init(&scp->other_xmit_queue);
	skb_queue_head_init(&scp->other_receive_queue);

	/* Timers: keepalive every 10s; delayed-ack timer idle. */
	scp->persist = 0;
	scp->persist_fxn = NULL;
	scp->keepalive = 10 * HZ;
	scp->keepalive_fxn = dn_keepalive;

	init_timer(&scp->delack_timer);
	scp->delack_pending = 0;
	scp->delack_fxn = dn_nsp_delayed_ack;

	dn_start_slow_timer(sk);
out:
	return sk;
}
546 546
547 /* 547 /*
548 * Keepalive timer. 548 * Keepalive timer.
549 * FIXME: Should respond to SO_KEEPALIVE etc. 549 * FIXME: Should respond to SO_KEEPALIVE etc.
550 */ 550 */
551 static void dn_keepalive(struct sock *sk) 551 static void dn_keepalive(struct sock *sk)
552 { 552 {
553 struct dn_scp *scp = DN_SK(sk); 553 struct dn_scp *scp = DN_SK(sk);
554 554
555 /* 555 /*
556 * By checking the other_data transmit queue is empty 556 * By checking the other_data transmit queue is empty
557 * we are double checking that we are not sending too 557 * we are double checking that we are not sending too
558 * many of these keepalive frames. 558 * many of these keepalive frames.
559 */ 559 */
560 if (skb_queue_empty(&scp->other_xmit_queue)) 560 if (skb_queue_empty(&scp->other_xmit_queue))
561 dn_nsp_send_link(sk, DN_NOCHANGE, 0); 561 dn_nsp_send_link(sk, DN_NOCHANGE, 0);
562 } 562 }
563 563
564 564
/*
 * Timer for shutdown/destroyed sockets.
 * When socket is dead & no packets have been sent for a
 * certain amount of time, they are removed by this
 * routine. Also takes care of sending out DI & DC
 * frames at correct times.
 *
 * Returns 1 when the socket has been unhashed and its reference
 * dropped (the caller must not touch it again), 0 otherwise.
 */
int dn_destroy_timer(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	scp->persist = dn_nsp_persist(sk);

	switch(scp->state) {
		case DN_DI:
			/* Retransmit disconnect-initiate until it is acked
			 * or we give up after decnet_di_count tries. */
			dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
			if (scp->nsp_rxtshift >= decnet_di_count)
				scp->state = DN_CN;
			return 0;

		case DN_DR:
			dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
			if (scp->nsp_rxtshift >= decnet_dr_count)
				scp->state = DN_DRC;
			return 0;

		case DN_DN:
			if (scp->nsp_rxtshift < decnet_dn_count) {
				/* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
				dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
				return 0;
			}
			/* Retries exhausted: fall out of the switch and
			 * start the time-wait countdown below. */
	}

	scp->persist = (HZ * decnet_time_wait);

	/* Still attached to a userspace socket: keep it around. */
	if (sk->sk_socket)
		return 0;

	if ((jiffies - scp->stamp) >= (HZ * decnet_time_wait)) {
		dn_unhash_sock(sk);
		sock_put(sk);
		return 1;
	}

	return 0;
}
612 612
/*
 * Begin tearing down a socket: send the disconnect frame appropriate
 * to its current NSP state and either arm dn_destroy_timer() to
 * finish the job, or (state DN_O and friends) unhash and release it
 * immediately.  Note the switch relies on deliberate fallthroughs.
 */
static void dn_destroy_sock(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	scp->nsp_rxtshift = 0; /* reset back off */

	if (sk->sk_socket) {
		if (sk->sk_socket->state != SS_UNCONNECTED)
			sk->sk_socket->state = SS_DISCONNECTING;
	}

	sk->sk_state = TCP_CLOSE;

	switch(scp->state) {
		case DN_DN:
			dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
					 sk->sk_allocation);
			scp->persist_fxn = dn_destroy_timer;
			scp->persist = dn_nsp_persist(sk);
			break;
		case DN_CR:
			scp->state = DN_DR;
			goto disc_reject;
		case DN_RUN:
			scp->state = DN_DI;
			/* fall through */
		case DN_DI:
		case DN_DR:
	disc_reject:
			dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
			/* fall through */
		case DN_NC:
		case DN_NR:
		case DN_RJ:
		case DN_DIC:
		case DN_CN:
		case DN_DRC:
		case DN_CI:
		case DN_CD:
			scp->persist_fxn = dn_destroy_timer;
			scp->persist = dn_nsp_persist(sk);
			break;
		default:
			printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
			/* fall through */
		case DN_O:
			dn_stop_slow_timer(sk);

			dn_unhash_sock_bh(sk);
			sock_put(sk);

			break;
	}
}
664 664
665 char *dn_addr2asc(__u16 addr, char *buf) 665 char *dn_addr2asc(__u16 addr, char *buf)
666 { 666 {
667 unsigned short node, area; 667 unsigned short node, area;
668 668
669 node = addr & 0x03ff; 669 node = addr & 0x03ff;
670 area = addr >> 10; 670 area = addr >> 10;
671 sprintf(buf, "%hd.%hd", area, node); 671 sprintf(buf, "%hd.%hd", area, node);
672 672
673 return buf; 673 return buf;
674 } 674 }
675 675
676 676
677 677
678 static int dn_create(struct net *net, struct socket *sock, int protocol) 678 static int dn_create(struct net *net, struct socket *sock, int protocol)
679 { 679 {
680 struct sock *sk; 680 struct sock *sk;
681 681
682 if (net != &init_net) 682 if (net != &init_net)
683 return -EAFNOSUPPORT; 683 return -EAFNOSUPPORT;
684 684
685 switch(sock->type) { 685 switch(sock->type) {
686 case SOCK_SEQPACKET: 686 case SOCK_SEQPACKET:
687 if (protocol != DNPROTO_NSP) 687 if (protocol != DNPROTO_NSP)
688 return -EPROTONOSUPPORT; 688 return -EPROTONOSUPPORT;
689 break; 689 break;
690 case SOCK_STREAM: 690 case SOCK_STREAM:
691 break; 691 break;
692 default: 692 default:
693 return -ESOCKTNOSUPPORT; 693 return -ESOCKTNOSUPPORT;
694 } 694 }
695 695
696 696
697 if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL)) == NULL) 697 if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL)) == NULL)
698 return -ENOBUFS; 698 return -ENOBUFS;
699 699
700 sk->sk_protocol = protocol; 700 sk->sk_protocol = protocol;
701 701
702 return 0; 702 return 0;
703 } 703 }
704 704
705 705
706 static int 706 static int
707 dn_release(struct socket *sock) 707 dn_release(struct socket *sock)
708 { 708 {
709 struct sock *sk = sock->sk; 709 struct sock *sk = sock->sk;
710 710
711 if (sk) { 711 if (sk) {
712 sock_orphan(sk); 712 sock_orphan(sk);
713 sock_hold(sk); 713 sock_hold(sk);
714 lock_sock(sk); 714 lock_sock(sk);
715 dn_destroy_sock(sk); 715 dn_destroy_sock(sk);
716 release_sock(sk); 716 release_sock(sk);
717 sock_put(sk); 717 sock_put(sk);
718 } 718 }
719 719
720 return 0; 720 return 0;
721 } 721 }
722 722
/*
 * Bind a socket to a local DECnet address.  Validates the sockaddr
 * (family, node address length, object name length, flags), checks
 * privilege for object numbers and wildcard binds, verifies any
 * explicit node address is local, then hashes the socket.  A socket
 * may only be bound while still SOCK_ZAPPED (never bound before).
 */
static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
	struct net_device *dev, *ldev;
	int rv;

	if (addr_len != sizeof(struct sockaddr_dn))
		return -EINVAL;

	if (saddr->sdn_family != AF_DECnet)
		return -EINVAL;

	/* A node address, if present, is exactly 2 bytes. */
	if (le16_to_cpu(saddr->sdn_nodeaddrl) && (le16_to_cpu(saddr->sdn_nodeaddrl) != 2))
		return -EINVAL;

	if (le16_to_cpu(saddr->sdn_objnamel) > DN_MAXOBJL)
		return -EINVAL;

	if (saddr->sdn_flags & ~SDF_WILD)
		return -EINVAL;

	/* Binding an object number or a wildcard is privileged. */
	if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
	    (saddr->sdn_flags & SDF_WILD)))
		return -EACCES;

	if (!(saddr->sdn_flags & SDF_WILD)) {
		if (le16_to_cpu(saddr->sdn_nodeaddrl)) {
			/* Explicit node address: must belong to one of
			 * our DECnet-enabled interfaces. */
			read_lock(&dev_base_lock);
			ldev = NULL;
			for_each_netdev(&init_net, dev) {
				if (!dev->dn_ptr)
					continue;
				if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) {
					ldev = dev;
					break;
				}
			}
			read_unlock(&dev_base_lock);
			if (ldev == NULL)
				return -EADDRNOTAVAIL;
		}
	}

	rv = -EINVAL;
	lock_sock(sk);
	if (sock_flag(sk, SOCK_ZAPPED)) {
		memcpy(&scp->addr, saddr, addr_len);
		sock_reset_flag(sk, SOCK_ZAPPED);

		rv = dn_hash_sock(sk);
		if (rv)
			sock_set_flag(sk, SOCK_ZAPPED);
	}
	release_sock(sk);

	return rv;
}
782 782
783 783
784 static int dn_auto_bind(struct socket *sock) 784 static int dn_auto_bind(struct socket *sock)
785 { 785 {
786 struct sock *sk = sock->sk; 786 struct sock *sk = sock->sk;
787 struct dn_scp *scp = DN_SK(sk); 787 struct dn_scp *scp = DN_SK(sk);
788 int rv; 788 int rv;
789 789
790 sock_reset_flag(sk, SOCK_ZAPPED); 790 sock_reset_flag(sk, SOCK_ZAPPED);
791 791
792 scp->addr.sdn_flags = 0; 792 scp->addr.sdn_flags = 0;
793 scp->addr.sdn_objnum = 0; 793 scp->addr.sdn_objnum = 0;
794 794
795 /* 795 /*
796 * This stuff is to keep compatibility with Eduardo's 796 * This stuff is to keep compatibility with Eduardo's
797 * patch. I hope I can dispense with it shortly... 797 * patch. I hope I can dispense with it shortly...
798 */ 798 */
799 if ((scp->accessdata.acc_accl != 0) && 799 if ((scp->accessdata.acc_accl != 0) &&
800 (scp->accessdata.acc_accl <= 12)) { 800 (scp->accessdata.acc_accl <= 12)) {
801 801
802 scp->addr.sdn_objnamel = dn_htons(scp->accessdata.acc_accl); 802 scp->addr.sdn_objnamel = cpu_to_le16(scp->accessdata.acc_accl);
803 memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, dn_ntohs(scp->addr.sdn_objnamel)); 803 memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, le16_to_cpu(scp->addr.sdn_objnamel));
804 804
805 scp->accessdata.acc_accl = 0; 805 scp->accessdata.acc_accl = 0;
806 memset(scp->accessdata.acc_acc, 0, 40); 806 memset(scp->accessdata.acc_acc, 0, 40);
807 } 807 }
808 /* End of compatibility stuff */ 808 /* End of compatibility stuff */
809 809
810 scp->addr.sdn_add.a_len = dn_htons(2); 810 scp->addr.sdn_add.a_len = cpu_to_le16(2);
811 rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr); 811 rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr);
812 if (rv == 0) { 812 if (rv == 0) {
813 rv = dn_hash_sock(sk); 813 rv = dn_hash_sock(sk);
814 if (rv) 814 if (rv)
815 sock_set_flag(sk, SOCK_ZAPPED); 815 sock_set_flag(sk, SOCK_ZAPPED);
816 } 816 }
817 817
818 return rv; 818 return rv;
819 } 819 }
820 820
821 static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) 821 static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
822 { 822 {
823 struct dn_scp *scp = DN_SK(sk); 823 struct dn_scp *scp = DN_SK(sk);
824 DEFINE_WAIT(wait); 824 DEFINE_WAIT(wait);
825 int err; 825 int err;
826 826
827 if (scp->state != DN_CR) 827 if (scp->state != DN_CR)
828 return -EINVAL; 828 return -EINVAL;
829 829
830 scp->state = DN_CC; 830 scp->state = DN_CC;
831 scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS); 831 scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS);
832 dn_send_conn_conf(sk, allocation); 832 dn_send_conn_conf(sk, allocation);
833 833
834 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 834 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
835 for(;;) { 835 for(;;) {
836 release_sock(sk); 836 release_sock(sk);
837 if (scp->state == DN_CC) 837 if (scp->state == DN_CC)
838 *timeo = schedule_timeout(*timeo); 838 *timeo = schedule_timeout(*timeo);
839 lock_sock(sk); 839 lock_sock(sk);
840 err = 0; 840 err = 0;
841 if (scp->state == DN_RUN) 841 if (scp->state == DN_RUN)
842 break; 842 break;
843 err = sock_error(sk); 843 err = sock_error(sk);
844 if (err) 844 if (err)
845 break; 845 break;
846 err = sock_intr_errno(*timeo); 846 err = sock_intr_errno(*timeo);
847 if (signal_pending(current)) 847 if (signal_pending(current))
848 break; 848 break;
849 err = -EAGAIN; 849 err = -EAGAIN;
850 if (!*timeo) 850 if (!*timeo)
851 break; 851 break;
852 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 852 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
853 } 853 }
854 finish_wait(sk->sk_sleep, &wait); 854 finish_wait(sk->sk_sleep, &wait);
855 if (err == 0) { 855 if (err == 0) {
856 sk->sk_socket->state = SS_CONNECTED; 856 sk->sk_socket->state = SS_CONNECTED;
857 } else if (scp->state != DN_CC) { 857 } else if (scp->state != DN_CC) {
858 sk->sk_socket->state = SS_UNCONNECTED; 858 sk->sk_socket->state = SS_UNCONNECTED;
859 } 859 }
860 return err; 860 return err;
861 } 861 }
862 862
863 static int dn_wait_run(struct sock *sk, long *timeo) 863 static int dn_wait_run(struct sock *sk, long *timeo)
864 { 864 {
865 struct dn_scp *scp = DN_SK(sk); 865 struct dn_scp *scp = DN_SK(sk);
866 DEFINE_WAIT(wait); 866 DEFINE_WAIT(wait);
867 int err = 0; 867 int err = 0;
868 868
869 if (scp->state == DN_RUN) 869 if (scp->state == DN_RUN)
870 goto out; 870 goto out;
871 871
872 if (!*timeo) 872 if (!*timeo)
873 return -EALREADY; 873 return -EALREADY;
874 874
875 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 875 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
876 for(;;) { 876 for(;;) {
877 release_sock(sk); 877 release_sock(sk);
878 if (scp->state == DN_CI || scp->state == DN_CC) 878 if (scp->state == DN_CI || scp->state == DN_CC)
879 *timeo = schedule_timeout(*timeo); 879 *timeo = schedule_timeout(*timeo);
880 lock_sock(sk); 880 lock_sock(sk);
881 err = 0; 881 err = 0;
882 if (scp->state == DN_RUN) 882 if (scp->state == DN_RUN)
883 break; 883 break;
884 err = sock_error(sk); 884 err = sock_error(sk);
885 if (err) 885 if (err)
886 break; 886 break;
887 err = sock_intr_errno(*timeo); 887 err = sock_intr_errno(*timeo);
888 if (signal_pending(current)) 888 if (signal_pending(current))
889 break; 889 break;
890 err = -ETIMEDOUT; 890 err = -ETIMEDOUT;
891 if (!*timeo) 891 if (!*timeo)
892 break; 892 break;
893 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 893 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
894 } 894 }
895 finish_wait(sk->sk_sleep, &wait); 895 finish_wait(sk->sk_sleep, &wait);
896 out: 896 out:
897 if (err == 0) { 897 if (err == 0) {
898 sk->sk_socket->state = SS_CONNECTED; 898 sk->sk_socket->state = SS_CONNECTED;
899 } else if (scp->state != DN_CI && scp->state != DN_CC) { 899 } else if (scp->state != DN_CI && scp->state != DN_CC) {
900 sk->sk_socket->state = SS_UNCONNECTED; 900 sk->sk_socket->state = SS_UNCONNECTED;
901 } 901 }
902 return err; 902 return err;
903 } 903 }
904 904
/*
 * Core of connect(2): validate the destination, auto-bind if needed,
 * find a route, send the connect-initiate and (when a timeout is set)
 * wait for the connection to come up.  Called with the socket locked.
 * Returns 0, -EINPROGRESS for a pending non-blocking connect, or a
 * negative errno.
 */
static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr,
			int addrlen, long *timeo, int flags)
{
	struct socket *sock = sk->sk_socket;
	struct dn_scp *scp = DN_SK(sk);
	int err = -EISCONN;
	struct flowi fl;

	if (sock->state == SS_CONNECTED)
		goto out;

	if (sock->state == SS_CONNECTING) {
		/* A previous connect is still in flight; report its fate. */
		err = 0;
		if (scp->state == DN_RUN) {
			sock->state = SS_CONNECTED;
			goto out;
		}
		err = -ECONNREFUSED;
		if (scp->state != DN_CI && scp->state != DN_CC) {
			sock->state = SS_UNCONNECTED;
			goto out;
		}
		return dn_wait_run(sk, timeo);
	}

	err = -EINVAL;
	if (scp->state != DN_O)
		goto out;

	if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
		goto out;
	if (addr->sdn_family != AF_DECnet)
		goto out;
	if (addr->sdn_flags & SDF_WILD)
		goto out;

	/* Bind to a default local address if the caller never did. */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		err = dn_auto_bind(sk->sk_socket);
		if (err)
			goto out;
	}

	memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));

	err = -EHOSTUNREACH;
	memset(&fl, 0, sizeof(fl));
	fl.oif = sk->sk_bound_dev_if;
	fl.fld_dst = dn_saddr2dn(&scp->peer);
	fl.fld_src = dn_saddr2dn(&scp->addr);
	dn_sk_ports_copy(&fl, scp);
	fl.proto = DNPROTO_NSP;
	if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, flags) < 0)
		goto out;
	sk->sk_route_caps = sk->sk_dst_cache->dev->features;
	sock->state = SS_CONNECTING;
	scp->state = DN_CI;
	scp->segsize_loc = dst_metric(sk->sk_dst_cache, RTAX_ADVMSS);

	dn_nsp_send_conninit(sk, NSP_CI);
	err = -EINPROGRESS;
	if (*timeo) {
		/* Blocking connect: wait for the handshake to finish. */
		err = dn_wait_run(sk, timeo);
	}
out:
	return err;
}
970 970
971 static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags) 971 static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags)
972 { 972 {
973 struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr; 973 struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr;
974 struct sock *sk = sock->sk; 974 struct sock *sk = sock->sk;
975 int err; 975 int err;
976 long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); 976 long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
977 977
978 lock_sock(sk); 978 lock_sock(sk);
979 err = __dn_connect(sk, addr, addrlen, &timeo, 0); 979 err = __dn_connect(sk, addr, addrlen, &timeo, 0);
980 release_sock(sk); 980 release_sock(sk);
981 981
982 return err; 982 return err;
983 } 983 }
984 984
985 static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags) 985 static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
986 { 986 {
987 struct dn_scp *scp = DN_SK(sk); 987 struct dn_scp *scp = DN_SK(sk);
988 988
989 switch(scp->state) { 989 switch(scp->state) {
990 case DN_RUN: 990 case DN_RUN:
991 return 0; 991 return 0;
992 case DN_CR: 992 case DN_CR:
993 return dn_confirm_accept(sk, timeo, sk->sk_allocation); 993 return dn_confirm_accept(sk, timeo, sk->sk_allocation);
994 case DN_CI: 994 case DN_CI:
995 case DN_CC: 995 case DN_CC:
996 return dn_wait_run(sk, timeo); 996 return dn_wait_run(sk, timeo);
997 case DN_O: 997 case DN_O:
998 return __dn_connect(sk, addr, addrlen, timeo, flags); 998 return __dn_connect(sk, addr, addrlen, timeo, flags);
999 } 999 }
1000 1000
1001 return -EINVAL; 1001 return -EINVAL;
1002 } 1002 }
1003 1003
1004 1004
1005 static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc) 1005 static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
1006 { 1006 {
1007 unsigned char *ptr = skb->data; 1007 unsigned char *ptr = skb->data;
1008 1008
1009 acc->acc_userl = *ptr++; 1009 acc->acc_userl = *ptr++;
1010 memcpy(&acc->acc_user, ptr, acc->acc_userl); 1010 memcpy(&acc->acc_user, ptr, acc->acc_userl);
1011 ptr += acc->acc_userl; 1011 ptr += acc->acc_userl;
1012 1012
1013 acc->acc_passl = *ptr++; 1013 acc->acc_passl = *ptr++;
1014 memcpy(&acc->acc_pass, ptr, acc->acc_passl); 1014 memcpy(&acc->acc_pass, ptr, acc->acc_passl);
1015 ptr += acc->acc_passl; 1015 ptr += acc->acc_passl;
1016 1016
1017 acc->acc_accl = *ptr++; 1017 acc->acc_accl = *ptr++;
1018 memcpy(&acc->acc_acc, ptr, acc->acc_accl); 1018 memcpy(&acc->acc_acc, ptr, acc->acc_accl);
1019 1019
1020 skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3); 1020 skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
1021 1021
1022 } 1022 }
1023 1023
1024 static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt) 1024 static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
1025 { 1025 {
1026 unsigned char *ptr = skb->data; 1026 unsigned char *ptr = skb->data;
1027 u16 len = *ptr++; /* yes, it's 8bit on the wire */ 1027 u16 len = *ptr++; /* yes, it's 8bit on the wire */
1028 1028
1029 BUG_ON(len > 16); /* we've checked the contents earlier */ 1029 BUG_ON(len > 16); /* we've checked the contents earlier */
1030 opt->opt_optl = dn_htons(len); 1030 opt->opt_optl = cpu_to_le16(len);
1031 opt->opt_status = 0; 1031 opt->opt_status = 0;
1032 memcpy(opt->opt_data, ptr, len); 1032 memcpy(opt->opt_data, ptr, len);
1033 skb_pull(skb, len + 1); 1033 skb_pull(skb, len + 1);
1034 } 1034 }
1035 1035
1036 static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) 1036 static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1037 { 1037 {
1038 DEFINE_WAIT(wait); 1038 DEFINE_WAIT(wait);
1039 struct sk_buff *skb = NULL; 1039 struct sk_buff *skb = NULL;
1040 int err = 0; 1040 int err = 0;
1041 1041
1042 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1042 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1043 for(;;) { 1043 for(;;) {
1044 release_sock(sk); 1044 release_sock(sk);
1045 skb = skb_dequeue(&sk->sk_receive_queue); 1045 skb = skb_dequeue(&sk->sk_receive_queue);
1046 if (skb == NULL) { 1046 if (skb == NULL) {
1047 *timeo = schedule_timeout(*timeo); 1047 *timeo = schedule_timeout(*timeo);
1048 skb = skb_dequeue(&sk->sk_receive_queue); 1048 skb = skb_dequeue(&sk->sk_receive_queue);
1049 } 1049 }
1050 lock_sock(sk); 1050 lock_sock(sk);
1051 if (skb != NULL) 1051 if (skb != NULL)
1052 break; 1052 break;
1053 err = -EINVAL; 1053 err = -EINVAL;
1054 if (sk->sk_state != TCP_LISTEN) 1054 if (sk->sk_state != TCP_LISTEN)
1055 break; 1055 break;
1056 err = sock_intr_errno(*timeo); 1056 err = sock_intr_errno(*timeo);
1057 if (signal_pending(current)) 1057 if (signal_pending(current))
1058 break; 1058 break;
1059 err = -EAGAIN; 1059 err = -EAGAIN;
1060 if (!*timeo) 1060 if (!*timeo)
1061 break; 1061 break;
1062 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1062 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1063 } 1063 }
1064 finish_wait(sk->sk_sleep, &wait); 1064 finish_wait(sk->sk_sleep, &wait);
1065 1065
1066 return skb == NULL ? ERR_PTR(err) : skb; 1066 return skb == NULL ? ERR_PTR(err) : skb;
1067 } 1067 }
1068 1068
1069 static int dn_accept(struct socket *sock, struct socket *newsock, int flags) 1069 static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1070 { 1070 {
1071 struct sock *sk = sock->sk, *newsk; 1071 struct sock *sk = sock->sk, *newsk;
1072 struct sk_buff *skb = NULL; 1072 struct sk_buff *skb = NULL;
1073 struct dn_skb_cb *cb; 1073 struct dn_skb_cb *cb;
1074 unsigned char menuver; 1074 unsigned char menuver;
1075 int err = 0; 1075 int err = 0;
1076 unsigned char type; 1076 unsigned char type;
1077 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 1077 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1078 1078
1079 lock_sock(sk); 1079 lock_sock(sk);
1080 1080
1081 if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) { 1081 if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
1082 release_sock(sk); 1082 release_sock(sk);
1083 return -EINVAL; 1083 return -EINVAL;
1084 } 1084 }
1085 1085
1086 skb = skb_dequeue(&sk->sk_receive_queue); 1086 skb = skb_dequeue(&sk->sk_receive_queue);
1087 if (skb == NULL) { 1087 if (skb == NULL) {
1088 skb = dn_wait_for_connect(sk, &timeo); 1088 skb = dn_wait_for_connect(sk, &timeo);
1089 if (IS_ERR(skb)) { 1089 if (IS_ERR(skb)) {
1090 release_sock(sk); 1090 release_sock(sk);
1091 return PTR_ERR(skb); 1091 return PTR_ERR(skb);
1092 } 1092 }
1093 } 1093 }
1094 1094
1095 cb = DN_SKB_CB(skb); 1095 cb = DN_SKB_CB(skb);
1096 sk->sk_ack_backlog--; 1096 sk->sk_ack_backlog--;
1097 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation); 1097 newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation);
1098 if (newsk == NULL) { 1098 if (newsk == NULL) {
1099 release_sock(sk); 1099 release_sock(sk);
1100 kfree_skb(skb); 1100 kfree_skb(skb);
1101 return -ENOBUFS; 1101 return -ENOBUFS;
1102 } 1102 }
1103 release_sock(sk); 1103 release_sock(sk);
1104 1104
1105 dst_release(xchg(&newsk->sk_dst_cache, skb->dst)); 1105 dst_release(xchg(&newsk->sk_dst_cache, skb->dst));
1106 skb->dst = NULL; 1106 skb->dst = NULL;
1107 1107
1108 DN_SK(newsk)->state = DN_CR; 1108 DN_SK(newsk)->state = DN_CR;
1109 DN_SK(newsk)->addrrem = cb->src_port; 1109 DN_SK(newsk)->addrrem = cb->src_port;
1110 DN_SK(newsk)->services_rem = cb->services; 1110 DN_SK(newsk)->services_rem = cb->services;
1111 DN_SK(newsk)->info_rem = cb->info; 1111 DN_SK(newsk)->info_rem = cb->info;
1112 DN_SK(newsk)->segsize_rem = cb->segsize; 1112 DN_SK(newsk)->segsize_rem = cb->segsize;
1113 DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode; 1113 DN_SK(newsk)->accept_mode = DN_SK(sk)->accept_mode;
1114 1114
1115 if (DN_SK(newsk)->segsize_rem < 230) 1115 if (DN_SK(newsk)->segsize_rem < 230)
1116 DN_SK(newsk)->segsize_rem = 230; 1116 DN_SK(newsk)->segsize_rem = 230;
1117 1117
1118 if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE) 1118 if ((DN_SK(newsk)->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
1119 DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd; 1119 DN_SK(newsk)->max_window = decnet_no_fc_max_cwnd;
1120 1120
1121 newsk->sk_state = TCP_LISTEN; 1121 newsk->sk_state = TCP_LISTEN;
1122 memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn)); 1122 memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));
1123 1123
1124 /* 1124 /*
1125 * If we are listening on a wild socket, we don't want 1125 * If we are listening on a wild socket, we don't want
1126 * the newly created socket on the wrong hash queue. 1126 * the newly created socket on the wrong hash queue.
1127 */ 1127 */
1128 DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD; 1128 DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD;
1129 1129
1130 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type)); 1130 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
1131 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type)); 1131 skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
1132 *(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src; 1132 *(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
1133 *(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst; 1133 *(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst;
1134 1134
1135 menuver = *skb->data; 1135 menuver = *skb->data;
1136 skb_pull(skb, 1); 1136 skb_pull(skb, 1);
1137 1137
1138 if (menuver & DN_MENUVER_ACC) 1138 if (menuver & DN_MENUVER_ACC)
1139 dn_access_copy(skb, &(DN_SK(newsk)->accessdata)); 1139 dn_access_copy(skb, &(DN_SK(newsk)->accessdata));
1140 1140
1141 if (menuver & DN_MENUVER_USR) 1141 if (menuver & DN_MENUVER_USR)
1142 dn_user_copy(skb, &(DN_SK(newsk)->conndata_in)); 1142 dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));
1143 1143
1144 if (menuver & DN_MENUVER_PRX) 1144 if (menuver & DN_MENUVER_PRX)
1145 DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY; 1145 DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;
1146 1146
1147 if (menuver & DN_MENUVER_UIC) 1147 if (menuver & DN_MENUVER_UIC)
1148 DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY; 1148 DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;
1149 1149
1150 kfree_skb(skb); 1150 kfree_skb(skb);
1151 1151
1152 memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out), 1152 memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
1153 sizeof(struct optdata_dn)); 1153 sizeof(struct optdata_dn));
1154 memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out), 1154 memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
1155 sizeof(struct optdata_dn)); 1155 sizeof(struct optdata_dn));
1156 1156
1157 lock_sock(newsk); 1157 lock_sock(newsk);
1158 err = dn_hash_sock(newsk); 1158 err = dn_hash_sock(newsk);
1159 if (err == 0) { 1159 if (err == 0) {
1160 sock_reset_flag(newsk, SOCK_ZAPPED); 1160 sock_reset_flag(newsk, SOCK_ZAPPED);
1161 dn_send_conn_ack(newsk); 1161 dn_send_conn_ack(newsk);
1162 1162
1163 /* 1163 /*
1164 * Here we use sk->sk_allocation since although the conn conf is 1164 * Here we use sk->sk_allocation since although the conn conf is
1165 * for the newsk, the context is the old socket. 1165 * for the newsk, the context is the old socket.
1166 */ 1166 */
1167 if (DN_SK(newsk)->accept_mode == ACC_IMMED) 1167 if (DN_SK(newsk)->accept_mode == ACC_IMMED)
1168 err = dn_confirm_accept(newsk, &timeo, 1168 err = dn_confirm_accept(newsk, &timeo,
1169 sk->sk_allocation); 1169 sk->sk_allocation);
1170 } 1170 }
1171 release_sock(newsk); 1171 release_sock(newsk);
1172 return err; 1172 return err;
1173 } 1173 }
1174 1174
1175 1175
1176 static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len,int peer) 1176 static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int *uaddr_len,int peer)
1177 { 1177 {
1178 struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr; 1178 struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr;
1179 struct sock *sk = sock->sk; 1179 struct sock *sk = sock->sk;
1180 struct dn_scp *scp = DN_SK(sk); 1180 struct dn_scp *scp = DN_SK(sk);
1181 1181
1182 *uaddr_len = sizeof(struct sockaddr_dn); 1182 *uaddr_len = sizeof(struct sockaddr_dn);
1183 1183
1184 lock_sock(sk); 1184 lock_sock(sk);
1185 1185
1186 if (peer) { 1186 if (peer) {
1187 if ((sock->state != SS_CONNECTED && 1187 if ((sock->state != SS_CONNECTED &&
1188 sock->state != SS_CONNECTING) && 1188 sock->state != SS_CONNECTING) &&
1189 scp->accept_mode == ACC_IMMED) { 1189 scp->accept_mode == ACC_IMMED) {
1190 release_sock(sk); 1190 release_sock(sk);
1191 return -ENOTCONN; 1191 return -ENOTCONN;
1192 } 1192 }
1193 1193
1194 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn)); 1194 memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
1195 } else { 1195 } else {
1196 memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn)); 1196 memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn));
1197 } 1197 }
1198 1198
1199 release_sock(sk); 1199 release_sock(sk);
1200 1200
1201 return 0; 1201 return 0;
1202 } 1202 }
1203 1203
1204 1204
1205 static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table *wait) 1205 static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table *wait)
1206 { 1206 {
1207 struct sock *sk = sock->sk; 1207 struct sock *sk = sock->sk;
1208 struct dn_scp *scp = DN_SK(sk); 1208 struct dn_scp *scp = DN_SK(sk);
1209 int mask = datagram_poll(file, sock, wait); 1209 int mask = datagram_poll(file, sock, wait);
1210 1210
1211 if (!skb_queue_empty(&scp->other_receive_queue)) 1211 if (!skb_queue_empty(&scp->other_receive_queue))
1212 mask |= POLLRDBAND; 1212 mask |= POLLRDBAND;
1213 1213
1214 return mask; 1214 return mask;
1215 } 1215 }
1216 1216
1217 static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 1217 static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1218 { 1218 {
1219 struct sock *sk = sock->sk; 1219 struct sock *sk = sock->sk;
1220 struct dn_scp *scp = DN_SK(sk); 1220 struct dn_scp *scp = DN_SK(sk);
1221 int err = -EOPNOTSUPP; 1221 int err = -EOPNOTSUPP;
1222 long amount = 0; 1222 long amount = 0;
1223 struct sk_buff *skb; 1223 struct sk_buff *skb;
1224 int val; 1224 int val;
1225 1225
1226 switch(cmd) 1226 switch(cmd)
1227 { 1227 {
1228 case SIOCGIFADDR: 1228 case SIOCGIFADDR:
1229 case SIOCSIFADDR: 1229 case SIOCSIFADDR:
1230 return dn_dev_ioctl(cmd, (void __user *)arg); 1230 return dn_dev_ioctl(cmd, (void __user *)arg);
1231 1231
1232 case SIOCATMARK: 1232 case SIOCATMARK:
1233 lock_sock(sk); 1233 lock_sock(sk);
1234 val = !skb_queue_empty(&scp->other_receive_queue); 1234 val = !skb_queue_empty(&scp->other_receive_queue);
1235 if (scp->state != DN_RUN) 1235 if (scp->state != DN_RUN)
1236 val = -ENOTCONN; 1236 val = -ENOTCONN;
1237 release_sock(sk); 1237 release_sock(sk);
1238 return val; 1238 return val;
1239 1239
1240 case TIOCOUTQ: 1240 case TIOCOUTQ:
1241 amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); 1241 amount = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
1242 if (amount < 0) 1242 if (amount < 0)
1243 amount = 0; 1243 amount = 0;
1244 err = put_user(amount, (int __user *)arg); 1244 err = put_user(amount, (int __user *)arg);
1245 break; 1245 break;
1246 1246
1247 case TIOCINQ: 1247 case TIOCINQ:
1248 lock_sock(sk); 1248 lock_sock(sk);
1249 if ((skb = skb_peek(&scp->other_receive_queue)) != NULL) { 1249 if ((skb = skb_peek(&scp->other_receive_queue)) != NULL) {
1250 amount = skb->len; 1250 amount = skb->len;
1251 } else { 1251 } else {
1252 struct sk_buff *skb = sk->sk_receive_queue.next; 1252 struct sk_buff *skb = sk->sk_receive_queue.next;
1253 for(;;) { 1253 for(;;) {
1254 if (skb == 1254 if (skb ==
1255 (struct sk_buff *)&sk->sk_receive_queue) 1255 (struct sk_buff *)&sk->sk_receive_queue)
1256 break; 1256 break;
1257 amount += skb->len; 1257 amount += skb->len;
1258 skb = skb->next; 1258 skb = skb->next;
1259 } 1259 }
1260 } 1260 }
1261 release_sock(sk); 1261 release_sock(sk);
1262 err = put_user(amount, (int __user *)arg); 1262 err = put_user(amount, (int __user *)arg);
1263 break; 1263 break;
1264 1264
1265 default: 1265 default:
1266 err = -ENOIOCTLCMD; 1266 err = -ENOIOCTLCMD;
1267 break; 1267 break;
1268 } 1268 }
1269 1269
1270 return err; 1270 return err;
1271 } 1271 }
1272 1272
1273 static int dn_listen(struct socket *sock, int backlog) 1273 static int dn_listen(struct socket *sock, int backlog)
1274 { 1274 {
1275 struct sock *sk = sock->sk; 1275 struct sock *sk = sock->sk;
1276 int err = -EINVAL; 1276 int err = -EINVAL;
1277 1277
1278 lock_sock(sk); 1278 lock_sock(sk);
1279 1279
1280 if (sock_flag(sk, SOCK_ZAPPED)) 1280 if (sock_flag(sk, SOCK_ZAPPED))
1281 goto out; 1281 goto out;
1282 1282
1283 if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN)) 1283 if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
1284 goto out; 1284 goto out;
1285 1285
1286 sk->sk_max_ack_backlog = backlog; 1286 sk->sk_max_ack_backlog = backlog;
1287 sk->sk_ack_backlog = 0; 1287 sk->sk_ack_backlog = 0;
1288 sk->sk_state = TCP_LISTEN; 1288 sk->sk_state = TCP_LISTEN;
1289 err = 0; 1289 err = 0;
1290 dn_rehash_sock(sk); 1290 dn_rehash_sock(sk);
1291 1291
1292 out: 1292 out:
1293 release_sock(sk); 1293 release_sock(sk);
1294 1294
1295 return err; 1295 return err;
1296 } 1296 }
1297 1297
1298 1298
1299 static int dn_shutdown(struct socket *sock, int how) 1299 static int dn_shutdown(struct socket *sock, int how)
1300 { 1300 {
1301 struct sock *sk = sock->sk; 1301 struct sock *sk = sock->sk;
1302 struct dn_scp *scp = DN_SK(sk); 1302 struct dn_scp *scp = DN_SK(sk);
1303 int err = -ENOTCONN; 1303 int err = -ENOTCONN;
1304 1304
1305 lock_sock(sk); 1305 lock_sock(sk);
1306 1306
1307 if (sock->state == SS_UNCONNECTED) 1307 if (sock->state == SS_UNCONNECTED)
1308 goto out; 1308 goto out;
1309 1309
1310 err = 0; 1310 err = 0;
1311 if (sock->state == SS_DISCONNECTING) 1311 if (sock->state == SS_DISCONNECTING)
1312 goto out; 1312 goto out;
1313 1313
1314 err = -EINVAL; 1314 err = -EINVAL;
1315 if (scp->state == DN_O) 1315 if (scp->state == DN_O)
1316 goto out; 1316 goto out;
1317 1317
1318 if (how != SHUTDOWN_MASK) 1318 if (how != SHUTDOWN_MASK)
1319 goto out; 1319 goto out;
1320 1320
1321 sk->sk_shutdown = how; 1321 sk->sk_shutdown = how;
1322 dn_destroy_sock(sk); 1322 dn_destroy_sock(sk);
1323 err = 0; 1323 err = 0;
1324 1324
1325 out: 1325 out:
1326 release_sock(sk); 1326 release_sock(sk);
1327 1327
1328 return err; 1328 return err;
1329 } 1329 }
1330 1330
1331 static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) 1331 static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1332 { 1332 {
1333 struct sock *sk = sock->sk; 1333 struct sock *sk = sock->sk;
1334 int err; 1334 int err;
1335 1335
1336 lock_sock(sk); 1336 lock_sock(sk);
1337 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0); 1337 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
1338 release_sock(sk); 1338 release_sock(sk);
1339 1339
1340 return err; 1340 return err;
1341 } 1341 }
1342 1342
/*
 * Worker for dn_setsockopt(): apply a DECnet-level socket option.
 * Called with the socket lock already held by dn_setsockopt().
 *
 * Returns 0 on success or a negative errno.  Unknown options are
 * handed to netfilter when CONFIG_NETFILTER is set; otherwise they
 * fall through to the read-only/unsupported group and get
 * -ENOPROTOOPT (the default label deliberately sits before those
 * case labels for exactly this reason).
 */
static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, int optlen, int flags)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	long timeo;
	/* Scratch area: one member per option payload we may copy in */
	union {
		struct optdata_dn opt;
		struct accessdata_dn acc;
		int mode;
		unsigned long win;
		int val;
		unsigned char services;
		unsigned char info;
	} u;
	int err;

	if (optlen && !optval)
		return -EINVAL;

	if (optlen > sizeof(u))
		return -EINVAL;

	/* Pull the user's payload into the union; per-case code below
	 * validates the exact length before using it. */
	if (copy_from_user(&u, optval, optlen))
		return -EFAULT;

	switch(optname) {
	case DSO_CONDATA:
		/* Connect data can only be set before the link is up */
		if (sock->state == SS_CONNECTED)
			return -EISCONN;
		if ((scp->state != DN_O) && (scp->state != DN_CR))
			return -EINVAL;

		if (optlen != sizeof(struct optdata_dn))
			return -EINVAL;

		/* opt_optl is little-endian on the wire; at most 16 bytes */
		if (le16_to_cpu(u.opt.opt_optl) > 16)
			return -EINVAL;

		memcpy(&scp->conndata_out, &u.opt, optlen);
		break;

	case DSO_DISDATA:
		/* Disconnect data needs an established (or deferred-accept)
		 * connection to be meaningful */
		if (sock->state != SS_CONNECTED && scp->accept_mode == ACC_IMMED)
			return -ENOTCONN;

		if (optlen != sizeof(struct optdata_dn))
			return -EINVAL;

		if (le16_to_cpu(u.opt.opt_optl) > 16)
			return -EINVAL;

		memcpy(&scp->discdata_out, &u.opt, optlen);
		break;

	case DSO_CONACCESS:
		/* Access control info is only settable on a fresh socket */
		if (sock->state == SS_CONNECTED)
			return -EISCONN;
		if (scp->state != DN_O)
			return -EINVAL;

		if (optlen != sizeof(struct accessdata_dn))
			return -EINVAL;

		/* All three embedded string lengths must be within bounds */
		if ((u.acc.acc_accl > DN_MAXACCL) ||
		    (u.acc.acc_passl > DN_MAXACCL) ||
		    (u.acc.acc_userl > DN_MAXACCL))
			return -EINVAL;

		memcpy(&scp->accessdata, &u.acc, optlen);
		break;

	case DSO_ACCEPTMODE:
		if (sock->state == SS_CONNECTED)
			return -EISCONN;
		if (scp->state != DN_O)
			return -EINVAL;

		if (optlen != sizeof(int))
			return -EINVAL;

		if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
			return -EINVAL;

		scp->accept_mode = (unsigned char)u.mode;
		break;

	case DSO_CONACCEPT:
		/* Complete a deferred accept; only valid in Connect Receive */
		if (scp->state != DN_CR)
			return -EINVAL;
		timeo = sock_rcvtimeo(sk, 0);
		err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
		return err;

	case DSO_CONREJECT:
		/* Reject a pending connection: move to Disconnect Reject and
		 * send a disconnect message (reason 0x38) to the peer */
		if (scp->state != DN_CR)
			return -EINVAL;

		scp->state = DN_DR;
		sk->sk_shutdown = SHUTDOWN_MASK;
		dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
		break;

	default:
#ifdef CONFIG_NETFILTER
		/* Unknown options may belong to netfilter */
		return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
#endif
	case DSO_LINKINFO:
	case DSO_STREAM:
	case DSO_SEQPACKET:
		/* Read-only or unsupported via setsockopt */
		return -ENOPROTOOPT;

	case DSO_MAXWINDOW:
		if (optlen != sizeof(unsigned long))
			return -EINVAL;
		/* Clamp to protocol maximum rather than rejecting */
		if (u.win > NSP_MAX_WINDOW)
			u.win = NSP_MAX_WINDOW;
		if (u.win == 0)
			return -EINVAL;
		scp->max_window = u.win;
		if (scp->snd_window > u.win)
			scp->snd_window = u.win;
		break;

	case DSO_NODELAY:
		if (optlen != sizeof(int))
			return -EINVAL;
		/* nonagle == 2 means corked; NODELAY and CORK are exclusive */
		if (scp->nonagle == 2)
			return -EINVAL;
		scp->nonagle = (u.val == 0) ? 0 : 1;
		/* if (scp->nonagle == 1) { Push pending frames } */
		break;

	case DSO_CORK:
		if (optlen != sizeof(int))
			return -EINVAL;
		/* nonagle == 1 means NODELAY; CORK and NODELAY are exclusive */
		if (scp->nonagle == 1)
			return -EINVAL;
		scp->nonagle = (u.val == 0) ? 0 : 2;
		/* if (scp->nonagle == 0) { Push pending frames } */
		break;

	case DSO_SERVICES:
		if (optlen != sizeof(unsigned char))
			return -EINVAL;
		/* Only bit 0 may be set outside the flow-control field */
		if ((u.services & ~NSP_FC_MASK) != 0x01)
			return -EINVAL;
		/* All flow-control bits set is an invalid encoding */
		if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
			return -EINVAL;
		scp->services_loc = u.services;
		break;

	case DSO_INFO:
		if (optlen != sizeof(unsigned char))
			return -EINVAL;
		/* Only the low two bits are defined */
		if (u.info & 0xfc)
			return -EINVAL;
		scp->info_loc = u.info;
		break;
	}

	return 0;
}
1507 1507
1508 static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) 1508 static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1509 { 1509 {
1510 struct sock *sk = sock->sk; 1510 struct sock *sk = sock->sk;
1511 int err; 1511 int err;
1512 1512
1513 lock_sock(sk); 1513 lock_sock(sk);
1514 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0); 1514 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
1515 release_sock(sk); 1515 release_sock(sk);
1516 1516
1517 return err; 1517 return err;
1518 } 1518 }
1519 1519
1520 static int __dn_getsockopt(struct socket *sock, int level,int optname, char __user *optval,int __user *optlen, int flags) 1520 static int __dn_getsockopt(struct socket *sock, int level,int optname, char __user *optval,int __user *optlen, int flags)
1521 { 1521 {
1522 struct sock *sk = sock->sk; 1522 struct sock *sk = sock->sk;
1523 struct dn_scp *scp = DN_SK(sk); 1523 struct dn_scp *scp = DN_SK(sk);
1524 struct linkinfo_dn link; 1524 struct linkinfo_dn link;
1525 unsigned int r_len; 1525 unsigned int r_len;
1526 void *r_data = NULL; 1526 void *r_data = NULL;
1527 unsigned int val; 1527 unsigned int val;
1528 1528
1529 if(get_user(r_len , optlen)) 1529 if(get_user(r_len , optlen))
1530 return -EFAULT; 1530 return -EFAULT;
1531 1531
1532 switch(optname) { 1532 switch(optname) {
1533 case DSO_CONDATA: 1533 case DSO_CONDATA:
1534 if (r_len > sizeof(struct optdata_dn)) 1534 if (r_len > sizeof(struct optdata_dn))
1535 r_len = sizeof(struct optdata_dn); 1535 r_len = sizeof(struct optdata_dn);
1536 r_data = &scp->conndata_in; 1536 r_data = &scp->conndata_in;
1537 break; 1537 break;
1538 1538
1539 case DSO_DISDATA: 1539 case DSO_DISDATA:
1540 if (r_len > sizeof(struct optdata_dn)) 1540 if (r_len > sizeof(struct optdata_dn))
1541 r_len = sizeof(struct optdata_dn); 1541 r_len = sizeof(struct optdata_dn);
1542 r_data = &scp->discdata_in; 1542 r_data = &scp->discdata_in;
1543 break; 1543 break;
1544 1544
1545 case DSO_CONACCESS: 1545 case DSO_CONACCESS:
1546 if (r_len > sizeof(struct accessdata_dn)) 1546 if (r_len > sizeof(struct accessdata_dn))
1547 r_len = sizeof(struct accessdata_dn); 1547 r_len = sizeof(struct accessdata_dn);
1548 r_data = &scp->accessdata; 1548 r_data = &scp->accessdata;
1549 break; 1549 break;
1550 1550
1551 case DSO_ACCEPTMODE: 1551 case DSO_ACCEPTMODE:
1552 if (r_len > sizeof(unsigned char)) 1552 if (r_len > sizeof(unsigned char))
1553 r_len = sizeof(unsigned char); 1553 r_len = sizeof(unsigned char);
1554 r_data = &scp->accept_mode; 1554 r_data = &scp->accept_mode;
1555 break; 1555 break;
1556 1556
1557 case DSO_LINKINFO: 1557 case DSO_LINKINFO:
1558 if (r_len > sizeof(struct linkinfo_dn)) 1558 if (r_len > sizeof(struct linkinfo_dn))
1559 r_len = sizeof(struct linkinfo_dn); 1559 r_len = sizeof(struct linkinfo_dn);
1560 1560
1561 switch(sock->state) { 1561 switch(sock->state) {
1562 case SS_CONNECTING: 1562 case SS_CONNECTING:
1563 link.idn_linkstate = LL_CONNECTING; 1563 link.idn_linkstate = LL_CONNECTING;
1564 break; 1564 break;
1565 case SS_DISCONNECTING: 1565 case SS_DISCONNECTING:
1566 link.idn_linkstate = LL_DISCONNECTING; 1566 link.idn_linkstate = LL_DISCONNECTING;
1567 break; 1567 break;
1568 case SS_CONNECTED: 1568 case SS_CONNECTED:
1569 link.idn_linkstate = LL_RUNNING; 1569 link.idn_linkstate = LL_RUNNING;
1570 break; 1570 break;
1571 default: 1571 default:
1572 link.idn_linkstate = LL_INACTIVE; 1572 link.idn_linkstate = LL_INACTIVE;
1573 } 1573 }
1574 1574
1575 link.idn_segsize = scp->segsize_rem; 1575 link.idn_segsize = scp->segsize_rem;
1576 r_data = &link; 1576 r_data = &link;
1577 break; 1577 break;
1578 1578
1579 default: 1579 default:
1580 #ifdef CONFIG_NETFILTER 1580 #ifdef CONFIG_NETFILTER
1581 { 1581 {
1582 int val, len; 1582 int val, len;
1583 1583
1584 if(get_user(len, optlen)) 1584 if(get_user(len, optlen))
1585 return -EFAULT; 1585 return -EFAULT;
1586 1586
1587 val = nf_getsockopt(sk, PF_DECnet, optname, 1587 val = nf_getsockopt(sk, PF_DECnet, optname,
1588 optval, &len); 1588 optval, &len);
1589 if (val >= 0) 1589 if (val >= 0)
1590 val = put_user(len, optlen); 1590 val = put_user(len, optlen);
1591 return val; 1591 return val;
1592 } 1592 }
1593 #endif 1593 #endif
1594 case DSO_STREAM: 1594 case DSO_STREAM:
1595 case DSO_SEQPACKET: 1595 case DSO_SEQPACKET:
1596 case DSO_CONACCEPT: 1596 case DSO_CONACCEPT:
1597 case DSO_CONREJECT: 1597 case DSO_CONREJECT:
1598 return -ENOPROTOOPT; 1598 return -ENOPROTOOPT;
1599 1599
1600 case DSO_MAXWINDOW: 1600 case DSO_MAXWINDOW:
1601 if (r_len > sizeof(unsigned long)) 1601 if (r_len > sizeof(unsigned long))
1602 r_len = sizeof(unsigned long); 1602 r_len = sizeof(unsigned long);
1603 r_data = &scp->max_window; 1603 r_data = &scp->max_window;
1604 break; 1604 break;
1605 1605
1606 case DSO_NODELAY: 1606 case DSO_NODELAY:
1607 if (r_len > sizeof(int)) 1607 if (r_len > sizeof(int))
1608 r_len = sizeof(int); 1608 r_len = sizeof(int);
1609 val = (scp->nonagle == 1); 1609 val = (scp->nonagle == 1);
1610 r_data = &val; 1610 r_data = &val;
1611 break; 1611 break;
1612 1612
1613 case DSO_CORK: 1613 case DSO_CORK:
1614 if (r_len > sizeof(int)) 1614 if (r_len > sizeof(int))
1615 r_len = sizeof(int); 1615 r_len = sizeof(int);
1616 val = (scp->nonagle == 2); 1616 val = (scp->nonagle == 2);
1617 r_data = &val; 1617 r_data = &val;
1618 break; 1618 break;
1619 1619
1620 case DSO_SERVICES: 1620 case DSO_SERVICES:
1621 if (r_len > sizeof(unsigned char)) 1621 if (r_len > sizeof(unsigned char))
1622 r_len = sizeof(unsigned char); 1622 r_len = sizeof(unsigned char);
1623 r_data = &scp->services_rem; 1623 r_data = &scp->services_rem;
1624 break; 1624 break;
1625 1625
1626 case DSO_INFO: 1626 case DSO_INFO:
1627 if (r_len > sizeof(unsigned char)) 1627 if (r_len > sizeof(unsigned char))
1628 r_len = sizeof(unsigned char); 1628 r_len = sizeof(unsigned char);
1629 r_data = &scp->info_rem; 1629 r_data = &scp->info_rem;
1630 break; 1630 break;
1631 } 1631 }
1632 1632
1633 if (r_data) { 1633 if (r_data) {
1634 if (copy_to_user(optval, r_data, r_len)) 1634 if (copy_to_user(optval, r_data, r_len))
1635 return -EFAULT; 1635 return -EFAULT;
1636 if (put_user(r_len, optlen)) 1636 if (put_user(r_len, optlen))
1637 return -EFAULT; 1637 return -EFAULT;
1638 } 1638 }
1639 1639
1640 return 0; 1640 return 0;
1641 } 1641 }
1642 1642
1643 1643
1644 static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target) 1644 static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
1645 { 1645 {
1646 struct sk_buff *skb = q->next; 1646 struct sk_buff *skb = q->next;
1647 int len = 0; 1647 int len = 0;
1648 1648
1649 if (flags & MSG_OOB) 1649 if (flags & MSG_OOB)
1650 return !skb_queue_empty(q) ? 1 : 0; 1650 return !skb_queue_empty(q) ? 1 : 0;
1651 1651
1652 while(skb != (struct sk_buff *)q) { 1652 while(skb != (struct sk_buff *)q) {
1653 struct dn_skb_cb *cb = DN_SKB_CB(skb); 1653 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1654 len += skb->len; 1654 len += skb->len;
1655 1655
1656 if (cb->nsp_flags & 0x40) { 1656 if (cb->nsp_flags & 0x40) {
1657 /* SOCK_SEQPACKET reads to EOM */ 1657 /* SOCK_SEQPACKET reads to EOM */
1658 if (sk->sk_type == SOCK_SEQPACKET) 1658 if (sk->sk_type == SOCK_SEQPACKET)
1659 return 1; 1659 return 1;
1660 /* so does SOCK_STREAM unless WAITALL is specified */ 1660 /* so does SOCK_STREAM unless WAITALL is specified */
1661 if (!(flags & MSG_WAITALL)) 1661 if (!(flags & MSG_WAITALL))
1662 return 1; 1662 return 1;
1663 } 1663 }
1664 1664
1665 /* minimum data length for read exceeded */ 1665 /* minimum data length for read exceeded */
1666 if (len >= target) 1666 if (len >= target)
1667 return 1; 1667 return 1;
1668 1668
1669 skb = skb->next; 1669 skb = skb->next;
1670 } 1670 }
1671 1671
1672 return 0; 1672 return 0;
1673 } 1673 }
1674 1674
1675 1675
/*
 * recvmsg() for DECnet sockets.
 *
 * Phase 1: validate socket/flag state, then block (unless
 * MSG_DONTWAIT) until dn_data_ready() says the chosen queue can
 * satisfy the read.  Phase 2: walk the queue copying data to the
 * user, consuming fully-drained skbs (unless MSG_PEEK) and stopping
 * at end-of-message, target reached, or after one OOB message.
 *
 * Returns the number of bytes copied, or a negative errno.
 */
static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
	struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff_head *queue = &sk->sk_receive_queue;
	/* Default low-water mark: a single byte (0 for zero-size reads) */
	size_t target = size > 1 ? 1 : 0;
	size_t copied = 0;
	int rv = 0;
	struct sk_buff *skb, *nskb;
	struct dn_skb_cb *cb = NULL;
	unsigned char eor = 0;	/* last skb seen carried end-of-message */
	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);

	/* Socket must have a local address bound */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		rv = -EADDRNOTAVAIL;
		goto out;
	}

	/* Receive side shut down: report EOF */
	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		rv = 0;
		goto out;
	}

	rv = dn_check_state(sk, NULL, 0, &timeo, flags);
	if (rv)
		goto out;

	if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) {
		rv = -EOPNOTSUPP;
		goto out;
	}

	/* OOB reads come from the other-data queue */
	if (flags & MSG_OOB)
		queue = &scp->other_receive_queue;

	/* MSG_WAITALL raises the low-water mark to the full request */
	if (flags & MSG_WAITALL)
		target = size;


	/*
	 * See if there is data ready to read, sleep if there isn't
	 */
	for(;;) {
		DEFINE_WAIT(wait);

		if (sk->sk_err)
			goto out;

		if (!skb_queue_empty(&scp->other_receive_queue)) {
			if (!(flags & MSG_OOB)) {
				/* Pending OOB data: flag it, and on first
				 * notice return early so the caller sees it */
				msg->msg_flags |= MSG_OOB;
				if (!scp->other_report) {
					scp->other_report = 1;
					goto out;
				}
			}
		}

		if (scp->state != DN_RUN)
			goto out;

		if (signal_pending(current)) {
			rv = sock_intr_errno(timeo);
			goto out;
		}

		if (dn_data_ready(sk, queue, flags, target))
			break;

		if (flags & MSG_DONTWAIT) {
			rv = -EWOULDBLOCK;
			goto out;
		}

		/* Sleep until data arrives, then re-run all the checks */
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		finish_wait(sk->sk_sleep, &wait);
	}

	/* Copy loop: nskb is saved before any unlink so iteration is safe */
	for(skb = queue->next; skb != (struct sk_buff *)queue; skb = nskb) {
		unsigned int chunk = skb->len;
		cb = DN_SKB_CB(skb);

		if ((chunk + copied) > size)
			chunk = size - copied;

		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			rv = -EFAULT;
			break;
		}
		copied += chunk;

		/* Peeking leaves the data on the queue */
		if (!(flags & MSG_PEEK))
			skb_pull(skb, chunk);

		eor = cb->nsp_flags & 0x40;
		nskb = skb->next;

		if (skb->len == 0) {
			skb_unlink(skb, queue);
			kfree_skb(skb);
			/*
			 * N.B. Don't refer to skb or cb after this point
			 * in loop.
			 */
			/* Queue drained below congestion: re-open local flow */
			if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
				scp->flowloc_sw = DN_SEND;
				dn_nsp_send_link(sk, DN_SEND, 0);
			}
		}

		if (eor) {
			/* SOCK_SEQPACKET stops at end-of-message; so does
			 * SOCK_STREAM unless MSG_WAITALL was given */
			if (sk->sk_type == SOCK_SEQPACKET)
				break;
			if (!(flags & MSG_WAITALL))
				break;
		}

		/* At most one OOB message per call */
		if (flags & MSG_OOB)
			break;

		if (copied >= target)
			break;
	}

	rv = copied;


	if (eor && (sk->sk_type == SOCK_SEQPACKET))
		msg->msg_flags |= MSG_EOR;

out:
	/* Nothing copied: surface any pending socket error instead */
	if (rv == 0)
		rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);

	/* Report the peer's address when the caller asked for it */
	if ((rv >= 0) && msg->msg_name) {
		memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
		msg->msg_namelen = sizeof(struct sockaddr_dn);
	}

	release_sock(sk);

	return rv;
}
1825 1825
1826 1826
1827 static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags) 1827 static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
1828 { 1828 {
1829 unsigned char fctype = scp->services_rem & NSP_FC_MASK; 1829 unsigned char fctype = scp->services_rem & NSP_FC_MASK;
1830 if (skb_queue_len(queue) >= scp->snd_window) 1830 if (skb_queue_len(queue) >= scp->snd_window)
1831 return 1; 1831 return 1;
1832 if (fctype != NSP_FC_NONE) { 1832 if (fctype != NSP_FC_NONE) {
1833 if (flags & MSG_OOB) { 1833 if (flags & MSG_OOB) {
1834 if (scp->flowrem_oth == 0) 1834 if (scp->flowrem_oth == 0)
1835 return 1; 1835 return 1;
1836 } else { 1836 } else {
1837 if (scp->flowrem_dat == 0) 1837 if (scp->flowrem_dat == 0)
1838 return 1; 1838 return 1;
1839 } 1839 }
1840 } 1840 }
1841 return 0; 1841 return 0;
1842 } 1842 }
1843 1843
1844 /* 1844 /*
1845 * The DECnet spec requires that the "routing layer" accepts packets which 1845 * The DECnet spec requires that the "routing layer" accepts packets which
1846 * are at least 230 bytes in size. This excludes any headers which the NSP 1846 * are at least 230 bytes in size. This excludes any headers which the NSP
1847 * layer might add, so we always assume that we'll be using the maximal 1847 * layer might add, so we always assume that we'll be using the maximal
1848 * length header on data packets. The variation in length is due to the 1848 * length header on data packets. The variation in length is due to the
1849 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't 1849 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
1850 * make much practical difference. 1850 * make much practical difference.
1851 */ 1851 */
1852 unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu) 1852 unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu)
1853 { 1853 {
1854 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER; 1854 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
1855 if (dev) { 1855 if (dev) {
1856 struct dn_dev *dn_db = dev->dn_ptr; 1856 struct dn_dev *dn_db = dev->dn_ptr;
1857 mtu -= LL_RESERVED_SPACE(dev); 1857 mtu -= LL_RESERVED_SPACE(dev);
1858 if (dn_db->use_long) 1858 if (dn_db->use_long)
1859 mtu -= 21; 1859 mtu -= 21;
1860 else 1860 else
1861 mtu -= 6; 1861 mtu -= 6;
1862 mtu -= DN_MAX_NSP_DATA_HEADER; 1862 mtu -= DN_MAX_NSP_DATA_HEADER;
1863 } else { 1863 } else {
1864 /* 1864 /*
1865 * 21 = long header, 16 = guess at MAC header length 1865 * 21 = long header, 16 = guess at MAC header length
1866 */ 1866 */
1867 mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16); 1867 mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
1868 } 1868 }
1869 if (mtu > mss) 1869 if (mtu > mss)
1870 mss = mtu; 1870 mss = mtu;
1871 return mss; 1871 return mss;
1872 } 1872 }
1873 1873
1874 static inline unsigned int dn_current_mss(struct sock *sk, int flags) 1874 static inline unsigned int dn_current_mss(struct sock *sk, int flags)
1875 { 1875 {
1876 struct dst_entry *dst = __sk_dst_get(sk); 1876 struct dst_entry *dst = __sk_dst_get(sk);
1877 struct dn_scp *scp = DN_SK(sk); 1877 struct dn_scp *scp = DN_SK(sk);
1878 int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem); 1878 int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);
1879 1879
1880 /* Other data messages are limited to 16 bytes per packet */ 1880 /* Other data messages are limited to 16 bytes per packet */
1881 if (flags & MSG_OOB) 1881 if (flags & MSG_OOB)
1882 return 16; 1882 return 16;
1883 1883
1884 /* This works out the maximum size of segment we can send out */ 1884 /* This works out the maximum size of segment we can send out */
1885 if (dst) { 1885 if (dst) {
1886 u32 mtu = dst_mtu(dst); 1886 u32 mtu = dst_mtu(dst);
1887 mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now); 1887 mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
1888 } 1888 }
1889 1889
1890 return mss_now; 1890 return mss_now;
1891 } 1891 }
1892 1892
1893 /* 1893 /*
1894 * N.B. We get the timeout wrong here, but then we always did get it 1894 * N.B. We get the timeout wrong here, but then we always did get it
1895 * wrong before and this is another step along the road to correcting 1895 * wrong before and this is another step along the road to correcting
1896 * it. It ought to get updated each time we pass through the routine, 1896 * it. It ought to get updated each time we pass through the routine,
1897 * but in practise it probably doesn't matter too much for now. 1897 * but in practise it probably doesn't matter too much for now.
1898 */ 1898 */
1899 static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk, 1899 static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
1900 unsigned long datalen, int noblock, 1900 unsigned long datalen, int noblock,
1901 int *errcode) 1901 int *errcode)
1902 { 1902 {
1903 struct sk_buff *skb = sock_alloc_send_skb(sk, datalen, 1903 struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
1904 noblock, errcode); 1904 noblock, errcode);
1905 if (skb) { 1905 if (skb) {
1906 skb->protocol = htons(ETH_P_DNA_RT); 1906 skb->protocol = htons(ETH_P_DNA_RT);
1907 skb->pkt_type = PACKET_OUTGOING; 1907 skb->pkt_type = PACKET_OUTGOING;
1908 } 1908 }
1909 return skb; 1909 return skb;
1910 } 1910 }
1911 1911
1912 static int dn_sendmsg(struct kiocb *iocb, struct socket *sock, 1912 static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
1913 struct msghdr *msg, size_t size) 1913 struct msghdr *msg, size_t size)
1914 { 1914 {
1915 struct sock *sk = sock->sk; 1915 struct sock *sk = sock->sk;
1916 struct dn_scp *scp = DN_SK(sk); 1916 struct dn_scp *scp = DN_SK(sk);
1917 size_t mss; 1917 size_t mss;
1918 struct sk_buff_head *queue = &scp->data_xmit_queue; 1918 struct sk_buff_head *queue = &scp->data_xmit_queue;
1919 int flags = msg->msg_flags; 1919 int flags = msg->msg_flags;
1920 int err = 0; 1920 int err = 0;
1921 size_t sent = 0; 1921 size_t sent = 0;
1922 int addr_len = msg->msg_namelen; 1922 int addr_len = msg->msg_namelen;
1923 struct sockaddr_dn *addr = (struct sockaddr_dn *)msg->msg_name; 1923 struct sockaddr_dn *addr = (struct sockaddr_dn *)msg->msg_name;
1924 struct sk_buff *skb = NULL; 1924 struct sk_buff *skb = NULL;
1925 struct dn_skb_cb *cb; 1925 struct dn_skb_cb *cb;
1926 size_t len; 1926 size_t len;
1927 unsigned char fctype; 1927 unsigned char fctype;
1928 long timeo; 1928 long timeo;
1929 1929
1930 if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT)) 1930 if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
1931 return -EOPNOTSUPP; 1931 return -EOPNOTSUPP;
1932 1932
1933 if (addr_len && (addr_len != sizeof(struct sockaddr_dn))) 1933 if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
1934 return -EINVAL; 1934 return -EINVAL;
1935 1935
1936 lock_sock(sk); 1936 lock_sock(sk);
1937 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 1937 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1938 /* 1938 /*
1939 * The only difference between stream sockets and sequenced packet 1939 * The only difference between stream sockets and sequenced packet
1940 * sockets is that the stream sockets always behave as if MSG_EOR 1940 * sockets is that the stream sockets always behave as if MSG_EOR
1941 * has been set. 1941 * has been set.
1942 */ 1942 */
1943 if (sock->type == SOCK_STREAM) { 1943 if (sock->type == SOCK_STREAM) {
1944 if (flags & MSG_EOR) { 1944 if (flags & MSG_EOR) {
1945 err = -EINVAL; 1945 err = -EINVAL;
1946 goto out; 1946 goto out;
1947 } 1947 }
1948 flags |= MSG_EOR; 1948 flags |= MSG_EOR;
1949 } 1949 }
1950 1950
1951 1951
1952 err = dn_check_state(sk, addr, addr_len, &timeo, flags); 1952 err = dn_check_state(sk, addr, addr_len, &timeo, flags);
1953 if (err) 1953 if (err)
1954 goto out_err; 1954 goto out_err;
1955 1955
1956 if (sk->sk_shutdown & SEND_SHUTDOWN) { 1956 if (sk->sk_shutdown & SEND_SHUTDOWN) {
1957 err = -EPIPE; 1957 err = -EPIPE;
1958 if (!(flags & MSG_NOSIGNAL)) 1958 if (!(flags & MSG_NOSIGNAL))
1959 send_sig(SIGPIPE, current, 0); 1959 send_sig(SIGPIPE, current, 0);
1960 goto out_err; 1960 goto out_err;
1961 } 1961 }
1962 1962
1963 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache) 1963 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
1964 dst_negative_advice(&sk->sk_dst_cache); 1964 dst_negative_advice(&sk->sk_dst_cache);
1965 1965
1966 mss = scp->segsize_rem; 1966 mss = scp->segsize_rem;
1967 fctype = scp->services_rem & NSP_FC_MASK; 1967 fctype = scp->services_rem & NSP_FC_MASK;
1968 1968
1969 mss = dn_current_mss(sk, flags); 1969 mss = dn_current_mss(sk, flags);
1970 1970
1971 if (flags & MSG_OOB) { 1971 if (flags & MSG_OOB) {
1972 queue = &scp->other_xmit_queue; 1972 queue = &scp->other_xmit_queue;
1973 if (size > mss) { 1973 if (size > mss) {
1974 err = -EMSGSIZE; 1974 err = -EMSGSIZE;
1975 goto out; 1975 goto out;
1976 } 1976 }
1977 } 1977 }
1978 1978
1979 scp->persist_fxn = dn_nsp_xmit_timeout; 1979 scp->persist_fxn = dn_nsp_xmit_timeout;
1980 1980
1981 while(sent < size) { 1981 while(sent < size) {
1982 err = sock_error(sk); 1982 err = sock_error(sk);
1983 if (err) 1983 if (err)
1984 goto out; 1984 goto out;
1985 1985
1986 if (signal_pending(current)) { 1986 if (signal_pending(current)) {
1987 err = sock_intr_errno(timeo); 1987 err = sock_intr_errno(timeo);
1988 goto out; 1988 goto out;
1989 } 1989 }
1990 1990
1991 /* 1991 /*
1992 * Calculate size that we wish to send. 1992 * Calculate size that we wish to send.
1993 */ 1993 */
1994 len = size - sent; 1994 len = size - sent;
1995 1995
1996 if (len > mss) 1996 if (len > mss)
1997 len = mss; 1997 len = mss;
1998 1998
1999 /* 1999 /*
2000 * Wait for queue size to go down below the window 2000 * Wait for queue size to go down below the window
2001 * size. 2001 * size.
2002 */ 2002 */
2003 if (dn_queue_too_long(scp, queue, flags)) { 2003 if (dn_queue_too_long(scp, queue, flags)) {
2004 DEFINE_WAIT(wait); 2004 DEFINE_WAIT(wait);
2005 2005
2006 if (flags & MSG_DONTWAIT) { 2006 if (flags & MSG_DONTWAIT) {
2007 err = -EWOULDBLOCK; 2007 err = -EWOULDBLOCK;
2008 goto out; 2008 goto out;
2009 } 2009 }
2010 2010
2011 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 2011 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
2012 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2012 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2013 sk_wait_event(sk, &timeo, 2013 sk_wait_event(sk, &timeo,
2014 !dn_queue_too_long(scp, queue, flags)); 2014 !dn_queue_too_long(scp, queue, flags));
2015 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2015 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2016 finish_wait(sk->sk_sleep, &wait); 2016 finish_wait(sk->sk_sleep, &wait);
2017 continue; 2017 continue;
2018 } 2018 }
2019 2019
2020 /* 2020 /*
2021 * Get a suitably sized skb. 2021 * Get a suitably sized skb.
2022 * 64 is a bit of a hack really, but its larger than any 2022 * 64 is a bit of a hack really, but its larger than any
2023 * link-layer headers and has served us well as a good 2023 * link-layer headers and has served us well as a good
2024 * guess as to their real length. 2024 * guess as to their real length.
2025 */ 2025 */
2026 skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER, 2026 skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
2027 flags & MSG_DONTWAIT, &err); 2027 flags & MSG_DONTWAIT, &err);
2028 2028
2029 if (err) 2029 if (err)
2030 break; 2030 break;
2031 2031
2032 if (!skb) 2032 if (!skb)
2033 continue; 2033 continue;
2034 2034
2035 cb = DN_SKB_CB(skb); 2035 cb = DN_SKB_CB(skb);
2036 2036
2037 skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER); 2037 skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);
2038 2038
2039 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { 2039 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
2040 err = -EFAULT; 2040 err = -EFAULT;
2041 goto out; 2041 goto out;
2042 } 2042 }
2043 2043
2044 if (flags & MSG_OOB) { 2044 if (flags & MSG_OOB) {
2045 cb->nsp_flags = 0x30; 2045 cb->nsp_flags = 0x30;
2046 if (fctype != NSP_FC_NONE) 2046 if (fctype != NSP_FC_NONE)
2047 scp->flowrem_oth--; 2047 scp->flowrem_oth--;
2048 } else { 2048 } else {
2049 cb->nsp_flags = 0x00; 2049 cb->nsp_flags = 0x00;
2050 if (scp->seg_total == 0) 2050 if (scp->seg_total == 0)
2051 cb->nsp_flags |= 0x20; 2051 cb->nsp_flags |= 0x20;
2052 2052
2053 scp->seg_total += len; 2053 scp->seg_total += len;
2054 2054
2055 if (((sent + len) == size) && (flags & MSG_EOR)) { 2055 if (((sent + len) == size) && (flags & MSG_EOR)) {
2056 cb->nsp_flags |= 0x40; 2056 cb->nsp_flags |= 0x40;
2057 scp->seg_total = 0; 2057 scp->seg_total = 0;
2058 if (fctype == NSP_FC_SCMC) 2058 if (fctype == NSP_FC_SCMC)
2059 scp->flowrem_dat--; 2059 scp->flowrem_dat--;
2060 } 2060 }
2061 if (fctype == NSP_FC_SRC) 2061 if (fctype == NSP_FC_SRC)
2062 scp->flowrem_dat--; 2062 scp->flowrem_dat--;
2063 } 2063 }
2064 2064
2065 sent += len; 2065 sent += len;
2066 dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB); 2066 dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
2067 skb = NULL; 2067 skb = NULL;
2068 2068
2069 scp->persist = dn_nsp_persist(sk); 2069 scp->persist = dn_nsp_persist(sk);
2070 2070
2071 } 2071 }
2072 out: 2072 out:
2073 2073
2074 if (skb) 2074 if (skb)
2075 kfree_skb(skb); 2075 kfree_skb(skb);
2076 2076
2077 release_sock(sk); 2077 release_sock(sk);
2078 2078
2079 return sent ? sent : err; 2079 return sent ? sent : err;
2080 2080
2081 out_err: 2081 out_err:
2082 err = sk_stream_error(sk, flags, err); 2082 err = sk_stream_error(sk, flags, err);
2083 release_sock(sk); 2083 release_sock(sk);
2084 return err; 2084 return err;
2085 } 2085 }
2086 2086
2087 static int dn_device_event(struct notifier_block *this, unsigned long event, 2087 static int dn_device_event(struct notifier_block *this, unsigned long event,
2088 void *ptr) 2088 void *ptr)
2089 { 2089 {
2090 struct net_device *dev = (struct net_device *)ptr; 2090 struct net_device *dev = (struct net_device *)ptr;
2091 2091
2092 if (!net_eq(dev_net(dev), &init_net)) 2092 if (!net_eq(dev_net(dev), &init_net))
2093 return NOTIFY_DONE; 2093 return NOTIFY_DONE;
2094 2094
2095 switch(event) { 2095 switch(event) {
2096 case NETDEV_UP: 2096 case NETDEV_UP:
2097 dn_dev_up(dev); 2097 dn_dev_up(dev);
2098 break; 2098 break;
2099 case NETDEV_DOWN: 2099 case NETDEV_DOWN:
2100 dn_dev_down(dev); 2100 dn_dev_down(dev);
2101 break; 2101 break;
2102 default: 2102 default:
2103 break; 2103 break;
2104 } 2104 }
2105 2105
2106 return NOTIFY_DONE; 2106 return NOTIFY_DONE;
2107 } 2107 }
2108 2108
2109 static struct notifier_block dn_dev_notifier = { 2109 static struct notifier_block dn_dev_notifier = {
2110 .notifier_call = dn_device_event, 2110 .notifier_call = dn_device_event,
2111 }; 2111 };
2112 2112
2113 extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); 2113 extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
2114 2114
2115 static struct packet_type dn_dix_packet_type = { 2115 static struct packet_type dn_dix_packet_type = {
2116 .type = __constant_htons(ETH_P_DNA_RT), 2116 .type = __constant_htons(ETH_P_DNA_RT),
2117 .dev = NULL, /* All devices */ 2117 .dev = NULL, /* All devices */
2118 .func = dn_route_rcv, 2118 .func = dn_route_rcv,
2119 }; 2119 };
2120 2120
2121 #ifdef CONFIG_PROC_FS 2121 #ifdef CONFIG_PROC_FS
2122 struct dn_iter_state { 2122 struct dn_iter_state {
2123 int bucket; 2123 int bucket;
2124 }; 2124 };
2125 2125
2126 static struct sock *dn_socket_get_first(struct seq_file *seq) 2126 static struct sock *dn_socket_get_first(struct seq_file *seq)
2127 { 2127 {
2128 struct dn_iter_state *state = seq->private; 2128 struct dn_iter_state *state = seq->private;
2129 struct sock *n = NULL; 2129 struct sock *n = NULL;
2130 2130
2131 for(state->bucket = 0; 2131 for(state->bucket = 0;
2132 state->bucket < DN_SK_HASH_SIZE; 2132 state->bucket < DN_SK_HASH_SIZE;
2133 ++state->bucket) { 2133 ++state->bucket) {
2134 n = sk_head(&dn_sk_hash[state->bucket]); 2134 n = sk_head(&dn_sk_hash[state->bucket]);
2135 if (n) 2135 if (n)
2136 break; 2136 break;
2137 } 2137 }
2138 2138
2139 return n; 2139 return n;
2140 } 2140 }
2141 2141
2142 static struct sock *dn_socket_get_next(struct seq_file *seq, 2142 static struct sock *dn_socket_get_next(struct seq_file *seq,
2143 struct sock *n) 2143 struct sock *n)
2144 { 2144 {
2145 struct dn_iter_state *state = seq->private; 2145 struct dn_iter_state *state = seq->private;
2146 2146
2147 n = sk_next(n); 2147 n = sk_next(n);
2148 try_again: 2148 try_again:
2149 if (n) 2149 if (n)
2150 goto out; 2150 goto out;
2151 if (++state->bucket >= DN_SK_HASH_SIZE) 2151 if (++state->bucket >= DN_SK_HASH_SIZE)
2152 goto out; 2152 goto out;
2153 n = sk_head(&dn_sk_hash[state->bucket]); 2153 n = sk_head(&dn_sk_hash[state->bucket]);
2154 goto try_again; 2154 goto try_again;
2155 out: 2155 out:
2156 return n; 2156 return n;
2157 } 2157 }
2158 2158
2159 static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos) 2159 static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos)
2160 { 2160 {
2161 struct sock *sk = dn_socket_get_first(seq); 2161 struct sock *sk = dn_socket_get_first(seq);
2162 2162
2163 if (sk) { 2163 if (sk) {
2164 while(*pos && (sk = dn_socket_get_next(seq, sk))) 2164 while(*pos && (sk = dn_socket_get_next(seq, sk)))
2165 --*pos; 2165 --*pos;
2166 } 2166 }
2167 return *pos ? NULL : sk; 2167 return *pos ? NULL : sk;
2168 } 2168 }
2169 2169
2170 static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos) 2170 static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos)
2171 { 2171 {
2172 void *rc; 2172 void *rc;
2173 read_lock_bh(&dn_hash_lock); 2173 read_lock_bh(&dn_hash_lock);
2174 rc = socket_get_idx(seq, &pos); 2174 rc = socket_get_idx(seq, &pos);
2175 if (!rc) { 2175 if (!rc) {
2176 read_unlock_bh(&dn_hash_lock); 2176 read_unlock_bh(&dn_hash_lock);
2177 } 2177 }
2178 return rc; 2178 return rc;
2179 } 2179 }
2180 2180
2181 static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos) 2181 static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos)
2182 { 2182 {
2183 return *pos ? dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 2183 return *pos ? dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2184 } 2184 }
2185 2185
2186 static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2186 static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2187 { 2187 {
2188 void *rc; 2188 void *rc;
2189 2189
2190 if (v == SEQ_START_TOKEN) { 2190 if (v == SEQ_START_TOKEN) {
2191 rc = dn_socket_get_idx(seq, 0); 2191 rc = dn_socket_get_idx(seq, 0);
2192 goto out; 2192 goto out;
2193 } 2193 }
2194 2194
2195 rc = dn_socket_get_next(seq, v); 2195 rc = dn_socket_get_next(seq, v);
2196 if (rc) 2196 if (rc)
2197 goto out; 2197 goto out;
2198 read_unlock_bh(&dn_hash_lock); 2198 read_unlock_bh(&dn_hash_lock);
2199 out: 2199 out:
2200 ++*pos; 2200 ++*pos;
2201 return rc; 2201 return rc;
2202 } 2202 }
2203 2203
2204 static void dn_socket_seq_stop(struct seq_file *seq, void *v) 2204 static void dn_socket_seq_stop(struct seq_file *seq, void *v)
2205 { 2205 {
2206 if (v && v != SEQ_START_TOKEN) 2206 if (v && v != SEQ_START_TOKEN)
2207 read_unlock_bh(&dn_hash_lock); 2207 read_unlock_bh(&dn_hash_lock);
2208 } 2208 }
2209 2209
2210 #define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126) 2210 #define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126)
2211 2211
2212 static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf) 2212 static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
2213 { 2213 {
2214 int i; 2214 int i;
2215 2215
2216 switch (dn_ntohs(dn->sdn_objnamel)) { 2216 switch (le16_to_cpu(dn->sdn_objnamel)) {
2217 case 0: 2217 case 0:
2218 sprintf(buf, "%d", dn->sdn_objnum); 2218 sprintf(buf, "%d", dn->sdn_objnum);
2219 break; 2219 break;
2220 default: 2220 default:
2221 for (i = 0; i < dn_ntohs(dn->sdn_objnamel); i++) { 2221 for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) {
2222 buf[i] = dn->sdn_objname[i]; 2222 buf[i] = dn->sdn_objname[i];
2223 if (IS_NOT_PRINTABLE(buf[i])) 2223 if (IS_NOT_PRINTABLE(buf[i]))
2224 buf[i] = '.'; 2224 buf[i] = '.';
2225 } 2225 }
2226 buf[i] = 0; 2226 buf[i] = 0;
2227 } 2227 }
2228 } 2228 }
2229 2229
2230 static char *dn_state2asc(unsigned char state) 2230 static char *dn_state2asc(unsigned char state)
2231 { 2231 {
2232 switch(state) { 2232 switch(state) {
2233 case DN_O: 2233 case DN_O:
2234 return "OPEN"; 2234 return "OPEN";
2235 case DN_CR: 2235 case DN_CR:
2236 return " CR"; 2236 return " CR";
2237 case DN_DR: 2237 case DN_DR:
2238 return " DR"; 2238 return " DR";
2239 case DN_DRC: 2239 case DN_DRC:
2240 return " DRC"; 2240 return " DRC";
2241 case DN_CC: 2241 case DN_CC:
2242 return " CC"; 2242 return " CC";
2243 case DN_CI: 2243 case DN_CI:
2244 return " CI"; 2244 return " CI";
2245 case DN_NR: 2245 case DN_NR:
2246 return " NR"; 2246 return " NR";
2247 case DN_NC: 2247 case DN_NC:
2248 return " NC"; 2248 return " NC";
2249 case DN_CD: 2249 case DN_CD:
2250 return " CD"; 2250 return " CD";
2251 case DN_RJ: 2251 case DN_RJ:
2252 return " RJ"; 2252 return " RJ";
2253 case DN_RUN: 2253 case DN_RUN:
2254 return " RUN"; 2254 return " RUN";
2255 case DN_DI: 2255 case DN_DI:
2256 return " DI"; 2256 return " DI";
2257 case DN_DIC: 2257 case DN_DIC:
2258 return " DIC"; 2258 return " DIC";
2259 case DN_DN: 2259 case DN_DN:
2260 return " DN"; 2260 return " DN";
2261 case DN_CL: 2261 case DN_CL:
2262 return " CL"; 2262 return " CL";
2263 case DN_CN: 2263 case DN_CN:
2264 return " CN"; 2264 return " CN";
2265 } 2265 }
2266 2266
2267 return "????"; 2267 return "????";
2268 } 2268 }
2269 2269
2270 static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk) 2270 static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk)
2271 { 2271 {
2272 struct dn_scp *scp = DN_SK(sk); 2272 struct dn_scp *scp = DN_SK(sk);
2273 char buf1[DN_ASCBUF_LEN]; 2273 char buf1[DN_ASCBUF_LEN];
2274 char buf2[DN_ASCBUF_LEN]; 2274 char buf2[DN_ASCBUF_LEN];
2275 char local_object[DN_MAXOBJL+3]; 2275 char local_object[DN_MAXOBJL+3];
2276 char remote_object[DN_MAXOBJL+3]; 2276 char remote_object[DN_MAXOBJL+3];
2277 2277
2278 dn_printable_object(&scp->addr, local_object); 2278 dn_printable_object(&scp->addr, local_object);
2279 dn_printable_object(&scp->peer, remote_object); 2279 dn_printable_object(&scp->peer, remote_object);
2280 2280
2281 seq_printf(seq, 2281 seq_printf(seq,
2282 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s " 2282 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s "
2283 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n", 2283 "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n",
2284 dn_addr2asc(dn_ntohs(dn_saddr2dn(&scp->addr)), buf1), 2284 dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->addr)), buf1),
2285 scp->addrloc, 2285 scp->addrloc,
2286 scp->numdat, 2286 scp->numdat,
2287 scp->numoth, 2287 scp->numoth,
2288 scp->ackxmt_dat, 2288 scp->ackxmt_dat,
2289 scp->ackxmt_oth, 2289 scp->ackxmt_oth,
2290 scp->flowloc_sw, 2290 scp->flowloc_sw,
2291 local_object, 2291 local_object,
2292 dn_addr2asc(dn_ntohs(dn_saddr2dn(&scp->peer)), buf2), 2292 dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->peer)), buf2),
2293 scp->addrrem, 2293 scp->addrrem,
2294 scp->numdat_rcv, 2294 scp->numdat_rcv,
2295 scp->numoth_rcv, 2295 scp->numoth_rcv,
2296 scp->ackrcv_dat, 2296 scp->ackrcv_dat,
2297 scp->ackrcv_oth, 2297 scp->ackrcv_oth,
2298 scp->flowrem_sw, 2298 scp->flowrem_sw,
2299 remote_object, 2299 remote_object,
2300 dn_state2asc(scp->state), 2300 dn_state2asc(scp->state),
2301 ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER")); 2301 ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER"));
2302 } 2302 }
2303 2303
2304 static int dn_socket_seq_show(struct seq_file *seq, void *v) 2304 static int dn_socket_seq_show(struct seq_file *seq, void *v)
2305 { 2305 {
2306 if (v == SEQ_START_TOKEN) { 2306 if (v == SEQ_START_TOKEN) {
2307 seq_puts(seq, "Local Remote\n"); 2307 seq_puts(seq, "Local Remote\n");
2308 } else { 2308 } else {
2309 dn_socket_format_entry(seq, v); 2309 dn_socket_format_entry(seq, v);
2310 } 2310 }
2311 return 0; 2311 return 0;
2312 } 2312 }
2313 2313
2314 static const struct seq_operations dn_socket_seq_ops = { 2314 static const struct seq_operations dn_socket_seq_ops = {
2315 .start = dn_socket_seq_start, 2315 .start = dn_socket_seq_start,
2316 .next = dn_socket_seq_next, 2316 .next = dn_socket_seq_next,
2317 .stop = dn_socket_seq_stop, 2317 .stop = dn_socket_seq_stop,
2318 .show = dn_socket_seq_show, 2318 .show = dn_socket_seq_show,
2319 }; 2319 };
2320 2320
2321 static int dn_socket_seq_open(struct inode *inode, struct file *file) 2321 static int dn_socket_seq_open(struct inode *inode, struct file *file)
2322 { 2322 {
2323 return seq_open_private(file, &dn_socket_seq_ops, 2323 return seq_open_private(file, &dn_socket_seq_ops,
2324 sizeof(struct dn_iter_state)); 2324 sizeof(struct dn_iter_state));
2325 } 2325 }
2326 2326
2327 static const struct file_operations dn_socket_seq_fops = { 2327 static const struct file_operations dn_socket_seq_fops = {
2328 .owner = THIS_MODULE, 2328 .owner = THIS_MODULE,
2329 .open = dn_socket_seq_open, 2329 .open = dn_socket_seq_open,
2330 .read = seq_read, 2330 .read = seq_read,
2331 .llseek = seq_lseek, 2331 .llseek = seq_lseek,
2332 .release = seq_release_private, 2332 .release = seq_release_private,
2333 }; 2333 };
2334 #endif 2334 #endif
2335 2335
2336 static struct net_proto_family dn_family_ops = { 2336 static struct net_proto_family dn_family_ops = {
2337 .family = AF_DECnet, 2337 .family = AF_DECnet,
2338 .create = dn_create, 2338 .create = dn_create,
2339 .owner = THIS_MODULE, 2339 .owner = THIS_MODULE,
2340 }; 2340 };
2341 2341
2342 static const struct proto_ops dn_proto_ops = { 2342 static const struct proto_ops dn_proto_ops = {
2343 .family = AF_DECnet, 2343 .family = AF_DECnet,
2344 .owner = THIS_MODULE, 2344 .owner = THIS_MODULE,
2345 .release = dn_release, 2345 .release = dn_release,
2346 .bind = dn_bind, 2346 .bind = dn_bind,
2347 .connect = dn_connect, 2347 .connect = dn_connect,
2348 .socketpair = sock_no_socketpair, 2348 .socketpair = sock_no_socketpair,
2349 .accept = dn_accept, 2349 .accept = dn_accept,
2350 .getname = dn_getname, 2350 .getname = dn_getname,
2351 .poll = dn_poll, 2351 .poll = dn_poll,
2352 .ioctl = dn_ioctl, 2352 .ioctl = dn_ioctl,
2353 .listen = dn_listen, 2353 .listen = dn_listen,
2354 .shutdown = dn_shutdown, 2354 .shutdown = dn_shutdown,
2355 .setsockopt = dn_setsockopt, 2355 .setsockopt = dn_setsockopt,
2356 .getsockopt = dn_getsockopt, 2356 .getsockopt = dn_getsockopt,
2357 .sendmsg = dn_sendmsg, 2357 .sendmsg = dn_sendmsg,
2358 .recvmsg = dn_recvmsg, 2358 .recvmsg = dn_recvmsg,
2359 .mmap = sock_no_mmap, 2359 .mmap = sock_no_mmap,
2360 .sendpage = sock_no_sendpage, 2360 .sendpage = sock_no_sendpage,
2361 }; 2361 };
2362 2362
2363 void dn_register_sysctl(void); 2363 void dn_register_sysctl(void);
2364 void dn_unregister_sysctl(void); 2364 void dn_unregister_sysctl(void);
2365 2365
2366 MODULE_DESCRIPTION("The Linux DECnet Network Protocol"); 2366 MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
2367 MODULE_AUTHOR("Linux DECnet Project Team"); 2367 MODULE_AUTHOR("Linux DECnet Project Team");
2368 MODULE_LICENSE("GPL"); 2368 MODULE_LICENSE("GPL");
2369 MODULE_ALIAS_NETPROTO(PF_DECnet); 2369 MODULE_ALIAS_NETPROTO(PF_DECnet);
2370 2370
2371 static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n"; 2371 static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n";
2372 2372
2373 static int __init decnet_init(void) 2373 static int __init decnet_init(void)
2374 { 2374 {
2375 int rc; 2375 int rc;
2376 2376
2377 printk(banner); 2377 printk(banner);
2378 2378
2379 rc = proto_register(&dn_proto, 1); 2379 rc = proto_register(&dn_proto, 1);
2380 if (rc != 0) 2380 if (rc != 0)
2381 goto out; 2381 goto out;
2382 2382
2383 dn_neigh_init(); 2383 dn_neigh_init();
2384 dn_dev_init(); 2384 dn_dev_init();
2385 dn_route_init(); 2385 dn_route_init();
2386 dn_fib_init(); 2386 dn_fib_init();
2387 2387
2388 sock_register(&dn_family_ops); 2388 sock_register(&dn_family_ops);
2389 dev_add_pack(&dn_dix_packet_type); 2389 dev_add_pack(&dn_dix_packet_type);
2390 register_netdevice_notifier(&dn_dev_notifier); 2390 register_netdevice_notifier(&dn_dev_notifier);
2391 2391
2392 proc_net_fops_create(&init_net, "decnet", S_IRUGO, &dn_socket_seq_fops); 2392 proc_net_fops_create(&init_net, "decnet", S_IRUGO, &dn_socket_seq_fops);
2393 dn_register_sysctl(); 2393 dn_register_sysctl();
2394 out: 2394 out:
2395 return rc; 2395 return rc;
2396 2396
2397 } 2397 }
2398 module_init(decnet_init); 2398 module_init(decnet_init);
2399 2399
2400 /* 2400 /*
2401 * Prevent DECnet module unloading until its fixed properly. 2401 * Prevent DECnet module unloading until its fixed properly.
2402 * Requires an audit of the code to check for memory leaks and 2402 * Requires an audit of the code to check for memory leaks and
2403 * initialisation problems etc. 2403 * initialisation problems etc.
2404 */ 2404 */
2405 #if 0 2405 #if 0
2406 static void __exit decnet_exit(void) 2406 static void __exit decnet_exit(void)
2407 { 2407 {
2408 sock_unregister(AF_DECnet); 2408 sock_unregister(AF_DECnet);
2409 rtnl_unregister_all(PF_DECnet); 2409 rtnl_unregister_all(PF_DECnet);
2410 dev_remove_pack(&dn_dix_packet_type); 2410 dev_remove_pack(&dn_dix_packet_type);
2411 2411
2412 dn_unregister_sysctl(); 2412 dn_unregister_sysctl();
2413 2413
2414 unregister_netdevice_notifier(&dn_dev_notifier); 2414 unregister_netdevice_notifier(&dn_dev_notifier);
2415 2415
2416 dn_route_cleanup(); 2416 dn_route_cleanup();
2417 dn_dev_cleanup(); 2417 dn_dev_cleanup();
2418 dn_neigh_cleanup(); 2418 dn_neigh_cleanup();
2419 dn_fib_cleanup(); 2419 dn_fib_cleanup();
2420 2420
2421 proc_net_remove(&init_net, "decnet"); 2421 proc_net_remove(&init_net, "decnet");
2422 2422
2423 proto_unregister(&dn_proto); 2423 proto_unregister(&dn_proto);
2424 } 2424 }
2425 module_exit(decnet_exit); 2425 module_exit(decnet_exit);
2426 #endif 2426 #endif
2427 2427
1 /* 1 /*
2 * DECnet An implementation of the DECnet protocol suite for the LINUX 2 * DECnet An implementation of the DECnet protocol suite for the LINUX
3 * operating system. DECnet is implemented using the BSD Socket 3 * operating system. DECnet is implemented using the BSD Socket
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * DECnet Device Layer 6 * DECnet Device Layer
7 * 7 *
8 * Authors: Steve Whitehouse <SteveW@ACM.org> 8 * Authors: Steve Whitehouse <SteveW@ACM.org>
9 * Eduardo Marcelo Serrat <emserrat@geocities.com> 9 * Eduardo Marcelo Serrat <emserrat@geocities.com>
10 * 10 *
11 * Changes: 11 * Changes:
12 * Steve Whitehouse : Devices now see incoming frames so they 12 * Steve Whitehouse : Devices now see incoming frames so they
13 * can mark on who it came from. 13 * can mark on who it came from.
14 * Steve Whitehouse : Fixed bug in creating neighbours. Each neighbour 14 * Steve Whitehouse : Fixed bug in creating neighbours. Each neighbour
15 * can now have a device specific setup func. 15 * can now have a device specific setup func.
16 * Steve Whitehouse : Added /proc/sys/net/decnet/conf/<dev>/ 16 * Steve Whitehouse : Added /proc/sys/net/decnet/conf/<dev>/
17 * Steve Whitehouse : Fixed bug which sometimes killed timer 17 * Steve Whitehouse : Fixed bug which sometimes killed timer
18 * Steve Whitehouse : Multiple ifaddr support 18 * Steve Whitehouse : Multiple ifaddr support
19 * Steve Whitehouse : SIOCGIFCONF is now a compile time option 19 * Steve Whitehouse : SIOCGIFCONF is now a compile time option
20 * Steve Whitehouse : /proc/sys/net/decnet/conf/<sys>/forwarding 20 * Steve Whitehouse : /proc/sys/net/decnet/conf/<sys>/forwarding
21 * Steve Whitehouse : Removed timer1 - it's a user space issue now 21 * Steve Whitehouse : Removed timer1 - it's a user space issue now
22 * Patrick Caulfield : Fixed router hello message format 22 * Patrick Caulfield : Fixed router hello message format
23 * Steve Whitehouse : Got rid of constant sizes for blksize for 23 * Steve Whitehouse : Got rid of constant sizes for blksize for
24 * devices. All mtu based now. 24 * devices. All mtu based now.
25 */ 25 */
26 26
27 #include <linux/capability.h> 27 #include <linux/capability.h>
28 #include <linux/module.h> 28 #include <linux/module.h>
29 #include <linux/moduleparam.h> 29 #include <linux/moduleparam.h>
30 #include <linux/init.h> 30 #include <linux/init.h>
31 #include <linux/net.h> 31 #include <linux/net.h>
32 #include <linux/netdevice.h> 32 #include <linux/netdevice.h>
33 #include <linux/proc_fs.h> 33 #include <linux/proc_fs.h>
34 #include <linux/seq_file.h> 34 #include <linux/seq_file.h>
35 #include <linux/timer.h> 35 #include <linux/timer.h>
36 #include <linux/string.h> 36 #include <linux/string.h>
37 #include <linux/if_addr.h> 37 #include <linux/if_addr.h>
38 #include <linux/if_arp.h> 38 #include <linux/if_arp.h>
39 #include <linux/if_ether.h> 39 #include <linux/if_ether.h>
40 #include <linux/skbuff.h> 40 #include <linux/skbuff.h>
41 #include <linux/sysctl.h> 41 #include <linux/sysctl.h>
42 #include <linux/notifier.h> 42 #include <linux/notifier.h>
43 #include <asm/uaccess.h> 43 #include <asm/uaccess.h>
44 #include <asm/system.h> 44 #include <asm/system.h>
45 #include <net/net_namespace.h> 45 #include <net/net_namespace.h>
46 #include <net/neighbour.h> 46 #include <net/neighbour.h>
47 #include <net/dst.h> 47 #include <net/dst.h>
48 #include <net/flow.h> 48 #include <net/flow.h>
49 #include <net/fib_rules.h> 49 #include <net/fib_rules.h>
50 #include <net/netlink.h> 50 #include <net/netlink.h>
51 #include <net/dn.h> 51 #include <net/dn.h>
52 #include <net/dn_dev.h> 52 #include <net/dn_dev.h>
53 #include <net/dn_route.h> 53 #include <net/dn_route.h>
54 #include <net/dn_neigh.h> 54 #include <net/dn_neigh.h>
55 #include <net/dn_fib.h> 55 #include <net/dn_fib.h>
56 56
57 #define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn)) 57 #define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn))
58 58
59 static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00}; 59 static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00};
60 static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00}; 60 static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00};
61 static char dn_hiord[ETH_ALEN] = {0xAA,0x00,0x04,0x00,0x00,0x00}; 61 static char dn_hiord[ETH_ALEN] = {0xAA,0x00,0x04,0x00,0x00,0x00};
62 static unsigned char dn_eco_version[3] = {0x02,0x00,0x00}; 62 static unsigned char dn_eco_version[3] = {0x02,0x00,0x00};
63 63
64 extern struct neigh_table dn_neigh_table; 64 extern struct neigh_table dn_neigh_table;
65 65
66 /* 66 /*
67 * decnet_address is kept in network order. 67 * decnet_address is kept in network order.
68 */ 68 */
69 __le16 decnet_address = 0; 69 __le16 decnet_address = 0;
70 70
71 static DEFINE_RWLOCK(dndev_lock); 71 static DEFINE_RWLOCK(dndev_lock);
72 static struct net_device *decnet_default_device; 72 static struct net_device *decnet_default_device;
73 static BLOCKING_NOTIFIER_HEAD(dnaddr_chain); 73 static BLOCKING_NOTIFIER_HEAD(dnaddr_chain);
74 74
75 static struct dn_dev *dn_dev_create(struct net_device *dev, int *err); 75 static struct dn_dev *dn_dev_create(struct net_device *dev, int *err);
76 static void dn_dev_delete(struct net_device *dev); 76 static void dn_dev_delete(struct net_device *dev);
77 static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa); 77 static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa);
78 78
79 static int dn_eth_up(struct net_device *); 79 static int dn_eth_up(struct net_device *);
80 static void dn_eth_down(struct net_device *); 80 static void dn_eth_down(struct net_device *);
81 static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa); 81 static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa);
82 static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa); 82 static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa);
83 83
84 static struct dn_dev_parms dn_dev_list[] = { 84 static struct dn_dev_parms dn_dev_list[] = {
85 { 85 {
86 .type = ARPHRD_ETHER, /* Ethernet */ 86 .type = ARPHRD_ETHER, /* Ethernet */
87 .mode = DN_DEV_BCAST, 87 .mode = DN_DEV_BCAST,
88 .state = DN_DEV_S_RU, 88 .state = DN_DEV_S_RU,
89 .t2 = 1, 89 .t2 = 1,
90 .t3 = 10, 90 .t3 = 10,
91 .name = "ethernet", 91 .name = "ethernet",
92 .ctl_name = NET_DECNET_CONF_ETHER, 92 .ctl_name = NET_DECNET_CONF_ETHER,
93 .up = dn_eth_up, 93 .up = dn_eth_up,
94 .down = dn_eth_down, 94 .down = dn_eth_down,
95 .timer3 = dn_send_brd_hello, 95 .timer3 = dn_send_brd_hello,
96 }, 96 },
97 { 97 {
98 .type = ARPHRD_IPGRE, /* DECnet tunneled over GRE in IP */ 98 .type = ARPHRD_IPGRE, /* DECnet tunneled over GRE in IP */
99 .mode = DN_DEV_BCAST, 99 .mode = DN_DEV_BCAST,
100 .state = DN_DEV_S_RU, 100 .state = DN_DEV_S_RU,
101 .t2 = 1, 101 .t2 = 1,
102 .t3 = 10, 102 .t3 = 10,
103 .name = "ipgre", 103 .name = "ipgre",
104 .ctl_name = NET_DECNET_CONF_GRE, 104 .ctl_name = NET_DECNET_CONF_GRE,
105 .timer3 = dn_send_brd_hello, 105 .timer3 = dn_send_brd_hello,
106 }, 106 },
107 #if 0 107 #if 0
108 { 108 {
109 .type = ARPHRD_X25, /* Bog standard X.25 */ 109 .type = ARPHRD_X25, /* Bog standard X.25 */
110 .mode = DN_DEV_UCAST, 110 .mode = DN_DEV_UCAST,
111 .state = DN_DEV_S_DS, 111 .state = DN_DEV_S_DS,
112 .t2 = 1, 112 .t2 = 1,
113 .t3 = 120, 113 .t3 = 120,
114 .name = "x25", 114 .name = "x25",
115 .ctl_name = NET_DECNET_CONF_X25, 115 .ctl_name = NET_DECNET_CONF_X25,
116 .timer3 = dn_send_ptp_hello, 116 .timer3 = dn_send_ptp_hello,
117 }, 117 },
118 #endif 118 #endif
119 #if 0 119 #if 0
120 { 120 {
121 .type = ARPHRD_PPP, /* DECnet over PPP */ 121 .type = ARPHRD_PPP, /* DECnet over PPP */
122 .mode = DN_DEV_BCAST, 122 .mode = DN_DEV_BCAST,
123 .state = DN_DEV_S_RU, 123 .state = DN_DEV_S_RU,
124 .t2 = 1, 124 .t2 = 1,
125 .t3 = 10, 125 .t3 = 10,
126 .name = "ppp", 126 .name = "ppp",
127 .ctl_name = NET_DECNET_CONF_PPP, 127 .ctl_name = NET_DECNET_CONF_PPP,
128 .timer3 = dn_send_brd_hello, 128 .timer3 = dn_send_brd_hello,
129 }, 129 },
130 #endif 130 #endif
131 { 131 {
132 .type = ARPHRD_DDCMP, /* DECnet over DDCMP */ 132 .type = ARPHRD_DDCMP, /* DECnet over DDCMP */
133 .mode = DN_DEV_UCAST, 133 .mode = DN_DEV_UCAST,
134 .state = DN_DEV_S_DS, 134 .state = DN_DEV_S_DS,
135 .t2 = 1, 135 .t2 = 1,
136 .t3 = 120, 136 .t3 = 120,
137 .name = "ddcmp", 137 .name = "ddcmp",
138 .ctl_name = NET_DECNET_CONF_DDCMP, 138 .ctl_name = NET_DECNET_CONF_DDCMP,
139 .timer3 = dn_send_ptp_hello, 139 .timer3 = dn_send_ptp_hello,
140 }, 140 },
141 { 141 {
142 .type = ARPHRD_LOOPBACK, /* Loopback interface - always last */ 142 .type = ARPHRD_LOOPBACK, /* Loopback interface - always last */
143 .mode = DN_DEV_BCAST, 143 .mode = DN_DEV_BCAST,
144 .state = DN_DEV_S_RU, 144 .state = DN_DEV_S_RU,
145 .t2 = 1, 145 .t2 = 1,
146 .t3 = 10, 146 .t3 = 10,
147 .name = "loopback", 147 .name = "loopback",
148 .ctl_name = NET_DECNET_CONF_LOOPBACK, 148 .ctl_name = NET_DECNET_CONF_LOOPBACK,
149 .timer3 = dn_send_brd_hello, 149 .timer3 = dn_send_brd_hello,
150 } 150 }
151 }; 151 };
152 152
153 #define DN_DEV_LIST_SIZE ARRAY_SIZE(dn_dev_list) 153 #define DN_DEV_LIST_SIZE ARRAY_SIZE(dn_dev_list)
154 154
155 #define DN_DEV_PARMS_OFFSET(x) offsetof(struct dn_dev_parms, x) 155 #define DN_DEV_PARMS_OFFSET(x) offsetof(struct dn_dev_parms, x)
156 156
157 #ifdef CONFIG_SYSCTL 157 #ifdef CONFIG_SYSCTL
158 158
159 static int min_t2[] = { 1 }; 159 static int min_t2[] = { 1 };
160 static int max_t2[] = { 60 }; /* No max specified, but this seems sensible */ 160 static int max_t2[] = { 60 }; /* No max specified, but this seems sensible */
161 static int min_t3[] = { 1 }; 161 static int min_t3[] = { 1 };
162 static int max_t3[] = { 8191 }; /* Must fit in 16 bits when multiplied by BCT3MULT or T3MULT */ 162 static int max_t3[] = { 8191 }; /* Must fit in 16 bits when multiplied by BCT3MULT or T3MULT */
163 163
164 static int min_priority[1]; 164 static int min_priority[1];
165 static int max_priority[] = { 127 }; /* From DECnet spec */ 165 static int max_priority[] = { 127 }; /* From DECnet spec */
166 166
167 static int dn_forwarding_proc(ctl_table *, int, struct file *, 167 static int dn_forwarding_proc(ctl_table *, int, struct file *,
168 void __user *, size_t *, loff_t *); 168 void __user *, size_t *, loff_t *);
169 static int dn_forwarding_sysctl(ctl_table *table, 169 static int dn_forwarding_sysctl(ctl_table *table,
170 void __user *oldval, size_t __user *oldlenp, 170 void __user *oldval, size_t __user *oldlenp,
171 void __user *newval, size_t newlen); 171 void __user *newval, size_t newlen);
172 172
/*
 * Template for the per-device sysctl table.  dn_dev_sysctl_register()
 * below kmemdup()s this and relocates each .data field, which here
 * holds an offset into struct dn_dev_parms, into a real pointer for
 * the device being registered.
 */
static struct dn_dev_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table dn_dev_vars[5];	/* 4 entries + zero terminator */
} dn_dev_sysctl = {
	NULL,
	{
	{
		.ctl_name = NET_DECNET_CONF_DEV_FORWARDING,
		.procname = "forwarding",
		.data = (void *)DN_DEV_PARMS_OFFSET(forwarding),
		.maxlen = sizeof(int),
		.mode = 0644,
		/* custom handlers: writes must clamp to 0..2 and cycle the
		 * device's down()/up() callbacks, see dn_forwarding_proc() */
		.proc_handler = dn_forwarding_proc,
		.strategy = dn_forwarding_sysctl,
	},
	{
		.ctl_name = NET_DECNET_CONF_DEV_PRIORITY,
		.procname = "priority",
		.data = (void *)DN_DEV_PARMS_OFFSET(priority),
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_priority,
		.extra2 = &max_priority
	},
	{
		.ctl_name = NET_DECNET_CONF_DEV_T2,
		.procname = "t2",
		.data = (void *)DN_DEV_PARMS_OFFSET(t2),
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_t2,
		.extra2 = &max_t2
	},
	{
		.ctl_name = NET_DECNET_CONF_DEV_T3,
		.procname = "t3",
		.data = (void *)DN_DEV_PARMS_OFFSET(t3),
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_t3,
		.extra2 = &max_t3
	},
	{0}
	},
};
224 224
/*
 * dn_dev_sysctl_register - create /proc/sys/net/decnet/conf/<name>/ entries
 * @dev: the device, or NULL to register a parms-only (default) table
 * @parms: per-device parameters the entries read and write
 *
 * Duplicates the dn_dev_sysctl template, turns the offset-valued .data
 * fields into pointers into @parms, and registers the result.  The
 * registered table is stored in parms->sysctl so that
 * dn_dev_sysctl_unregister() can tear it down; on any failure the
 * function returns silently (sysctl entries are best-effort).
 */
static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms)
{
	struct dn_dev_sysctl_table *t;
	int i;

#define DN_CTL_PATH_DEV	3

	struct ctl_path dn_ctl_path[] = {
		{ .procname = "net", .ctl_name = CTL_NET, },
		{ .procname = "decnet", .ctl_name = NET_DECNET, },
		{ .procname = "conf", .ctl_name = NET_DECNET_CONF, },
		{ /* to be set */ },
		{ },
	};

	t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL);
	if (t == NULL)
		return;

	/* Relocate: template .data holds DN_DEV_PARMS_OFFSET() values,
	 * make each one point at the field inside this parms instance.
	 * The final (terminator) entry is left alone. */
	for(i = 0; i < ARRAY_SIZE(t->dn_dev_vars) - 1; i++) {
		long offset = (long)t->dn_dev_vars[i].data;
		t->dn_dev_vars[i].data = ((char *)parms) + offset;
	}

	/* Fill in the device-specific path component */
	if (dev) {
		dn_ctl_path[DN_CTL_PATH_DEV].procname = dev->name;
		dn_ctl_path[DN_CTL_PATH_DEV].ctl_name = dev->ifindex;
	} else {
		dn_ctl_path[DN_CTL_PATH_DEV].procname = parms->name;
		dn_ctl_path[DN_CTL_PATH_DEV].ctl_name = parms->ctl_name;
	}

	/* dn_dev_vars[0] is "forwarding"; its handlers find the device
	 * through extra1 (see dn_forwarding_proc/dn_forwarding_sysctl) */
	t->dn_dev_vars[0].extra1 = (void *)dev;

	t->sysctl_header = register_sysctl_paths(dn_ctl_path, t->dn_dev_vars);
	if (t->sysctl_header == NULL)
		kfree(t);
	else
		parms->sysctl = t;
}
265 265
266 static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms) 266 static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
267 { 267 {
268 if (parms->sysctl) { 268 if (parms->sysctl) {
269 struct dn_dev_sysctl_table *t = parms->sysctl; 269 struct dn_dev_sysctl_table *t = parms->sysctl;
270 parms->sysctl = NULL; 270 parms->sysctl = NULL;
271 unregister_sysctl_table(t->sysctl_header); 271 unregister_sysctl_table(t->sysctl_header);
272 kfree(t); 272 kfree(t);
273 } 273 }
274 } 274 }
275 275
/*
 * proc handler for the per-device "forwarding" sysctl.
 *
 * Delegates the actual read/write to proc_dointvec(); on a write it
 * clamps the new value into the legal 0..2 range and then runs the
 * device's down()/up() callbacks.  The down() callback must observe
 * the *previous* forwarding value, so the freshly written value is
 * temporarily swapped out around the call - see the in-line note.
 *
 * Returns the proc_dointvec() result, or -EINVAL when routing support
 * is not compiled in or no device is attached to the table.
 */
static int dn_forwarding_proc(ctl_table *table, int write,
			struct file *filep,
			void __user *buffer,
			size_t *lenp, loff_t *ppos)
{
#ifdef CONFIG_DECNET_ROUTER
	struct net_device *dev = table->extra1;
	struct dn_dev *dn_db;
	int err;
	int tmp, old;

	if (table->extra1 == NULL)
		return -EINVAL;

	dn_db = dev->dn_ptr;
	/* remember the value in force before proc_dointvec() overwrites it */
	old = dn_db->parms.forwarding;

	err = proc_dointvec(table, write, filep, buffer, lenp, ppos);

	if ((err >= 0) && write) {
		/* clamp the written value to the legal range 0..2 */
		if (dn_db->parms.forwarding < 0)
			dn_db->parms.forwarding = 0;
		if (dn_db->parms.forwarding > 2)
			dn_db->parms.forwarding = 2;
		/*
		 * What an ugly hack this is... its works, just. It
		 * would be nice if sysctl/proc were just that little
		 * bit more flexible so I don't have to write a special
		 * routine, or suffer hacks like this - SJW
		 */
		tmp = dn_db->parms.forwarding;
		dn_db->parms.forwarding = old;	/* down() sees the old mode */
		if (dn_db->parms.down)
			dn_db->parms.down(dev);
		dn_db->parms.forwarding = tmp;	/* up() sees the new mode */
		if (dn_db->parms.up)
			dn_db->parms.up(dev);
	}

	return err;
#else
	return -EINVAL;
#endif
}
320 320
321 static int dn_forwarding_sysctl(ctl_table *table, 321 static int dn_forwarding_sysctl(ctl_table *table,
322 void __user *oldval, size_t __user *oldlenp, 322 void __user *oldval, size_t __user *oldlenp,
323 void __user *newval, size_t newlen) 323 void __user *newval, size_t newlen)
324 { 324 {
325 #ifdef CONFIG_DECNET_ROUTER 325 #ifdef CONFIG_DECNET_ROUTER
326 struct net_device *dev = table->extra1; 326 struct net_device *dev = table->extra1;
327 struct dn_dev *dn_db; 327 struct dn_dev *dn_db;
328 int value; 328 int value;
329 329
330 if (table->extra1 == NULL) 330 if (table->extra1 == NULL)
331 return -EINVAL; 331 return -EINVAL;
332 332
333 dn_db = dev->dn_ptr; 333 dn_db = dev->dn_ptr;
334 334
335 if (newval && newlen) { 335 if (newval && newlen) {
336 if (newlen != sizeof(int)) 336 if (newlen != sizeof(int))
337 return -EINVAL; 337 return -EINVAL;
338 338
339 if (get_user(value, (int __user *)newval)) 339 if (get_user(value, (int __user *)newval))
340 return -EFAULT; 340 return -EFAULT;
341 if (value < 0) 341 if (value < 0)
342 return -EINVAL; 342 return -EINVAL;
343 if (value > 2) 343 if (value > 2)
344 return -EINVAL; 344 return -EINVAL;
345 345
346 if (dn_db->parms.down) 346 if (dn_db->parms.down)
347 dn_db->parms.down(dev); 347 dn_db->parms.down(dev);
348 dn_db->parms.forwarding = value; 348 dn_db->parms.forwarding = value;
349 if (dn_db->parms.up) 349 if (dn_db->parms.up)
350 dn_db->parms.up(dev); 350 dn_db->parms.up(dev);
351 } 351 }
352 352
353 return 0; 353 return 0;
354 #else 354 #else
355 return -EINVAL; 355 return -EINVAL;
356 #endif 356 #endif
357 } 357 }
358 358
359 #else /* CONFIG_SYSCTL */ 359 #else /* CONFIG_SYSCTL */
/* No-op stubs used when CONFIG_SYSCTL is disabled, so that callers
 * need not be conditionally compiled. */
static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
{
}
static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms)
{
}
366 366
367 #endif /* CONFIG_SYSCTL */ 367 #endif /* CONFIG_SYSCTL */
368 368
369 static inline __u16 mtu2blksize(struct net_device *dev) 369 static inline __u16 mtu2blksize(struct net_device *dev)
370 { 370 {
371 u32 blksize = dev->mtu; 371 u32 blksize = dev->mtu;
372 if (blksize > 0xffff) 372 if (blksize > 0xffff)
373 blksize = 0xffff; 373 blksize = 0xffff;
374 374
375 if (dev->type == ARPHRD_ETHER || 375 if (dev->type == ARPHRD_ETHER ||
376 dev->type == ARPHRD_PPP || 376 dev->type == ARPHRD_PPP ||
377 dev->type == ARPHRD_IPGRE || 377 dev->type == ARPHRD_IPGRE ||
378 dev->type == ARPHRD_LOOPBACK) 378 dev->type == ARPHRD_LOOPBACK)
379 blksize -= 2; 379 blksize -= 2;
380 380
381 return (__u16)blksize; 381 return (__u16)blksize;
382 } 382 }
383 383
384 static struct dn_ifaddr *dn_dev_alloc_ifa(void) 384 static struct dn_ifaddr *dn_dev_alloc_ifa(void)
385 { 385 {
386 struct dn_ifaddr *ifa; 386 struct dn_ifaddr *ifa;
387 387
388 ifa = kzalloc(sizeof(*ifa), GFP_KERNEL); 388 ifa = kzalloc(sizeof(*ifa), GFP_KERNEL);
389 389
390 return ifa; 390 return ifa;
391 } 391 }
392 392
/* Release an interface address record obtained from dn_dev_alloc_ifa(). */
static inline void dn_dev_free_ifa(struct dn_ifaddr *ifa)
{
	kfree(ifa);
}
397 397
/*
 * dn_dev_del_ifa - unlink one address from a device's address list
 * @dn_db: per-device DECnet state owning the list
 * @ifap: link pointer to the entry being removed (so unlinking is O(1))
 * @destroy: non-zero to also free the entry after notifying listeners
 *
 * Must be called under RTNL.  On ethernet devices the multicast filter
 * entry derived from the address is dropped, unless the address equals
 * the one derived from the device's own MAC.  When the last address is
 * destroyed the whole DECnet device state is removed too.
 */
static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int destroy)
{
	struct dn_ifaddr *ifa1 = *ifap;
	unsigned char mac_addr[6];
	struct net_device *dev = dn_db->dev;

	ASSERT_RTNL();

	/* unlink from the singly-linked ifa_list chain */
	*ifap = ifa1->ifa_next;

	if (dn_db->dev->type == ARPHRD_ETHER) {
		if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) {
			dn_dn2eth(mac_addr, ifa1->ifa_local);
			dev_mc_delete(dev, mac_addr, ETH_ALEN, 0);
		}
	}

	dn_ifaddr_notify(RTM_DELADDR, ifa1);
	blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1);
	if (destroy) {
		dn_dev_free_ifa(ifa1);

		/* last address gone -> tear down the DECnet device state */
		if (dn_db->ifa_list == NULL)
			dn_dev_delete(dn_db->dev);
	}
}
424 424
/*
 * dn_dev_insert_ifa - add an address to a device's address list
 * @dn_db: per-device DECnet state
 * @ifa: fully initialised address record (ownership passes on success)
 *
 * Must be called under RTNL.  Rejects duplicates with -EEXIST.  On
 * ethernet, a multicast filter entry for the address is installed
 * unless it matches the device's own MAC-derived address.  Listeners
 * are notified via rtnetlink and the dnaddr notifier chain.
 *
 * Returns 0 on success; on failure the caller keeps ownership of @ifa.
 */
static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
{
	struct net_device *dev = dn_db->dev;
	struct dn_ifaddr *ifa1;
	unsigned char mac_addr[6];

	ASSERT_RTNL();

	/* Check for duplicates */
	for(ifa1 = dn_db->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
		if (ifa1->ifa_local == ifa->ifa_local)
			return -EEXIST;
	}

	if (dev->type == ARPHRD_ETHER) {
		if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) {
			dn_dn2eth(mac_addr, ifa->ifa_local);
			dev_mc_add(dev, mac_addr, ETH_ALEN, 0);
		}
	}

	/* push onto the head of the singly-linked list */
	ifa->ifa_next = dn_db->ifa_list;
	dn_db->ifa_list = ifa;

	dn_ifaddr_notify(RTM_NEWADDR, ifa);
	blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);

	return 0;
}
454 454
455 static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa) 455 static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa)
456 { 456 {
457 struct dn_dev *dn_db = dev->dn_ptr; 457 struct dn_dev *dn_db = dev->dn_ptr;
458 int rv; 458 int rv;
459 459
460 if (dn_db == NULL) { 460 if (dn_db == NULL) {
461 int err; 461 int err;
462 dn_db = dn_dev_create(dev, &err); 462 dn_db = dn_dev_create(dev, &err);
463 if (dn_db == NULL) 463 if (dn_db == NULL)
464 return err; 464 return err;
465 } 465 }
466 466
467 ifa->ifa_dev = dn_db; 467 ifa->ifa_dev = dn_db;
468 468
469 if (dev->flags & IFF_LOOPBACK) 469 if (dev->flags & IFF_LOOPBACK)
470 ifa->ifa_scope = RT_SCOPE_HOST; 470 ifa->ifa_scope = RT_SCOPE_HOST;
471 471
472 rv = dn_dev_insert_ifa(dn_db, ifa); 472 rv = dn_dev_insert_ifa(dn_db, ifa);
473 if (rv) 473 if (rv)
474 dn_dev_free_ifa(ifa); 474 dn_dev_free_ifa(ifa);
475 return rv; 475 return rv;
476 } 476 }
477 477
478 478
/*
 * dn_dev_ioctl - handle SIOCGIFADDR / SIOCSIFADDR for AF_DECnet sockets
 * @cmd: SIOCGIFADDR to read a device address, SIOCSIFADDR to set one
 * @arg: userspace pointer to a struct ifreq (DN_IFREQ_SIZE bytes)
 *
 * Copies the ifreq in, looks the device up by name under RTNL, then
 * either returns the first matching address (get) or replaces/creates
 * the address (set, CAP_NET_ADMIN required, family must be AF_DECnet).
 *
 * Control flow: "done" unlocks and returns; "rarok" (Return Address
 * OK) additionally copies the ifreq back to userspace first.
 *
 * Returns 0 on success or a negative errno.
 */
int dn_dev_ioctl(unsigned int cmd, void __user *arg)
{
	char buffer[DN_IFREQ_SIZE];
	struct ifreq *ifr = (struct ifreq *)buffer;
	struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr;
	struct dn_dev *dn_db;
	struct net_device *dev;
	struct dn_ifaddr *ifa = NULL, **ifap = NULL;
	int ret = 0;

	if (copy_from_user(ifr, arg, DN_IFREQ_SIZE))
		return -EFAULT;
	/* ensure the (possibly unterminated) name from userspace is bounded */
	ifr->ifr_name[IFNAMSIZ-1] = 0;

	dev_load(&init_net, ifr->ifr_name);

	/* permission / argument checks that need no locking */
	switch(cmd) {
		case SIOCGIFADDR:
			break;
		case SIOCSIFADDR:
			if (!capable(CAP_NET_ADMIN))
				return -EACCES;
			if (sdn->sdn_family != AF_DECnet)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
	}

	rtnl_lock();

	if ((dev = __dev_get_by_name(&init_net, ifr->ifr_name)) == NULL) {
		ret = -ENODEV;
		goto done;
	}

	/* find the address whose label matches the requested name */
	if ((dn_db = dev->dn_ptr) != NULL) {
		for (ifap = &dn_db->ifa_list; (ifa=*ifap) != NULL; ifap = &ifa->ifa_next)
			if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0)
				break;
	}

	if (ifa == NULL && cmd != SIOCSIFADDR) {
		ret = -EADDRNOTAVAIL;
		goto done;
	}

	switch(cmd) {
		case SIOCGIFADDR:
			*((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local;
			goto rarok;

		case SIOCSIFADDR:
			if (!ifa) {
				if ((ifa = dn_dev_alloc_ifa()) == NULL) {
					ret = -ENOBUFS;
					break;
				}
				memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
			} else {
				/* setting the address it already has is a no-op */
				if (ifa->ifa_local == dn_saddr2dn(sdn))
					break;
				dn_dev_del_ifa(dn_db, ifap, 0);
			}

			ifa->ifa_local = ifa->ifa_address = dn_saddr2dn(sdn);

			ret = dn_dev_set_ifa(dev, ifa);
	}
done:
	rtnl_unlock();

	return ret;
rarok:
	if (copy_to_user(arg, ifr, DN_IFREQ_SIZE))
		ret = -EFAULT;
	goto done;
}
557 557
558 struct net_device *dn_dev_get_default(void) 558 struct net_device *dn_dev_get_default(void)
559 { 559 {
560 struct net_device *dev; 560 struct net_device *dev;
561 read_lock(&dndev_lock); 561 read_lock(&dndev_lock);
562 dev = decnet_default_device; 562 dev = decnet_default_device;
563 if (dev) { 563 if (dev) {
564 if (dev->dn_ptr) 564 if (dev->dn_ptr)
565 dev_hold(dev); 565 dev_hold(dev);
566 else 566 else
567 dev = NULL; 567 dev = NULL;
568 } 568 }
569 read_unlock(&dndev_lock); 569 read_unlock(&dndev_lock);
570 return dev; 570 return dev;
571 } 571 }
572 572
573 int dn_dev_set_default(struct net_device *dev, int force) 573 int dn_dev_set_default(struct net_device *dev, int force)
574 { 574 {
575 struct net_device *old = NULL; 575 struct net_device *old = NULL;
576 int rv = -EBUSY; 576 int rv = -EBUSY;
577 if (!dev->dn_ptr) 577 if (!dev->dn_ptr)
578 return -ENODEV; 578 return -ENODEV;
579 write_lock(&dndev_lock); 579 write_lock(&dndev_lock);
580 if (force || decnet_default_device == NULL) { 580 if (force || decnet_default_device == NULL) {
581 old = decnet_default_device; 581 old = decnet_default_device;
582 decnet_default_device = dev; 582 decnet_default_device = dev;
583 rv = 0; 583 rv = 0;
584 } 584 }
585 write_unlock(&dndev_lock); 585 write_unlock(&dndev_lock);
586 if (old) 586 if (old)
587 dev_put(old); 587 dev_put(old);
588 return rv; 588 return rv;
589 } 589 }
590 590
/*
 * Clear the default-device slot if @dev currently occupies it.
 *
 * Note the reuse of @dev as a flag: it is NULLed inside the lock when
 * there is nothing to release, so the reference previously held by
 * decnet_default_device is dropped via dev_put() only when the slot
 * was actually cleared - and outside the write lock.
 */
static void dn_dev_check_default(struct net_device *dev)
{
	write_lock(&dndev_lock);
	if (dev == decnet_default_device) {
		decnet_default_device = NULL;
	} else {
		dev = NULL;
	}
	write_unlock(&dndev_lock);
	if (dev)
		dev_put(dev);
}
603 603
604 static struct dn_dev *dn_dev_by_index(int ifindex) 604 static struct dn_dev *dn_dev_by_index(int ifindex)
605 { 605 {
606 struct net_device *dev; 606 struct net_device *dev;
607 struct dn_dev *dn_dev = NULL; 607 struct dn_dev *dn_dev = NULL;
608 dev = dev_get_by_index(&init_net, ifindex); 608 dev = dev_get_by_index(&init_net, ifindex);
609 if (dev) { 609 if (dev) {
610 dn_dev = dev->dn_ptr; 610 dn_dev = dev->dn_ptr;
611 dev_put(dev); 611 dev_put(dev);
612 } 612 }
613 613
614 return dn_dev; 614 return dn_dev;
615 } 615 }
616 616
/* Netlink attribute policy for AF_DECnet RTM_{NEW,DEL}ADDR requests:
 * addresses are 16-bit values (read with nla_get_le16() in
 * dn_nl_newaddr()), the label is a bounded interface-name string. */
static const struct nla_policy dn_ifa_policy[IFA_MAX+1] = {
	[IFA_ADDRESS]		= { .type = NLA_U16 },
	[IFA_LOCAL]		= { .type = NLA_U16 },
	[IFA_LABEL]		= { .type = NLA_STRING,
				    .len = IFNAMSIZ - 1 },
};
623 623
/*
 * dn_nl_deladdr - RTM_DELADDR handler: remove an address from a device
 *
 * Removes (and destroys) the first address on the indicated device
 * that matches the supplied IFA_LOCAL and/or IFA_LABEL attributes; an
 * absent attribute matches anything.  DECnet exists only in the
 * initial network namespace.
 *
 * Returns 0 on success, -ENODEV for an unknown device,
 * -EADDRNOTAVAIL when no address matched, or a parse error.
 */
static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX+1];
	struct dn_dev *dn_db;
	struct ifaddrmsg *ifm;
	struct dn_ifaddr *ifa, **ifap;
	int err = -EINVAL;

	if (net != &init_net)
		goto errout;

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
	if (err < 0)
		goto errout;

	err = -ENODEV;
	ifm = nlmsg_data(nlh);
	if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL)
		goto errout;

	err = -EADDRNOTAVAIL;
	for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) {
		/* DECnet addresses are 2 bytes; compare raw bytes */
		if (tb[IFA_LOCAL] &&
		    nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
			continue;

		if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
			continue;

		dn_dev_del_ifa(dn_db, ifap, 1);
		return 0;
	}

errout:
	return err;
}
661 661
662 static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 662 static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
663 { 663 {
664 struct net *net = sock_net(skb->sk); 664 struct net *net = sock_net(skb->sk);
665 struct nlattr *tb[IFA_MAX+1]; 665 struct nlattr *tb[IFA_MAX+1];
666 struct net_device *dev; 666 struct net_device *dev;
667 struct dn_dev *dn_db; 667 struct dn_dev *dn_db;
668 struct ifaddrmsg *ifm; 668 struct ifaddrmsg *ifm;
669 struct dn_ifaddr *ifa; 669 struct dn_ifaddr *ifa;
670 int err; 670 int err;
671 671
672 if (net != &init_net) 672 if (net != &init_net)
673 return -EINVAL; 673 return -EINVAL;
674 674
675 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); 675 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
676 if (err < 0) 676 if (err < 0)
677 return err; 677 return err;
678 678
679 if (tb[IFA_LOCAL] == NULL) 679 if (tb[IFA_LOCAL] == NULL)
680 return -EINVAL; 680 return -EINVAL;
681 681
682 ifm = nlmsg_data(nlh); 682 ifm = nlmsg_data(nlh);
683 if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL) 683 if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL)
684 return -ENODEV; 684 return -ENODEV;
685 685
686 if ((dn_db = dev->dn_ptr) == NULL) { 686 if ((dn_db = dev->dn_ptr) == NULL) {
687 int err; 687 int err;
688 dn_db = dn_dev_create(dev, &err); 688 dn_db = dn_dev_create(dev, &err);
689 if (!dn_db) 689 if (!dn_db)
690 return err; 690 return err;
691 } 691 }
692 692
693 if ((ifa = dn_dev_alloc_ifa()) == NULL) 693 if ((ifa = dn_dev_alloc_ifa()) == NULL)
694 return -ENOBUFS; 694 return -ENOBUFS;
695 695
696 if (tb[IFA_ADDRESS] == NULL) 696 if (tb[IFA_ADDRESS] == NULL)
697 tb[IFA_ADDRESS] = tb[IFA_LOCAL]; 697 tb[IFA_ADDRESS] = tb[IFA_LOCAL];
698 698
699 ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]); 699 ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]);
700 ifa->ifa_address = nla_get_le16(tb[IFA_ADDRESS]); 700 ifa->ifa_address = nla_get_le16(tb[IFA_ADDRESS]);
701 ifa->ifa_flags = ifm->ifa_flags; 701 ifa->ifa_flags = ifm->ifa_flags;
702 ifa->ifa_scope = ifm->ifa_scope; 702 ifa->ifa_scope = ifm->ifa_scope;
703 ifa->ifa_dev = dn_db; 703 ifa->ifa_dev = dn_db;
704 704
705 if (tb[IFA_LABEL]) 705 if (tb[IFA_LABEL])
706 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ); 706 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
707 else 707 else
708 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ); 708 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
709 709
710 err = dn_dev_insert_ifa(dn_db, ifa); 710 err = dn_dev_insert_ifa(dn_db, ifa);
711 if (err) 711 if (err)
712 dn_dev_free_ifa(ifa); 712 dn_dev_free_ifa(ifa);
713 713
714 return err; 714 return err;
715 } 715 }
716 716
717 static inline size_t dn_ifaddr_nlmsg_size(void) 717 static inline size_t dn_ifaddr_nlmsg_size(void)
718 { 718 {
719 return NLMSG_ALIGN(sizeof(struct ifaddrmsg)) 719 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
720 + nla_total_size(IFNAMSIZ) /* IFA_LABEL */ 720 + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
721 + nla_total_size(2) /* IFA_ADDRESS */ 721 + nla_total_size(2) /* IFA_ADDRESS */
722 + nla_total_size(2); /* IFA_LOCAL */ 722 + nla_total_size(2); /* IFA_LOCAL */
723 } 723 }
724 724
/*
 * dn_nl_fill_ifaddr - serialise one address into an rtnetlink message
 * @skb: destination buffer
 * @ifa: the address to describe
 * @pid, @seq, @event, @flags: netlink header parameters
 *
 * Emits an ifaddrmsg plus IFA_ADDRESS/IFA_LOCAL/IFA_LABEL attributes
 * (each only when non-empty).  The NLA_PUT_* macros jump to
 * nla_put_failure on overflow, where the partial message is cancelled.
 *
 * Returns the message length on success, -EMSGSIZE when @skb is full.
 */
static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
			     u32 pid, u32 seq, int event, unsigned int flags)
{
	struct ifaddrmsg *ifm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifa_family = AF_DECnet;
	ifm->ifa_prefixlen = 16;	/* DECnet addresses are 16 bits */
	ifm->ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT;
	ifm->ifa_scope = ifa->ifa_scope;
	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;

	if (ifa->ifa_address)
		NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address);
	if (ifa->ifa_local)
		NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local);
	if (ifa->ifa_label[0])
		NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
755 755
/*
 * Broadcast an address change (@event is RTM_NEWADDR or RTM_DELADDR)
 * to RTNLGRP_DECnet_IFADDR listeners.  Failures are reported to the
 * group via rtnl_set_sk_err() rather than to the caller.
 */
static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(dn_ifaddr_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in dn_ifaddr_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, &init_net, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, RTNLGRP_DECnet_IFADDR, err);
}
777 777
/*
 * dn_nl_dump_ifaddr - RTM_GETADDR dump handler
 *
 * Walks every device and every address on it, emitting one
 * RTM_NEWADDR record per address until @skb fills.  Resumption state
 * lives in cb->args: args[0] is the device index already completed,
 * args[1] the address index within the device the previous pass
 * stopped at - the address skip only applies to that first device of
 * each pass (see the in-line comment).
 */
static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, dn_idx = 0, skip_ndevs, skip_naddr;
	struct net_device *dev;
	struct dn_dev *dn_db;
	struct dn_ifaddr *ifa;

	if (net != &init_net)
		return 0;

	skip_ndevs = cb->args[0];
	skip_naddr = cb->args[1];

	idx = 0;
	for_each_netdev(&init_net, dev) {
		if (idx < skip_ndevs)
			goto cont;
		else if (idx > skip_ndevs) {
			/* Only skip over addresses for first dev dumped
			 * in this iteration (idx == skip_ndevs) */
			skip_naddr = 0;
		}

		if ((dn_db = dev->dn_ptr) == NULL)
			goto cont;

		for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
		     ifa = ifa->ifa_next, dn_idx++) {
			if (dn_idx < skip_naddr)
				continue;

			/* skb full: record position and stop */
			if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
					      cb->nlh->nlmsg_seq, RTM_NEWADDR,
					      NLM_F_MULTI) < 0)
				goto done;
		}
cont:
		idx++;
	}
done:
	cb->args[0] = idx;
	cb->args[1] = dn_idx;

	return skb->len;
}
824 824
825 static int dn_dev_get_first(struct net_device *dev, __le16 *addr) 825 static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
826 { 826 {
827 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 827 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
828 struct dn_ifaddr *ifa; 828 struct dn_ifaddr *ifa;
829 int rv = -ENODEV; 829 int rv = -ENODEV;
830 if (dn_db == NULL) 830 if (dn_db == NULL)
831 goto out; 831 goto out;
832 ifa = dn_db->ifa_list; 832 ifa = dn_db->ifa_list;
833 if (ifa != NULL) { 833 if (ifa != NULL) {
834 *addr = ifa->ifa_local; 834 *addr = ifa->ifa_local;
835 rv = 0; 835 rv = 0;
836 } 836 }
837 out: 837 out:
838 return rv; 838 return rv;
839 } 839 }
840 840
841 /* 841 /*
842 * Find a default address to bind to. 842 * Find a default address to bind to.
843 * 843 *
844 * This is one of those areas where the initial VMS concepts don't really 844 * This is one of those areas where the initial VMS concepts don't really
845 * map onto the Linux concepts, and since we introduced multiple addresses 845 * map onto the Linux concepts, and since we introduced multiple addresses
846 * per interface we have to cope with slightly odd ways of finding out what 846 * per interface we have to cope with slightly odd ways of finding out what
847 * "our address" really is. Mostly it's not a problem; for this we just guess 847 * "our address" really is. Mostly it's not a problem; for this we just guess
848 * a sensible default. Eventually the routing code will take care of all the 848 * a sensible default. Eventually the routing code will take care of all the
849 * nasties for us I hope. 849 * nasties for us I hope.
850 */ 850 */
int dn_dev_bind_default(__le16 *addr)
{
	struct net_device *dev;
	int rv;

	/* First choice: the configured default device ... */
	dev = dn_dev_get_default();
last_chance:
	if (dev) {
		read_lock(&dev_base_lock);
		rv = dn_dev_get_first(dev, addr);
		read_unlock(&dev_base_lock);
		dev_put(dev);
		/* success, or we already fell back to loopback: stop here */
		if (rv == 0 || dev == init_net.loopback_dev)
			return rv;
	}
	/* ... otherwise retry once with the loopback device */
	dev = init_net.loopback_dev;
	dev_hold(dev);
	goto last_chance;
}
869 869
870 static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa) 870 static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
871 { 871 {
872 struct endnode_hello_message *msg; 872 struct endnode_hello_message *msg;
873 struct sk_buff *skb = NULL; 873 struct sk_buff *skb = NULL;
874 __le16 *pktlen; 874 __le16 *pktlen;
875 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 875 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
876 876
877 if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL) 877 if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
878 return; 878 return;
879 879
880 skb->dev = dev; 880 skb->dev = dev;
881 881
882 msg = (struct endnode_hello_message *)skb_put(skb,sizeof(*msg)); 882 msg = (struct endnode_hello_message *)skb_put(skb,sizeof(*msg));
883 883
884 msg->msgflg = 0x0D; 884 msg->msgflg = 0x0D;
885 memcpy(msg->tiver, dn_eco_version, 3); 885 memcpy(msg->tiver, dn_eco_version, 3);
886 dn_dn2eth(msg->id, ifa->ifa_local); 886 dn_dn2eth(msg->id, ifa->ifa_local);
887 msg->iinfo = DN_RT_INFO_ENDN; 887 msg->iinfo = DN_RT_INFO_ENDN;
888 msg->blksize = dn_htons(mtu2blksize(dev)); 888 msg->blksize = cpu_to_le16(mtu2blksize(dev));
889 msg->area = 0x00; 889 msg->area = 0x00;
890 memset(msg->seed, 0, 8); 890 memset(msg->seed, 0, 8);
891 memcpy(msg->neighbor, dn_hiord, ETH_ALEN); 891 memcpy(msg->neighbor, dn_hiord, ETH_ALEN);
892 892
893 if (dn_db->router) { 893 if (dn_db->router) {
894 struct dn_neigh *dn = (struct dn_neigh *)dn_db->router; 894 struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
895 dn_dn2eth(msg->neighbor, dn->addr); 895 dn_dn2eth(msg->neighbor, dn->addr);
896 } 896 }
897 897
898 msg->timer = dn_htons((unsigned short)dn_db->parms.t3); 898 msg->timer = cpu_to_le16((unsigned short)dn_db->parms.t3);
899 msg->mpd = 0x00; 899 msg->mpd = 0x00;
900 msg->datalen = 0x02; 900 msg->datalen = 0x02;
901 memset(msg->data, 0xAA, 2); 901 memset(msg->data, 0xAA, 2);
902 902
903 pktlen = (__le16 *)skb_push(skb,2); 903 pktlen = (__le16 *)skb_push(skb,2);
904 *pktlen = dn_htons(skb->len - 2); 904 *pktlen = cpu_to_le16(skb->len - 2);
905 905
906 skb_reset_network_header(skb); 906 skb_reset_network_header(skb);
907 907
908 dn_rt_finish_output(skb, dn_rt_all_rt_mcast, msg->id); 908 dn_rt_finish_output(skb, dn_rt_all_rt_mcast, msg->id);
909 } 909 }
910 910
911 911
912 #define DRDELAY (5 * HZ) 912 #define DRDELAY (5 * HZ)
913 913
914 static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa) 914 static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa)
915 { 915 {
916 /* First check time since device went up */ 916 /* First check time since device went up */
917 if ((jiffies - dn_db->uptime) < DRDELAY) 917 if ((jiffies - dn_db->uptime) < DRDELAY)
918 return 0; 918 return 0;
919 919
920 /* If there is no router, then yes... */ 920 /* If there is no router, then yes... */
921 if (!dn_db->router) 921 if (!dn_db->router)
922 return 1; 922 return 1;
923 923
924 /* otherwise only if we have a higher priority or.. */ 924 /* otherwise only if we have a higher priority or.. */
925 if (dn->priority < dn_db->parms.priority) 925 if (dn->priority < dn_db->parms.priority)
926 return 1; 926 return 1;
927 927
928 /* if we have equal priority and a higher node number */ 928 /* if we have equal priority and a higher node number */
929 if (dn->priority != dn_db->parms.priority) 929 if (dn->priority != dn_db->parms.priority)
930 return 0; 930 return 0;
931 931
932 if (dn_ntohs(dn->addr) < dn_ntohs(ifa->ifa_local)) 932 if (le16_to_cpu(dn->addr) < le16_to_cpu(ifa->ifa_local))
933 return 1; 933 return 1;
934 934
935 return 0; 935 return 0;
936 } 936 }
937 937
938 static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa) 938 static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
939 { 939 {
940 int n; 940 int n;
941 struct dn_dev *dn_db = dev->dn_ptr; 941 struct dn_dev *dn_db = dev->dn_ptr;
942 struct dn_neigh *dn = (struct dn_neigh *)dn_db->router; 942 struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
943 struct sk_buff *skb; 943 struct sk_buff *skb;
944 size_t size; 944 size_t size;
945 unsigned char *ptr; 945 unsigned char *ptr;
946 unsigned char *i1, *i2; 946 unsigned char *i1, *i2;
947 __le16 *pktlen; 947 __le16 *pktlen;
948 char *src; 948 char *src;
949 949
950 if (mtu2blksize(dev) < (26 + 7)) 950 if (mtu2blksize(dev) < (26 + 7))
951 return; 951 return;
952 952
953 n = mtu2blksize(dev) - 26; 953 n = mtu2blksize(dev) - 26;
954 n /= 7; 954 n /= 7;
955 955
956 if (n > 32) 956 if (n > 32)
957 n = 32; 957 n = 32;
958 958
959 size = 2 + 26 + 7 * n; 959 size = 2 + 26 + 7 * n;
960 960
961 if ((skb = dn_alloc_skb(NULL, size, GFP_ATOMIC)) == NULL) 961 if ((skb = dn_alloc_skb(NULL, size, GFP_ATOMIC)) == NULL)
962 return; 962 return;
963 963
964 skb->dev = dev; 964 skb->dev = dev;
965 ptr = skb_put(skb, size); 965 ptr = skb_put(skb, size);
966 966
967 *ptr++ = DN_RT_PKT_CNTL | DN_RT_PKT_ERTH; 967 *ptr++ = DN_RT_PKT_CNTL | DN_RT_PKT_ERTH;
968 *ptr++ = 2; /* ECO */ 968 *ptr++ = 2; /* ECO */
969 *ptr++ = 0; 969 *ptr++ = 0;
970 *ptr++ = 0; 970 *ptr++ = 0;
971 dn_dn2eth(ptr, ifa->ifa_local); 971 dn_dn2eth(ptr, ifa->ifa_local);
972 src = ptr; 972 src = ptr;
973 ptr += ETH_ALEN; 973 ptr += ETH_ALEN;
974 *ptr++ = dn_db->parms.forwarding == 1 ? 974 *ptr++ = dn_db->parms.forwarding == 1 ?
975 DN_RT_INFO_L1RT : DN_RT_INFO_L2RT; 975 DN_RT_INFO_L1RT : DN_RT_INFO_L2RT;
976 *((__le16 *)ptr) = dn_htons(mtu2blksize(dev)); 976 *((__le16 *)ptr) = cpu_to_le16(mtu2blksize(dev));
977 ptr += 2; 977 ptr += 2;
978 *ptr++ = dn_db->parms.priority; /* Priority */ 978 *ptr++ = dn_db->parms.priority; /* Priority */
979 *ptr++ = 0; /* Area: Reserved */ 979 *ptr++ = 0; /* Area: Reserved */
980 *((__le16 *)ptr) = dn_htons((unsigned short)dn_db->parms.t3); 980 *((__le16 *)ptr) = cpu_to_le16((unsigned short)dn_db->parms.t3);
981 ptr += 2; 981 ptr += 2;
982 *ptr++ = 0; /* MPD: Reserved */ 982 *ptr++ = 0; /* MPD: Reserved */
983 i1 = ptr++; 983 i1 = ptr++;
984 memset(ptr, 0, 7); /* Name: Reserved */ 984 memset(ptr, 0, 7); /* Name: Reserved */
985 ptr += 7; 985 ptr += 7;
986 i2 = ptr++; 986 i2 = ptr++;
987 987
988 n = dn_neigh_elist(dev, ptr, n); 988 n = dn_neigh_elist(dev, ptr, n);
989 989
990 *i2 = 7 * n; 990 *i2 = 7 * n;
991 *i1 = 8 + *i2; 991 *i1 = 8 + *i2;
992 992
993 skb_trim(skb, (27 + *i2)); 993 skb_trim(skb, (27 + *i2));
994 994
995 pktlen = (__le16 *)skb_push(skb, 2); 995 pktlen = (__le16 *)skb_push(skb, 2);
996 *pktlen = dn_htons(skb->len - 2); 996 *pktlen = cpu_to_le16(skb->len - 2);
997 997
998 skb_reset_network_header(skb); 998 skb_reset_network_header(skb);
999 999
1000 if (dn_am_i_a_router(dn, dn_db, ifa)) { 1000 if (dn_am_i_a_router(dn, dn_db, ifa)) {
1001 struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC); 1001 struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
1002 if (skb2) { 1002 if (skb2) {
1003 dn_rt_finish_output(skb2, dn_rt_all_end_mcast, src); 1003 dn_rt_finish_output(skb2, dn_rt_all_end_mcast, src);
1004 } 1004 }
1005 } 1005 }
1006 1006
1007 dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src); 1007 dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src);
1008 } 1008 }
1009 1009
1010 static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa) 1010 static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa)
1011 { 1011 {
1012 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 1012 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
1013 1013
1014 if (dn_db->parms.forwarding == 0) 1014 if (dn_db->parms.forwarding == 0)
1015 dn_send_endnode_hello(dev, ifa); 1015 dn_send_endnode_hello(dev, ifa);
1016 else 1016 else
1017 dn_send_router_hello(dev, ifa); 1017 dn_send_router_hello(dev, ifa);
1018 } 1018 }
1019 1019
1020 static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa) 1020 static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa)
1021 { 1021 {
1022 int tdlen = 16; 1022 int tdlen = 16;
1023 int size = dev->hard_header_len + 2 + 4 + tdlen; 1023 int size = dev->hard_header_len + 2 + 4 + tdlen;
1024 struct sk_buff *skb = dn_alloc_skb(NULL, size, GFP_ATOMIC); 1024 struct sk_buff *skb = dn_alloc_skb(NULL, size, GFP_ATOMIC);
1025 int i; 1025 int i;
1026 unsigned char *ptr; 1026 unsigned char *ptr;
1027 char src[ETH_ALEN]; 1027 char src[ETH_ALEN];
1028 1028
1029 if (skb == NULL) 1029 if (skb == NULL)
1030 return ; 1030 return ;
1031 1031
1032 skb->dev = dev; 1032 skb->dev = dev;
1033 skb_push(skb, dev->hard_header_len); 1033 skb_push(skb, dev->hard_header_len);
1034 ptr = skb_put(skb, 2 + 4 + tdlen); 1034 ptr = skb_put(skb, 2 + 4 + tdlen);
1035 1035
1036 *ptr++ = DN_RT_PKT_HELO; 1036 *ptr++ = DN_RT_PKT_HELO;
1037 *((__le16 *)ptr) = ifa->ifa_local; 1037 *((__le16 *)ptr) = ifa->ifa_local;
1038 ptr += 2; 1038 ptr += 2;
1039 *ptr++ = tdlen; 1039 *ptr++ = tdlen;
1040 1040
1041 for(i = 0; i < tdlen; i++) 1041 for(i = 0; i < tdlen; i++)
1042 *ptr++ = 0252; 1042 *ptr++ = 0252;
1043 1043
1044 dn_dn2eth(src, ifa->ifa_local); 1044 dn_dn2eth(src, ifa->ifa_local);
1045 dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src); 1045 dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src);
1046 } 1046 }
1047 1047
1048 static int dn_eth_up(struct net_device *dev) 1048 static int dn_eth_up(struct net_device *dev)
1049 { 1049 {
1050 struct dn_dev *dn_db = dev->dn_ptr; 1050 struct dn_dev *dn_db = dev->dn_ptr;
1051 1051
1052 if (dn_db->parms.forwarding == 0) 1052 if (dn_db->parms.forwarding == 0)
1053 dev_mc_add(dev, dn_rt_all_end_mcast, ETH_ALEN, 0); 1053 dev_mc_add(dev, dn_rt_all_end_mcast, ETH_ALEN, 0);
1054 else 1054 else
1055 dev_mc_add(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); 1055 dev_mc_add(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0);
1056 1056
1057 dn_db->use_long = 1; 1057 dn_db->use_long = 1;
1058 1058
1059 return 0; 1059 return 0;
1060 } 1060 }
1061 1061
1062 static void dn_eth_down(struct net_device *dev) 1062 static void dn_eth_down(struct net_device *dev)
1063 { 1063 {
1064 struct dn_dev *dn_db = dev->dn_ptr; 1064 struct dn_dev *dn_db = dev->dn_ptr;
1065 1065
1066 if (dn_db->parms.forwarding == 0) 1066 if (dn_db->parms.forwarding == 0)
1067 dev_mc_delete(dev, dn_rt_all_end_mcast, ETH_ALEN, 0); 1067 dev_mc_delete(dev, dn_rt_all_end_mcast, ETH_ALEN, 0);
1068 else 1068 else
1069 dev_mc_delete(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); 1069 dev_mc_delete(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0);
1070 } 1070 }
1071 1071
1072 static void dn_dev_set_timer(struct net_device *dev); 1072 static void dn_dev_set_timer(struct net_device *dev);
1073 1073
1074 static void dn_dev_timer_func(unsigned long arg) 1074 static void dn_dev_timer_func(unsigned long arg)
1075 { 1075 {
1076 struct net_device *dev = (struct net_device *)arg; 1076 struct net_device *dev = (struct net_device *)arg;
1077 struct dn_dev *dn_db = dev->dn_ptr; 1077 struct dn_dev *dn_db = dev->dn_ptr;
1078 struct dn_ifaddr *ifa; 1078 struct dn_ifaddr *ifa;
1079 1079
1080 if (dn_db->t3 <= dn_db->parms.t2) { 1080 if (dn_db->t3 <= dn_db->parms.t2) {
1081 if (dn_db->parms.timer3) { 1081 if (dn_db->parms.timer3) {
1082 for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) { 1082 for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
1083 if (!(ifa->ifa_flags & IFA_F_SECONDARY)) 1083 if (!(ifa->ifa_flags & IFA_F_SECONDARY))
1084 dn_db->parms.timer3(dev, ifa); 1084 dn_db->parms.timer3(dev, ifa);
1085 } 1085 }
1086 } 1086 }
1087 dn_db->t3 = dn_db->parms.t3; 1087 dn_db->t3 = dn_db->parms.t3;
1088 } else { 1088 } else {
1089 dn_db->t3 -= dn_db->parms.t2; 1089 dn_db->t3 -= dn_db->parms.t2;
1090 } 1090 }
1091 1091
1092 dn_dev_set_timer(dev); 1092 dn_dev_set_timer(dev);
1093 } 1093 }
1094 1094
1095 static void dn_dev_set_timer(struct net_device *dev) 1095 static void dn_dev_set_timer(struct net_device *dev)
1096 { 1096 {
1097 struct dn_dev *dn_db = dev->dn_ptr; 1097 struct dn_dev *dn_db = dev->dn_ptr;
1098 1098
1099 if (dn_db->parms.t2 > dn_db->parms.t3) 1099 if (dn_db->parms.t2 > dn_db->parms.t3)
1100 dn_db->parms.t2 = dn_db->parms.t3; 1100 dn_db->parms.t2 = dn_db->parms.t3;
1101 1101
1102 dn_db->timer.data = (unsigned long)dev; 1102 dn_db->timer.data = (unsigned long)dev;
1103 dn_db->timer.function = dn_dev_timer_func; 1103 dn_db->timer.function = dn_dev_timer_func;
1104 dn_db->timer.expires = jiffies + (dn_db->parms.t2 * HZ); 1104 dn_db->timer.expires = jiffies + (dn_db->parms.t2 * HZ);
1105 1105
1106 add_timer(&dn_db->timer); 1106 add_timer(&dn_db->timer);
1107 } 1107 }
1108 1108
1109 struct dn_dev *dn_dev_create(struct net_device *dev, int *err) 1109 struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
1110 { 1110 {
1111 int i; 1111 int i;
1112 struct dn_dev_parms *p = dn_dev_list; 1112 struct dn_dev_parms *p = dn_dev_list;
1113 struct dn_dev *dn_db; 1113 struct dn_dev *dn_db;
1114 1114
1115 for(i = 0; i < DN_DEV_LIST_SIZE; i++, p++) { 1115 for(i = 0; i < DN_DEV_LIST_SIZE; i++, p++) {
1116 if (p->type == dev->type) 1116 if (p->type == dev->type)
1117 break; 1117 break;
1118 } 1118 }
1119 1119
1120 *err = -ENODEV; 1120 *err = -ENODEV;
1121 if (i == DN_DEV_LIST_SIZE) 1121 if (i == DN_DEV_LIST_SIZE)
1122 return NULL; 1122 return NULL;
1123 1123
1124 *err = -ENOBUFS; 1124 *err = -ENOBUFS;
1125 if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL) 1125 if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL)
1126 return NULL; 1126 return NULL;
1127 1127
1128 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms)); 1128 memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
1129 smp_wmb(); 1129 smp_wmb();
1130 dev->dn_ptr = dn_db; 1130 dev->dn_ptr = dn_db;
1131 dn_db->dev = dev; 1131 dn_db->dev = dev;
1132 init_timer(&dn_db->timer); 1132 init_timer(&dn_db->timer);
1133 1133
1134 dn_db->uptime = jiffies; 1134 dn_db->uptime = jiffies;
1135 1135
1136 dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table); 1136 dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
1137 if (!dn_db->neigh_parms) { 1137 if (!dn_db->neigh_parms) {
1138 dev->dn_ptr = NULL; 1138 dev->dn_ptr = NULL;
1139 kfree(dn_db); 1139 kfree(dn_db);
1140 return NULL; 1140 return NULL;
1141 } 1141 }
1142 1142
1143 if (dn_db->parms.up) { 1143 if (dn_db->parms.up) {
1144 if (dn_db->parms.up(dev) < 0) { 1144 if (dn_db->parms.up(dev) < 0) {
1145 neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms); 1145 neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms);
1146 dev->dn_ptr = NULL; 1146 dev->dn_ptr = NULL;
1147 kfree(dn_db); 1147 kfree(dn_db);
1148 return NULL; 1148 return NULL;
1149 } 1149 }
1150 } 1150 }
1151 1151
1152 dn_dev_sysctl_register(dev, &dn_db->parms); 1152 dn_dev_sysctl_register(dev, &dn_db->parms);
1153 1153
1154 dn_dev_set_timer(dev); 1154 dn_dev_set_timer(dev);
1155 1155
1156 *err = 0; 1156 *err = 0;
1157 return dn_db; 1157 return dn_db;
1158 } 1158 }
1159 1159
1160 1160
1161 /* 1161 /*
1162 * This processes a device up event. We only start up 1162 * This processes a device up event. We only start up
1163 * the loopback device & ethernet devices with correct 1163 * the loopback device & ethernet devices with correct
1164 * MAC addreses automatically. Others must be started 1164 * MAC addreses automatically. Others must be started
1165 * specifically. 1165 * specifically.
1166 * 1166 *
1167 * FIXME: How should we configure the loopback address ? If we could dispense 1167 * FIXME: How should we configure the loopback address ? If we could dispense
1168 * with using decnet_address here and for autobind, it will be one less thing 1168 * with using decnet_address here and for autobind, it will be one less thing
1169 * for users to worry about setting up. 1169 * for users to worry about setting up.
1170 */ 1170 */
1171 1171
1172 void dn_dev_up(struct net_device *dev) 1172 void dn_dev_up(struct net_device *dev)
1173 { 1173 {
1174 struct dn_ifaddr *ifa; 1174 struct dn_ifaddr *ifa;
1175 __le16 addr = decnet_address; 1175 __le16 addr = decnet_address;
1176 int maybe_default = 0; 1176 int maybe_default = 0;
1177 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr; 1177 struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
1178 1178
1179 if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK)) 1179 if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
1180 return; 1180 return;
1181 1181
1182 /* 1182 /*
1183 * Need to ensure that loopback device has a dn_db attached to it 1183 * Need to ensure that loopback device has a dn_db attached to it
1184 * to allow creation of neighbours against it, even though it might 1184 * to allow creation of neighbours against it, even though it might
1185 * not have a local address of its own. Might as well do the same for 1185 * not have a local address of its own. Might as well do the same for
1186 * all autoconfigured interfaces. 1186 * all autoconfigured interfaces.
1187 */ 1187 */
1188 if (dn_db == NULL) { 1188 if (dn_db == NULL) {
1189 int err; 1189 int err;
1190 dn_db = dn_dev_create(dev, &err); 1190 dn_db = dn_dev_create(dev, &err);
1191 if (dn_db == NULL) 1191 if (dn_db == NULL)
1192 return; 1192 return;
1193 } 1193 }
1194 1194
1195 if (dev->type == ARPHRD_ETHER) { 1195 if (dev->type == ARPHRD_ETHER) {
1196 if (memcmp(dev->dev_addr, dn_hiord, 4) != 0) 1196 if (memcmp(dev->dev_addr, dn_hiord, 4) != 0)
1197 return; 1197 return;
1198 addr = dn_eth2dn(dev->dev_addr); 1198 addr = dn_eth2dn(dev->dev_addr);
1199 maybe_default = 1; 1199 maybe_default = 1;
1200 } 1200 }
1201 1201
1202 if (addr == 0) 1202 if (addr == 0)
1203 return; 1203 return;
1204 1204
1205 if ((ifa = dn_dev_alloc_ifa()) == NULL) 1205 if ((ifa = dn_dev_alloc_ifa()) == NULL)
1206 return; 1206 return;
1207 1207
1208 ifa->ifa_local = ifa->ifa_address = addr; 1208 ifa->ifa_local = ifa->ifa_address = addr;
1209 ifa->ifa_flags = 0; 1209 ifa->ifa_flags = 0;
1210 ifa->ifa_scope = RT_SCOPE_UNIVERSE; 1210 ifa->ifa_scope = RT_SCOPE_UNIVERSE;
1211 strcpy(ifa->ifa_label, dev->name); 1211 strcpy(ifa->ifa_label, dev->name);
1212 1212
1213 dn_dev_set_ifa(dev, ifa); 1213 dn_dev_set_ifa(dev, ifa);
1214 1214
1215 /* 1215 /*
1216 * Automagically set the default device to the first automatically 1216 * Automagically set the default device to the first automatically
1217 * configured ethernet card in the system. 1217 * configured ethernet card in the system.
1218 */ 1218 */
1219 if (maybe_default) { 1219 if (maybe_default) {
1220 dev_hold(dev); 1220 dev_hold(dev);
1221 if (dn_dev_set_default(dev, 0)) 1221 if (dn_dev_set_default(dev, 0))
1222 dev_put(dev); 1222 dev_put(dev);
1223 } 1223 }
1224 } 1224 }
1225 1225
1226 static void dn_dev_delete(struct net_device *dev) 1226 static void dn_dev_delete(struct net_device *dev)
1227 { 1227 {
1228 struct dn_dev *dn_db = dev->dn_ptr; 1228 struct dn_dev *dn_db = dev->dn_ptr;
1229 1229
1230 if (dn_db == NULL) 1230 if (dn_db == NULL)
1231 return; 1231 return;
1232 1232
1233 del_timer_sync(&dn_db->timer); 1233 del_timer_sync(&dn_db->timer);
1234 dn_dev_sysctl_unregister(&dn_db->parms); 1234 dn_dev_sysctl_unregister(&dn_db->parms);
1235 dn_dev_check_default(dev); 1235 dn_dev_check_default(dev);
1236 neigh_ifdown(&dn_neigh_table, dev); 1236 neigh_ifdown(&dn_neigh_table, dev);
1237 1237
1238 if (dn_db->parms.down) 1238 if (dn_db->parms.down)
1239 dn_db->parms.down(dev); 1239 dn_db->parms.down(dev);
1240 1240
1241 dev->dn_ptr = NULL; 1241 dev->dn_ptr = NULL;
1242 1242
1243 neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms); 1243 neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms);
1244 neigh_ifdown(&dn_neigh_table, dev); 1244 neigh_ifdown(&dn_neigh_table, dev);
1245 1245
1246 if (dn_db->router) 1246 if (dn_db->router)
1247 neigh_release(dn_db->router); 1247 neigh_release(dn_db->router);
1248 if (dn_db->peer) 1248 if (dn_db->peer)
1249 neigh_release(dn_db->peer); 1249 neigh_release(dn_db->peer);
1250 1250
1251 kfree(dn_db); 1251 kfree(dn_db);
1252 } 1252 }
1253 1253
1254 void dn_dev_down(struct net_device *dev) 1254 void dn_dev_down(struct net_device *dev)
1255 { 1255 {
1256 struct dn_dev *dn_db = dev->dn_ptr; 1256 struct dn_dev *dn_db = dev->dn_ptr;
1257 struct dn_ifaddr *ifa; 1257 struct dn_ifaddr *ifa;
1258 1258
1259 if (dn_db == NULL) 1259 if (dn_db == NULL)
1260 return; 1260 return;
1261 1261
1262 while((ifa = dn_db->ifa_list) != NULL) { 1262 while((ifa = dn_db->ifa_list) != NULL) {
1263 dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0); 1263 dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0);
1264 dn_dev_free_ifa(ifa); 1264 dn_dev_free_ifa(ifa);
1265 } 1265 }
1266 1266
1267 dn_dev_delete(dev); 1267 dn_dev_delete(dev);
1268 } 1268 }
1269 1269
/* Placeholder: initialization packets are not processed. */
void dn_dev_init_pkt(struct sk_buff *skb)
{
}
1274 1274
/* Placeholder: verification packets are not processed. */
void dn_dev_veri_pkt(struct sk_buff *skb)
{
}
1279 1279
/* Placeholder: incoming hello packets are not processed here. */
void dn_dev_hello(struct sk_buff *skb)
{
}
1284 1284
1285 void dn_dev_devices_off(void) 1285 void dn_dev_devices_off(void)
1286 { 1286 {
1287 struct net_device *dev; 1287 struct net_device *dev;
1288 1288
1289 rtnl_lock(); 1289 rtnl_lock();
1290 for_each_netdev(&init_net, dev) 1290 for_each_netdev(&init_net, dev)
1291 dn_dev_down(dev); 1291 dn_dev_down(dev);
1292 rtnl_unlock(); 1292 rtnl_unlock();
1293 1293
1294 } 1294 }
1295 1295
1296 void dn_dev_devices_on(void) 1296 void dn_dev_devices_on(void)
1297 { 1297 {
1298 struct net_device *dev; 1298 struct net_device *dev;
1299 1299
1300 rtnl_lock(); 1300 rtnl_lock();
1301 for_each_netdev(&init_net, dev) { 1301 for_each_netdev(&init_net, dev) {
1302 if (dev->flags & IFF_UP) 1302 if (dev->flags & IFF_UP)
1303 dn_dev_up(dev); 1303 dn_dev_up(dev);
1304 } 1304 }
1305 rtnl_unlock(); 1305 rtnl_unlock();
1306 } 1306 }
1307 1307
1308 int register_dnaddr_notifier(struct notifier_block *nb) 1308 int register_dnaddr_notifier(struct notifier_block *nb)
1309 { 1309 {
1310 return blocking_notifier_chain_register(&dnaddr_chain, nb); 1310 return blocking_notifier_chain_register(&dnaddr_chain, nb);
1311 } 1311 }
1312 1312
1313 int unregister_dnaddr_notifier(struct notifier_block *nb) 1313 int unregister_dnaddr_notifier(struct notifier_block *nb)
1314 { 1314 {
1315 return blocking_notifier_chain_unregister(&dnaddr_chain, nb); 1315 return blocking_notifier_chain_unregister(&dnaddr_chain, nb);
1316 } 1316 }
1317 1317
1318 #ifdef CONFIG_PROC_FS 1318 #ifdef CONFIG_PROC_FS
1319 static inline int is_dn_dev(struct net_device *dev) 1319 static inline int is_dn_dev(struct net_device *dev)
1320 { 1320 {
1321 return dev->dn_ptr != NULL; 1321 return dev->dn_ptr != NULL;
1322 } 1322 }
1323 1323
1324 static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos) 1324 static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
1325 { 1325 {
1326 int i; 1326 int i;
1327 struct net_device *dev; 1327 struct net_device *dev;
1328 1328
1329 read_lock(&dev_base_lock); 1329 read_lock(&dev_base_lock);
1330 1330
1331 if (*pos == 0) 1331 if (*pos == 0)
1332 return SEQ_START_TOKEN; 1332 return SEQ_START_TOKEN;
1333 1333
1334 i = 1; 1334 i = 1;
1335 for_each_netdev(&init_net, dev) { 1335 for_each_netdev(&init_net, dev) {
1336 if (!is_dn_dev(dev)) 1336 if (!is_dn_dev(dev))
1337 continue; 1337 continue;
1338 1338
1339 if (i++ == *pos) 1339 if (i++ == *pos)
1340 return dev; 1340 return dev;
1341 } 1341 }
1342 1342
1343 return NULL; 1343 return NULL;
1344 } 1344 }
1345 1345
1346 static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1346 static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1347 { 1347 {
1348 struct net_device *dev; 1348 struct net_device *dev;
1349 1349
1350 ++*pos; 1350 ++*pos;
1351 1351
1352 dev = (struct net_device *)v; 1352 dev = (struct net_device *)v;
1353 if (v == SEQ_START_TOKEN) 1353 if (v == SEQ_START_TOKEN)
1354 dev = net_device_entry(&init_net.dev_base_head); 1354 dev = net_device_entry(&init_net.dev_base_head);
1355 1355
1356 for_each_netdev_continue(&init_net, dev) { 1356 for_each_netdev_continue(&init_net, dev) {
1357 if (!is_dn_dev(dev)) 1357 if (!is_dn_dev(dev))
1358 continue; 1358 continue;
1359 1359
1360 return dev; 1360 return dev;
1361 } 1361 }
1362 1362
1363 return NULL; 1363 return NULL;
1364 } 1364 }
1365 1365
1366 static void dn_dev_seq_stop(struct seq_file *seq, void *v) 1366 static void dn_dev_seq_stop(struct seq_file *seq, void *v)
1367 { 1367 {
1368 read_unlock(&dev_base_lock); 1368 read_unlock(&dev_base_lock);
1369 } 1369 }
1370 1370
1371 static char *dn_type2asc(char type) 1371 static char *dn_type2asc(char type)
1372 { 1372 {
1373 switch(type) { 1373 switch(type) {
1374 case DN_DEV_BCAST: 1374 case DN_DEV_BCAST:
1375 return "B"; 1375 return "B";
1376 case DN_DEV_UCAST: 1376 case DN_DEV_UCAST:
1377 return "U"; 1377 return "U";
1378 case DN_DEV_MPOINT: 1378 case DN_DEV_MPOINT:
1379 return "M"; 1379 return "M";
1380 } 1380 }
1381 1381
1382 return "?"; 1382 return "?";
1383 } 1383 }
1384 1384
1385 static int dn_dev_seq_show(struct seq_file *seq, void *v) 1385 static int dn_dev_seq_show(struct seq_file *seq, void *v)
1386 { 1386 {
1387 if (v == SEQ_START_TOKEN) 1387 if (v == SEQ_START_TOKEN)
1388 seq_puts(seq, "Name Flags T1 Timer1 T3 Timer3 BlkSize Pri State DevType Router Peer\n"); 1388 seq_puts(seq, "Name Flags T1 Timer1 T3 Timer3 BlkSize Pri State DevType Router Peer\n");
1389 else { 1389 else {
1390 struct net_device *dev = v; 1390 struct net_device *dev = v;
1391 char peer_buf[DN_ASCBUF_LEN]; 1391 char peer_buf[DN_ASCBUF_LEN];
1392 char router_buf[DN_ASCBUF_LEN]; 1392 char router_buf[DN_ASCBUF_LEN];
1393 struct dn_dev *dn_db = dev->dn_ptr; 1393 struct dn_dev *dn_db = dev->dn_ptr;
1394 1394
1395 seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu" 1395 seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu"
1396 " %04hu %03d %02x %-10s %-7s %-7s\n", 1396 " %04hu %03d %02x %-10s %-7s %-7s\n",
1397 dev->name ? dev->name : "???", 1397 dev->name ? dev->name : "???",
1398 dn_type2asc(dn_db->parms.mode), 1398 dn_type2asc(dn_db->parms.mode),
1399 0, 0, 1399 0, 0,
1400 dn_db->t3, dn_db->parms.t3, 1400 dn_db->t3, dn_db->parms.t3,
1401 mtu2blksize(dev), 1401 mtu2blksize(dev),
1402 dn_db->parms.priority, 1402 dn_db->parms.priority,
1403 dn_db->parms.state, dn_db->parms.name, 1403 dn_db->parms.state, dn_db->parms.name,
1404 dn_db->router ? dn_addr2asc(dn_ntohs(*(__le16 *)dn_db->router->primary_key), router_buf) : "", 1404 dn_db->router ? dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->router->primary_key), router_buf) : "",
1405 dn_db->peer ? dn_addr2asc(dn_ntohs(*(__le16 *)dn_db->peer->primary_key), peer_buf) : ""); 1405 dn_db->peer ? dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->peer->primary_key), peer_buf) : "");
1406 } 1406 }
1407 return 0; 1407 return 0;
1408 } 1408 }
1409 1409
1410 static const struct seq_operations dn_dev_seq_ops = { 1410 static const struct seq_operations dn_dev_seq_ops = {
1411 .start = dn_dev_seq_start, 1411 .start = dn_dev_seq_start,
1412 .next = dn_dev_seq_next, 1412 .next = dn_dev_seq_next,
1413 .stop = dn_dev_seq_stop, 1413 .stop = dn_dev_seq_stop,
1414 .show = dn_dev_seq_show, 1414 .show = dn_dev_seq_show,
1415 }; 1415 };
1416 1416
1417 static int dn_dev_seq_open(struct inode *inode, struct file *file) 1417 static int dn_dev_seq_open(struct inode *inode, struct file *file)
1418 { 1418 {
1419 return seq_open(file, &dn_dev_seq_ops); 1419 return seq_open(file, &dn_dev_seq_ops);
1420 } 1420 }
1421 1421
1422 static const struct file_operations dn_dev_seq_fops = { 1422 static const struct file_operations dn_dev_seq_fops = {
1423 .owner = THIS_MODULE, 1423 .owner = THIS_MODULE,
1424 .open = dn_dev_seq_open, 1424 .open = dn_dev_seq_open,
1425 .read = seq_read, 1425 .read = seq_read,
1426 .llseek = seq_lseek, 1426 .llseek = seq_lseek,
1427 .release = seq_release, 1427 .release = seq_release,
1428 }; 1428 };
1429 1429
1430 #endif /* CONFIG_PROC_FS */ 1430 #endif /* CONFIG_PROC_FS */
1431 1431
1432 static int addr[2]; 1432 static int addr[2];
1433 module_param_array(addr, int, NULL, 0444); 1433 module_param_array(addr, int, NULL, 0444);
1434 MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node"); 1434 MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node");
1435 1435
1436 void __init dn_dev_init(void) 1436 void __init dn_dev_init(void)
1437 { 1437 {
1438 if (addr[0] > 63 || addr[0] < 0) { 1438 if (addr[0] > 63 || addr[0] < 0) {
1439 printk(KERN_ERR "DECnet: Area must be between 0 and 63"); 1439 printk(KERN_ERR "DECnet: Area must be between 0 and 63");
1440 return; 1440 return;
1441 } 1441 }
1442 1442
1443 if (addr[1] > 1023 || addr[1] < 0) { 1443 if (addr[1] > 1023 || addr[1] < 0) {
1444 printk(KERN_ERR "DECnet: Node must be between 0 and 1023"); 1444 printk(KERN_ERR "DECnet: Node must be between 0 and 1023");
1445 return; 1445 return;
1446 } 1446 }
1447 1447
1448 decnet_address = dn_htons((addr[0] << 10) | addr[1]); 1448 decnet_address = cpu_to_le16((addr[0] << 10) | addr[1]);
1449 1449
1450 dn_dev_devices_on(); 1450 dn_dev_devices_on();
1451 1451
1452 rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL); 1452 rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL);
1453 rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL); 1453 rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL);
1454 rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr); 1454 rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr);
1455 1455
1456 proc_net_fops_create(&init_net, "decnet_dev", S_IRUGO, &dn_dev_seq_fops); 1456 proc_net_fops_create(&init_net, "decnet_dev", S_IRUGO, &dn_dev_seq_fops);
1457 1457
1458 #ifdef CONFIG_SYSCTL 1458 #ifdef CONFIG_SYSCTL
1459 { 1459 {
1460 int i; 1460 int i;
1461 for(i = 0; i < DN_DEV_LIST_SIZE; i++) 1461 for(i = 0; i < DN_DEV_LIST_SIZE; i++)
1462 dn_dev_sysctl_register(NULL, &dn_dev_list[i]); 1462 dn_dev_sysctl_register(NULL, &dn_dev_list[i]);
1463 } 1463 }
1464 #endif /* CONFIG_SYSCTL */ 1464 #endif /* CONFIG_SYSCTL */
1465 } 1465 }
1466 1466
1467 void __exit dn_dev_cleanup(void) 1467 void __exit dn_dev_cleanup(void)
1468 { 1468 {
1469 #ifdef CONFIG_SYSCTL 1469 #ifdef CONFIG_SYSCTL
1470 { 1470 {
1471 int i; 1471 int i;
1472 for(i = 0; i < DN_DEV_LIST_SIZE; i++) 1472 for(i = 0; i < DN_DEV_LIST_SIZE; i++)
1473 dn_dev_sysctl_unregister(&dn_dev_list[i]); 1473 dn_dev_sysctl_unregister(&dn_dev_list[i]);
1474 } 1474 }
1475 #endif /* CONFIG_SYSCTL */ 1475 #endif /* CONFIG_SYSCTL */
1476 1476
1477 proc_net_remove(&init_net, "decnet_dev"); 1477 proc_net_remove(&init_net, "decnet_dev");
1478 1478
1479 dn_dev_devices_off(); 1479 dn_dev_devices_off();
1480 } 1480 }
1481 1481
net/decnet/dn_neigh.c
1 /* 1 /*
2 * DECnet An implementation of the DECnet protocol suite for the LINUX 2 * DECnet An implementation of the DECnet protocol suite for the LINUX
3 * operating system. DECnet is implemented using the BSD Socket 3 * operating system. DECnet is implemented using the BSD Socket
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * DECnet Neighbour Functions (Adjacency Database and 6 * DECnet Neighbour Functions (Adjacency Database and
7 * On-Ethernet Cache) 7 * On-Ethernet Cache)
8 * 8 *
9 * Author: Steve Whitehouse <SteveW@ACM.org> 9 * Author: Steve Whitehouse <SteveW@ACM.org>
10 * 10 *
11 * 11 *
12 * Changes: 12 * Changes:
13 * Steve Whitehouse : Fixed router listing routine 13 * Steve Whitehouse : Fixed router listing routine
14 * Steve Whitehouse : Added error_report functions 14 * Steve Whitehouse : Added error_report functions
15 * Steve Whitehouse : Added default router detection 15 * Steve Whitehouse : Added default router detection
16 * Steve Whitehouse : Hop counts in outgoing messages 16 * Steve Whitehouse : Hop counts in outgoing messages
17 * Steve Whitehouse : Fixed src/dst in outgoing messages so 17 * Steve Whitehouse : Fixed src/dst in outgoing messages so
18 * forwarding now stands a good chance of 18 * forwarding now stands a good chance of
19 * working. 19 * working.
20 * Steve Whitehouse : Fixed neighbour states (for now anyway). 20 * Steve Whitehouse : Fixed neighbour states (for now anyway).
21 * Steve Whitehouse : Made error_report functions dummies. This 21 * Steve Whitehouse : Made error_report functions dummies. This
22 * is not the right place to return skbs. 22 * is not the right place to return skbs.
23 * Steve Whitehouse : Convert to seq_file 23 * Steve Whitehouse : Convert to seq_file
24 * 24 *
25 */ 25 */
26 26
27 #include <linux/net.h> 27 #include <linux/net.h>
28 #include <linux/module.h> 28 #include <linux/module.h>
29 #include <linux/socket.h> 29 #include <linux/socket.h>
30 #include <linux/if_arp.h> 30 #include <linux/if_arp.h>
31 #include <linux/if_ether.h> 31 #include <linux/if_ether.h>
32 #include <linux/init.h> 32 #include <linux/init.h>
33 #include <linux/proc_fs.h> 33 #include <linux/proc_fs.h>
34 #include <linux/string.h> 34 #include <linux/string.h>
35 #include <linux/netfilter_decnet.h> 35 #include <linux/netfilter_decnet.h>
36 #include <linux/spinlock.h> 36 #include <linux/spinlock.h>
37 #include <linux/seq_file.h> 37 #include <linux/seq_file.h>
38 #include <linux/rcupdate.h> 38 #include <linux/rcupdate.h>
39 #include <linux/jhash.h> 39 #include <linux/jhash.h>
40 #include <asm/atomic.h> 40 #include <asm/atomic.h>
41 #include <net/net_namespace.h> 41 #include <net/net_namespace.h>
42 #include <net/neighbour.h> 42 #include <net/neighbour.h>
43 #include <net/dst.h> 43 #include <net/dst.h>
44 #include <net/flow.h> 44 #include <net/flow.h>
45 #include <net/dn.h> 45 #include <net/dn.h>
46 #include <net/dn_dev.h> 46 #include <net/dn_dev.h>
47 #include <net/dn_neigh.h> 47 #include <net/dn_neigh.h>
48 #include <net/dn_route.h> 48 #include <net/dn_route.h>
49 49
50 static u32 dn_neigh_hash(const void *pkey, const struct net_device *dev); 50 static u32 dn_neigh_hash(const void *pkey, const struct net_device *dev);
51 static int dn_neigh_construct(struct neighbour *); 51 static int dn_neigh_construct(struct neighbour *);
52 static void dn_long_error_report(struct neighbour *, struct sk_buff *); 52 static void dn_long_error_report(struct neighbour *, struct sk_buff *);
53 static void dn_short_error_report(struct neighbour *, struct sk_buff *); 53 static void dn_short_error_report(struct neighbour *, struct sk_buff *);
54 static int dn_long_output(struct sk_buff *); 54 static int dn_long_output(struct sk_buff *);
55 static int dn_short_output(struct sk_buff *); 55 static int dn_short_output(struct sk_buff *);
56 static int dn_phase3_output(struct sk_buff *); 56 static int dn_phase3_output(struct sk_buff *);
57 57
58 58
/*
 * For talking to broadcast devices: Ethernet & PPP
 */
static struct neigh_ops dn_long_ops = {
	.family =		AF_DECnet,
	.error_report =		dn_long_error_report,
	.output =		dn_long_output,
	.connected_output =	dn_long_output,
	.hh_output =		dev_queue_xmit,
	.queue_xmit =		dev_queue_xmit,
};

/*
 * For talking to pointopoint and multidrop devices: DDCMP and X.25
 */
static struct neigh_ops dn_short_ops = {
	.family =		AF_DECnet,
	.error_report =		dn_short_error_report,
	.output =		dn_short_output,
	.connected_output =	dn_short_output,
	.hh_output =		dev_queue_xmit,
	.queue_xmit =		dev_queue_xmit,
};

/*
 * For talking to DECnet phase III nodes
 */
static struct neigh_ops dn_phase3_ops = {
	.family =		AF_DECnet,
	.error_report =		dn_short_error_report, /* Can use short version here */
	.output =		dn_phase3_output,
	.connected_output =	dn_phase3_output,
	.hh_output =		dev_queue_xmit,
	.queue_xmit =		dev_queue_xmit
};
94 94
/*
 * The DECnet adjacency (neighbour) table.  Keyed on the 16 bit
 * little-endian DECnet node address; entries are struct dn_neigh.
 */
struct neigh_table dn_neigh_table = {
	.family =			PF_DECnet,
	.entry_size =			sizeof(struct dn_neigh),
	.key_len =			sizeof(__le16),
	.hash =				dn_neigh_hash,
	.constructor =			dn_neigh_construct,
	.id =				"dn_neigh_cache",
	.parms ={
		.tbl =			&dn_neigh_table,
		.base_reachable_time =	30 * HZ,
		.retrans_time =		1 * HZ,
		.gc_staletime =		60 * HZ,
		.reachable_time =	30 * HZ,
		.delay_probe_time =	5 * HZ,
		.queue_len =		3,
		/* No ARP-style probing on DECnet links */
		.ucast_probes =		0,
		.app_probes =		0,
		.mcast_probes =		0,
		.anycast_delay =	0,
		.proxy_delay =		0,
		.proxy_qlen =		0,
		.locktime =		1 * HZ,
	},
	.gc_interval =			30 * HZ,
	.gc_thresh1 =			128,
	.gc_thresh2 =			512,
	.gc_thresh3 =			1024,
};
123 123
/*
 * Hash a neighbour key (16 bit DECnet node address).  The device is
 * deliberately ignored so the same address hashes identically on all
 * interfaces (needed by the On-Ethernet cache lookup).
 * NOTE(review): the raw __le16 bytes are hashed without conversion, so
 * the distribution differs between endiannesses, but it is consistent
 * within any one host, which is all a hash needs.
 */
static u32 dn_neigh_hash(const void *pkey, const struct net_device *dev)
{
	return jhash_2words(*(__u16 *)pkey, 0, dn_neigh_table.hash_rnd);
}
128 128
/*
 * Neighbour constructor: bind a freshly allocated entry to the DECnet
 * state of its device — clone the device's neigh_parms, select the
 * output ops matching the link/header type and fill in the hardware
 * address.  Returns 0 on success, -EINVAL if the device has no DECnet
 * state or an unsupported hardware type.
 */
static int dn_neigh_construct(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;
	struct dn_neigh *dn = (struct dn_neigh *)neigh;
	struct dn_dev *dn_db;
	struct neigh_parms *parms;

	rcu_read_lock();
	dn_db = rcu_dereference(dev->dn_ptr);
	if (dn_db == NULL) {
		rcu_read_unlock();
		return -EINVAL;
	}

	parms = dn_db->neigh_parms;
	if (!parms) {
		rcu_read_unlock();
		return -EINVAL;
	}

	/* Swap the table-default parms for the per-device clone */
	__neigh_parms_put(neigh->parms);
	neigh->parms = neigh_parms_clone(parms);

	if (dn_db->use_long)
		neigh->ops = &dn_long_ops;
	else
		neigh->ops = &dn_short_ops;
	rcu_read_unlock();

	/* Phase III nodes always use the short-header variant */
	if (dn->flags & DN_NDFLAG_P3)
		neigh->ops = &dn_phase3_ops;

	/* DECnet has no address resolution step */
	neigh->nud_state = NUD_NOARP;
	neigh->output = neigh->ops->connected_output;

	if ((dev->type == ARPHRD_IPGRE) || (dev->flags & IFF_POINTOPOINT))
		memcpy(neigh->ha, dev->broadcast, dev->addr_len);
	else if ((dev->type == ARPHRD_ETHER) || (dev->type == ARPHRD_LOOPBACK))
		dn_dn2eth(neigh->ha, dn->addr);
	else {
		if (net_ratelimit())
			printk(KERN_DEBUG "Trying to create neigh for hw %d\n", dev->type);
		return -EINVAL;
	}

	/*
	 * Make an estimate of the remote block size by assuming that its
	 * two less then the device mtu, which it true for ethernet (and
	 * other things which support long format headers) since there is
	 * an extra length field (of 16 bits) which isn't part of the
	 * ethernet headers and which the DECnet specs won't admit is part
	 * of the DECnet routing headers either.
	 *
	 * If we over estimate here its no big deal, the NSP negotiations
	 * will prevent us from sending packets which are too large for the
	 * remote node to handle. In any case this figure is normally updated
	 * by a hello message in most cases.
	 */
	dn->blksize = dev->mtu - 2;

	return 0;
}
191 191
/*
 * Error-report stub for long-header neighbours: just log and drop the
 * skb — this is not the right place to return skbs (see file header).
 */
static void dn_long_error_report(struct neighbour *neigh, struct sk_buff *skb)
{
	printk(KERN_DEBUG "dn_long_error_report: called\n");
	kfree_skb(skb);
}
197 197
198 198
/*
 * Error-report stub for short-header (and phase III) neighbours:
 * log and drop the skb.
 */
static void dn_short_error_report(struct neighbour *neigh, struct sk_buff *skb)
{
	printk(KERN_DEBUG "dn_short_error_report: called\n");
	kfree_skb(skb);
}
204 204
/*
 * Final transmit step (netfilter okfn): build the link-layer header
 * from the neighbour's hardware address and the route's local source
 * address, then queue the skb on the device.  Drops the skb and
 * returns -EINVAL if the header cannot be built.
 */
static int dn_neigh_output_packet(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct dn_route *rt = (struct dn_route *)dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;
	char mac_addr[ETH_ALEN];

	dn_dn2eth(mac_addr, rt->rt_local_src);
	if (dev_hard_header(skb, dev, ntohs(skb->protocol), neigh->ha,
			    mac_addr, skb->len) >= 0)
		return neigh->ops->queue_xmit(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "dn_neigh_output_packet: oops, can't send packet\n");

	kfree_skb(skb);
	return -EINVAL;
}
224 224
/*
 * Transmit using the long (ethernet) routing header: ensure headroom,
 * prepend the 16 bit little-endian length word, one padding byte and
 * the long format header, then pass the skb through the POST_ROUTING
 * netfilter hook to dn_neigh_output_packet().
 */
static int dn_long_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;
	int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3;
	unsigned char *data;
	struct dn_long_packet *lp;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);


	if (skb_headroom(skb) < headroom) {
		/* Reallocate with room for our headers; old skb is freed */
		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
		if (skb2 == NULL) {
			if (net_ratelimit())
				printk(KERN_CRIT "dn_long_output: no memory\n");
			kfree_skb(skb);
			return -ENOBUFS;
		}
		kfree_skb(skb);
		skb = skb2;
		if (net_ratelimit())
			printk(KERN_INFO "dn_long_output: Increasing headroom\n");
	}

	/* 2 byte length field + 1 padding byte precede the routing header */
	data = skb_push(skb, sizeof(struct dn_long_packet) + 3);
	lp = (struct dn_long_packet *)(data+3);

	/* On-wire length excludes the length field itself */
	*((__le16 *)data) = cpu_to_le16(skb->len - 2);
	*(data + 2) = 1 | DN_RT_F_PF;	/* Padding */

	lp->msgflg = DN_RT_PKT_LONG|(cb->rt_flags&(DN_RT_F_IE|DN_RT_F_RQR|DN_RT_F_RTS));
	lp->d_area = lp->d_subarea = 0;
	dn_dn2eth(lp->d_id, cb->dst);
	lp->s_area = lp->s_subarea = 0;
	dn_dn2eth(lp->s_id, cb->src);
	lp->nl2 = 0;
	lp->visit_ct = cb->hops & 0x3f;	/* visit count is a 6 bit field */
	lp->s_class = 0;
	lp->pt = 0;

	skb_reset_network_header(skb);

	return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
}
270 270
/*
 * Transmit using the short routing header (DDCMP / X.25 links):
 * ensure headroom, prepend the 16 bit little-endian length word and
 * the short format header, then hand off to the POST_ROUTING hook.
 */
static int dn_short_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;
	int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
	struct dn_short_packet *sp;
	unsigned char *data;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);


	if (skb_headroom(skb) < headroom) {
		/* Reallocate with room for our headers; old skb is freed */
		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
		if (skb2 == NULL) {
			if (net_ratelimit())
				printk(KERN_CRIT "dn_short_output: no memory\n");
			kfree_skb(skb);
			return -ENOBUFS;
		}
		kfree_skb(skb);
		skb = skb2;
		if (net_ratelimit())
			printk(KERN_INFO "dn_short_output: Increasing headroom\n");
	}

	/* 2 byte length field precedes the routing header */
	data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
	*((__le16 *)data) = cpu_to_le16(skb->len - 2);
	sp = (struct dn_short_packet *)(data+2);

	sp->msgflg = DN_RT_PKT_SHORT|(cb->rt_flags&(DN_RT_F_RQR|DN_RT_F_RTS));
	sp->dstnode = cb->dst;
	sp->srcnode = cb->src;
	sp->forward = cb->hops & 0x3f;	/* forwarded-hops count, 6 bits */

	skb_reset_network_header(skb);

	return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
}
309 309
310 /* 310 /*
311 * Phase 3 output is the same is short output, execpt that 311 * Phase 3 output is the same is short output, execpt that
312 * it clears the area bits before transmission. 312 * it clears the area bits before transmission.
313 */ 313 */
/*
 * Phase 3 output is the same as short output, except that it clears
 * the area bits (keeping only the 10 bit node number) before
 * transmission, since phase III nodes know nothing of areas.
 */
static int dn_phase3_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;
	int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
	struct dn_short_packet *sp;
	unsigned char *data;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);

	if (skb_headroom(skb) < headroom) {
		/* Reallocate with room for our headers; old skb is freed */
		struct sk_buff *skb2 = skb_realloc_headroom(skb, headroom);
		if (skb2 == NULL) {
			if (net_ratelimit())
				printk(KERN_CRIT "dn_phase3_output: no memory\n");
			kfree_skb(skb);
			return -ENOBUFS;
		}
		kfree_skb(skb);
		skb = skb2;
		if (net_ratelimit())
			printk(KERN_INFO "dn_phase3_output: Increasing headroom\n");
	}

	/* 2 byte length field precedes the routing header */
	data = skb_push(skb, sizeof(struct dn_short_packet) + 2);
	*((__le16 *)data) = cpu_to_le16(skb->len - 2);
	sp = (struct dn_short_packet *)(data + 2);

	sp->msgflg = DN_RT_PKT_SHORT|(cb->rt_flags&(DN_RT_F_RQR|DN_RT_F_RTS));
	/* Mask off the 6 area bits; phase III addresses are node-only */
	sp->dstnode = cb->dst & cpu_to_le16(0x03ff);
	sp->srcnode = cb->src & cpu_to_le16(0x03ff);
	sp->forward = cb->hops & 0x3f;

	skb_reset_network_header(skb);

	return NF_HOOK(PF_DECnet, NF_DN_POST_ROUTING, skb, NULL, neigh->dev, dn_neigh_output_packet);
}
351 351
352 /* 352 /*
353 * Unfortunately, the neighbour code uses the device in its hash 353 * Unfortunately, the neighbour code uses the device in its hash
354 * function, so we don't get any advantage from it. This function 354 * function, so we don't get any advantage from it. This function
355 * basically does a neigh_lookup(), but without comparing the device 355 * basically does a neigh_lookup(), but without comparing the device
356 * field. This is required for the On-Ethernet cache 356 * field. This is required for the On-Ethernet cache
357 */ 357 */
358 358
359 /* 359 /*
360 * Pointopoint link receives a hello message 360 * Pointopoint link receives a hello message
361 */ 361 */
/*
 * Pointopoint link receives a hello message: nothing to learn from it
 * (the peer is fixed), so just consume the skb.
 */
void dn_neigh_pointopoint_hello(struct sk_buff *skb)
{
	kfree_skb(skb);
}
366 366
367 /* 367 /*
368 * Ethernet router hello message received 368 * Ethernet router hello message received
369 */ 369 */
/*
 * Ethernet router hello message received: create/refresh the
 * neighbour entry for the sending router, record its block size,
 * priority and routing level, and (for routers in our own area)
 * possibly elect it as the default router.  Always consumes the skb
 * and returns 0.
 */
int dn_neigh_router_hello(struct sk_buff *skb)
{
	struct rtnode_hello_message *msg = (struct rtnode_hello_message *)skb->data;

	struct neighbour *neigh;
	struct dn_neigh *dn;
	struct dn_dev *dn_db;
	__le16 src;

	src = dn_eth2dn(msg->id);

	/* Creates the entry if it does not already exist */
	neigh = __neigh_lookup(&dn_neigh_table, &src, skb->dev, 1);

	dn = (struct dn_neigh *)neigh;

	if (neigh) {
		write_lock(&neigh->lock);

		neigh->used = jiffies;
		/* NOTE(review): assumes dev->dn_ptr is non-NULL here —
		 * presumably true while DECnet is up on the device; confirm. */
		dn_db = (struct dn_dev *)neigh->dev->dn_ptr;

		if (!(neigh->nud_state & NUD_PERMANENT)) {
			neigh->updated = jiffies;

			if (neigh->dev->type == ARPHRD_ETHER)
				memcpy(neigh->ha, &eth_hdr(skb)->h_source, ETH_ALEN);

			dn->blksize  = le16_to_cpu(msg->blksize);
			dn->priority = msg->priority;

			dn->flags &= ~DN_NDFLAG_P3;

			switch(msg->iinfo & DN_RT_INFO_TYPE) {
				case DN_RT_INFO_L1RT:
					dn->flags &=~DN_NDFLAG_R2;
					dn->flags |= DN_NDFLAG_R1;
					break;
				case DN_RT_INFO_L2RT:
					dn->flags |= DN_NDFLAG_R2;
			}
		}

		/* Only use routers in our area (top 6 bits of the address) */
		if ((le16_to_cpu(src)>>10) == (le16_to_cpu((decnet_address))>>10)) {
			if (!dn_db->router) {
				dn_db->router = neigh_clone(neigh);
			} else {
				/* Prefer the highest-priority router */
				if (msg->priority > ((struct dn_neigh *)dn_db->router)->priority)
					neigh_release(xchg(&dn_db->router, neigh_clone(neigh)));
			}
		}
		write_unlock(&neigh->lock);
		neigh_release(neigh);
	}

	kfree_skb(skb);
	return 0;
}
428 428
429 /* 429 /*
430 * Endnode hello message received 430 * Endnode hello message received
431 */ 431 */
/*
 * Endnode hello message received: create/refresh the neighbour entry
 * for the sending endnode, clearing any router flags and recording
 * its block size.  Always consumes the skb and returns 0.
 */
int dn_neigh_endnode_hello(struct sk_buff *skb)
{
	struct endnode_hello_message *msg = (struct endnode_hello_message *)skb->data;
	struct neighbour *neigh;
	struct dn_neigh *dn;
	__le16 src;

	src = dn_eth2dn(msg->id);

	/* Creates the entry if it does not already exist */
	neigh = __neigh_lookup(&dn_neigh_table, &src, skb->dev, 1);

	dn = (struct dn_neigh *)neigh;

	if (neigh) {
		write_lock(&neigh->lock);

		neigh->used = jiffies;

		if (!(neigh->nud_state & NUD_PERMANENT)) {
			neigh->updated = jiffies;

			if (neigh->dev->type == ARPHRD_ETHER)
				memcpy(neigh->ha, &eth_hdr(skb)->h_source, ETH_ALEN);
			/* An endnode is no kind of router */
			dn->flags   &= ~(DN_NDFLAG_R1 | DN_NDFLAG_R2);
			dn->blksize  = le16_to_cpu(msg->blksize);
			dn->priority = 0;
		}

		write_unlock(&neigh->lock);
		neigh_release(neigh);
	}

	kfree_skb(skb);
	return 0;
}
467 467
/*
 * Scan @max router-list entries at @base (7 bytes each: a 6 byte id
 * followed by a priority byte) and return a pointer to the start of
 * the lowest-priority entry, but only if that priority is below
 * @priority.  Returns NULL when the list is empty or no entry can be
 * displaced.
 */
static char *dn_find_slot(char *base, int max, int priority)
{
	unsigned char *lowest = NULL;
	char *pri = base + 6;		/* priority byte of the first entry */
	int i;

	for (i = 0; i < max; i++, pri += 7) {
		if (lowest == NULL || *pri < *lowest)
			lowest = pri;
	}

	if (lowest == NULL || *lowest >= priority)
		return NULL;

	return (char *)lowest - 6;	/* back up to the entry's id field */
}
486 486
/* Accumulator passed through neigh_for_each() when building the
 * router list for a device (see dn_neigh_elist). */
struct elist_cb_state {
	struct net_device *dev;	/* only neighbours on this device count */
	unsigned char *ptr;	/* start of the output buffer */
	unsigned char *rs;	/* current write position (NULL: no slot) */
	int t, n;		/* entries written so far / buffer capacity */
};
493 493
/*
 * Per-neighbour callback for dn_neigh_elist(): append each router on
 * the target device to the list as "6 byte id + state/priority byte".
 * Once the buffer holds n entries, later routers displace the current
 * lowest-priority entry (via dn_find_slot) instead of appending.
 */
static void neigh_elist_cb(struct neighbour *neigh, void *_info)
{
	struct elist_cb_state *s = _info;
	struct dn_neigh *dn;

	if (neigh->dev != s->dev)
		return;

	dn = (struct dn_neigh *) neigh;
	/* Skip anything that is not a level 1 or level 2 router */
	if (!(dn->flags & (DN_NDFLAG_R1|DN_NDFLAG_R2)))
		return;

	if (s->t == s->n)
		/* Buffer full: find a lower-priority entry to overwrite */
		s->rs = dn_find_slot(s->ptr, s->n, dn->priority);
	else
		s->t++;
	if (s->rs == NULL)
		return;

	dn_dn2eth(s->rs, dn->addr);
	s->rs += 6;
	/* Top bit flags a connected (usable) router */
	*(s->rs) = neigh->nud_state & NUD_CONNECTED ? 0x80 : 0x0;
	*(s->rs) |= dn->priority;
	s->rs++;
}
519 519
520 int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n) 520 int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n)
521 { 521 {
522 struct elist_cb_state state; 522 struct elist_cb_state state;
523 523
524 state.dev = dev; 524 state.dev = dev;
525 state.t = 0; 525 state.t = 0;
526 state.n = n; 526 state.n = n;
527 state.ptr = ptr; 527 state.ptr = ptr;
528 state.rs = ptr; 528 state.rs = ptr;
529 529
530 neigh_for_each(&dn_neigh_table, neigh_elist_cb, &state); 530 neigh_for_each(&dn_neigh_table, neigh_elist_cb, &state);
531 531
532 return state.t; 532 return state.t;
533 } 533 }
534 534
535 535
536 #ifdef CONFIG_PROC_FS 536 #ifdef CONFIG_PROC_FS
537 537
/*
 * Emit one /proc/net/decnet_neigh row for neighbour @n: address,
 * R1/R2/P3 flags, NUD state, refcount, block size and device name.
 * Takes the neighbour's read lock while the fields are sampled.
 */
static inline void dn_neigh_format_entry(struct seq_file *seq,
					 struct neighbour *n)
{
	struct dn_neigh *dn = (struct dn_neigh *) n;
	char buf[DN_ASCBUF_LEN];

	read_lock(&n->lock);
	seq_printf(seq, "%-7s %s%s%s   %02x    %02d  %07ld %-8s\n",
		   dn_addr2asc(le16_to_cpu(dn->addr), buf),
		   (dn->flags&DN_NDFLAG_R1) ? "1" : "-",
		   (dn->flags&DN_NDFLAG_R2) ? "2" : "-",
		   (dn->flags&DN_NDFLAG_P3) ? "3" : "-",
		   dn->n.nud_state,
		   atomic_read(&dn->n.refcnt),
		   dn->blksize,
		   (dn->n.dev) ? dn->n.dev->name : "?");
	read_unlock(&n->lock);
}
556 556
/*
 * seq_file .show: print the column header for the start token,
 * otherwise format the neighbour entry @v points at.
 */
static int dn_neigh_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Addr    Flags State Use Blksize Dev\n");
	} else {
		dn_neigh_format_entry(seq, v);
	}

	return 0;
}
567 567
/*
 * seq_file .start: begin iteration over the DECnet neighbour table
 * (neighbours only — no proxy entries).
 */
static void *dn_neigh_seq_start(struct seq_file *seq, loff_t *pos)
{
	return neigh_seq_start(seq, pos, &dn_neigh_table,
			       NEIGH_SEQ_NEIGH_ONLY);
}
573 573
/* seq_file iterator over the neighbour table; next/stop come from the
 * generic neighbour code. */
static const struct seq_operations dn_neigh_seq_ops = {
	.start = dn_neigh_seq_start,
	.next  = neigh_seq_next,
	.stop  = neigh_seq_stop,
	.show  = dn_neigh_seq_show,
};
580 580
/* open() handler for /proc/net/decnet_neigh: per-net seq_file setup. */
static int dn_neigh_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &dn_neigh_seq_ops,
			    sizeof(struct neigh_seq_state));
}
586 586
587 static const struct file_operations dn_neigh_seq_fops = { 587 static const struct file_operations dn_neigh_seq_fops = {
588 .owner = THIS_MODULE, 588 .owner = THIS_MODULE,
589 .open = dn_neigh_seq_open, 589 .open = dn_neigh_seq_open,
590 .read = seq_read, 590 .read = seq_read,
591 .llseek = seq_lseek, 591 .llseek = seq_lseek,
592 .release = seq_release_net, 592 .release = seq_release_net,
593 }; 593 };
594 594
595 #endif 595 #endif
596 596
597 void __init dn_neigh_init(void) 597 void __init dn_neigh_init(void)
598 { 598 {
599 neigh_table_init(&dn_neigh_table); 599 neigh_table_init(&dn_neigh_table);
600 proc_net_fops_create(&init_net, "decnet_neigh", S_IRUGO, &dn_neigh_seq_fops); 600 proc_net_fops_create(&init_net, "decnet_neigh", S_IRUGO, &dn_neigh_seq_fops);
601 } 601 }
602 602
603 void __exit dn_neigh_cleanup(void) 603 void __exit dn_neigh_cleanup(void)
604 { 604 {
605 proc_net_remove(&init_net, "decnet_neigh"); 605 proc_net_remove(&init_net, "decnet_neigh");
606 neigh_table_clear(&dn_neigh_table); 606 neigh_table_clear(&dn_neigh_table);
607 } 607 }
608 608
net/decnet/dn_nsp_in.c
1 /* 1 /*
2 * DECnet An implementation of the DECnet protocol suite for the LINUX 2 * DECnet An implementation of the DECnet protocol suite for the LINUX
3 * operating system. DECnet is implemented using the BSD Socket 3 * operating system. DECnet is implemented using the BSD Socket
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * DECnet Network Services Protocol (Input) 6 * DECnet Network Services Protocol (Input)
7 * 7 *
8 * Author: Eduardo Marcelo Serrat <emserrat@geocities.com> 8 * Author: Eduardo Marcelo Serrat <emserrat@geocities.com>
9 * 9 *
10 * Changes: 10 * Changes:
11 * 11 *
12 * Steve Whitehouse: Split into dn_nsp_in.c and dn_nsp_out.c from 12 * Steve Whitehouse: Split into dn_nsp_in.c and dn_nsp_out.c from
13 * original dn_nsp.c. 13 * original dn_nsp.c.
14 * Steve Whitehouse: Updated to work with my new routing architecture. 14 * Steve Whitehouse: Updated to work with my new routing architecture.
15 * Steve Whitehouse: Add changes from Eduardo Serrat's patches. 15 * Steve Whitehouse: Add changes from Eduardo Serrat's patches.
16 * Steve Whitehouse: Put all ack handling code in a common routine. 16 * Steve Whitehouse: Put all ack handling code in a common routine.
17 * Steve Whitehouse: Put other common bits into dn_nsp_rx() 17 * Steve Whitehouse: Put other common bits into dn_nsp_rx()
18 * Steve Whitehouse: More checks on skb->len to catch bogus packets 18 * Steve Whitehouse: More checks on skb->len to catch bogus packets
19 * Fixed various race conditions and possible nasties. 19 * Fixed various race conditions and possible nasties.
20 * Steve Whitehouse: Now handles returned conninit frames. 20 * Steve Whitehouse: Now handles returned conninit frames.
21 * David S. Miller: New socket locking 21 * David S. Miller: New socket locking
22 * Steve Whitehouse: Fixed lockup when socket filtering was enabled. 22 * Steve Whitehouse: Fixed lockup when socket filtering was enabled.
23 * Paul Koning: Fix to push CC sockets into RUN when acks are 23 * Paul Koning: Fix to push CC sockets into RUN when acks are
24 * received. 24 * received.
25 * Steve Whitehouse: 25 * Steve Whitehouse:
26 * Patrick Caulfield: Checking conninits for correctness & sending of error 26 * Patrick Caulfield: Checking conninits for correctness & sending of error
27 * responses. 27 * responses.
28 * Steve Whitehouse: Added backlog congestion level return codes. 28 * Steve Whitehouse: Added backlog congestion level return codes.
29 * Patrick Caulfield: 29 * Patrick Caulfield:
30 * Steve Whitehouse: Added flow control support (outbound) 30 * Steve Whitehouse: Added flow control support (outbound)
31 * Steve Whitehouse: Prepare for nonlinear skbs 31 * Steve Whitehouse: Prepare for nonlinear skbs
32 */ 32 */
33 33
34 /****************************************************************************** 34 /******************************************************************************
35 (c) 1995-1998 E.M. Serrat emserrat@geocities.com 35 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
36 36
37 This program is free software; you can redistribute it and/or modify 37 This program is free software; you can redistribute it and/or modify
38 it under the terms of the GNU General Public License as published by 38 it under the terms of the GNU General Public License as published by
39 the Free Software Foundation; either version 2 of the License, or 39 the Free Software Foundation; either version 2 of the License, or
40 any later version. 40 any later version.
41 41
42 This program is distributed in the hope that it will be useful, 42 This program is distributed in the hope that it will be useful,
43 but WITHOUT ANY WARRANTY; without even the implied warranty of 43 but WITHOUT ANY WARRANTY; without even the implied warranty of
44 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 44 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
45 GNU General Public License for more details. 45 GNU General Public License for more details.
46 *******************************************************************************/ 46 *******************************************************************************/
47 47
48 #include <linux/errno.h> 48 #include <linux/errno.h>
49 #include <linux/types.h> 49 #include <linux/types.h>
50 #include <linux/socket.h> 50 #include <linux/socket.h>
51 #include <linux/in.h> 51 #include <linux/in.h>
52 #include <linux/kernel.h> 52 #include <linux/kernel.h>
53 #include <linux/timer.h> 53 #include <linux/timer.h>
54 #include <linux/string.h> 54 #include <linux/string.h>
55 #include <linux/sockios.h> 55 #include <linux/sockios.h>
56 #include <linux/net.h> 56 #include <linux/net.h>
57 #include <linux/netdevice.h> 57 #include <linux/netdevice.h>
58 #include <linux/inet.h> 58 #include <linux/inet.h>
59 #include <linux/route.h> 59 #include <linux/route.h>
60 #include <net/sock.h> 60 #include <net/sock.h>
61 #include <net/tcp_states.h> 61 #include <net/tcp_states.h>
62 #include <asm/system.h> 62 #include <asm/system.h>
63 #include <linux/fcntl.h> 63 #include <linux/fcntl.h>
64 #include <linux/mm.h> 64 #include <linux/mm.h>
65 #include <linux/termios.h> 65 #include <linux/termios.h>
66 #include <linux/interrupt.h> 66 #include <linux/interrupt.h>
67 #include <linux/proc_fs.h> 67 #include <linux/proc_fs.h>
68 #include <linux/stat.h> 68 #include <linux/stat.h>
69 #include <linux/init.h> 69 #include <linux/init.h>
70 #include <linux/poll.h> 70 #include <linux/poll.h>
71 #include <linux/netfilter_decnet.h> 71 #include <linux/netfilter_decnet.h>
72 #include <net/neighbour.h> 72 #include <net/neighbour.h>
73 #include <net/dst.h> 73 #include <net/dst.h>
74 #include <net/dn.h> 74 #include <net/dn.h>
75 #include <net/dn_nsp.h> 75 #include <net/dn_nsp.h>
76 #include <net/dn_dev.h> 76 #include <net/dn_dev.h>
77 #include <net/dn_route.h> 77 #include <net/dn_route.h>
78 78
79 extern int decnet_log_martians; 79 extern int decnet_log_martians;
80 80
81 static void dn_log_martian(struct sk_buff *skb, const char *msg) 81 static void dn_log_martian(struct sk_buff *skb, const char *msg)
82 { 82 {
83 if (decnet_log_martians && net_ratelimit()) { 83 if (decnet_log_martians && net_ratelimit()) {
84 char *devname = skb->dev ? skb->dev->name : "???"; 84 char *devname = skb->dev ? skb->dev->name : "???";
85 struct dn_skb_cb *cb = DN_SKB_CB(skb); 85 struct dn_skb_cb *cb = DN_SKB_CB(skb);
86 printk(KERN_INFO "DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n", msg, devname, dn_ntohs(cb->src), dn_ntohs(cb->dst), dn_ntohs(cb->src_port), dn_ntohs(cb->dst_port)); 86 printk(KERN_INFO "DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n",
87 msg, devname, le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
88 le16_to_cpu(cb->src_port), le16_to_cpu(cb->dst_port));
87 } 89 }
88 } 90 }
89 91
90 /* 92 /*
91 * For this function we've flipped the cross-subchannel bit 93 * For this function we've flipped the cross-subchannel bit
92 * if the message is an otherdata or linkservice message. Thus 94 * if the message is an otherdata or linkservice message. Thus
93 * we can use it to work out what to update. 95 * we can use it to work out what to update.
94 */ 96 */
95 static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack) 97 static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack)
96 { 98 {
97 struct dn_scp *scp = DN_SK(sk); 99 struct dn_scp *scp = DN_SK(sk);
98 unsigned short type = ((ack >> 12) & 0x0003); 100 unsigned short type = ((ack >> 12) & 0x0003);
99 int wakeup = 0; 101 int wakeup = 0;
100 102
101 switch(type) { 103 switch(type) {
102 case 0: /* ACK - Data */ 104 case 0: /* ACK - Data */
103 if (dn_after(ack, scp->ackrcv_dat)) { 105 if (dn_after(ack, scp->ackrcv_dat)) {
104 scp->ackrcv_dat = ack & 0x0fff; 106 scp->ackrcv_dat = ack & 0x0fff;
105 wakeup |= dn_nsp_check_xmit_queue(sk, skb, &scp->data_xmit_queue, ack); 107 wakeup |= dn_nsp_check_xmit_queue(sk, skb, &scp->data_xmit_queue, ack);
106 } 108 }
107 break; 109 break;
108 case 1: /* NAK - Data */ 110 case 1: /* NAK - Data */
109 break; 111 break;
110 case 2: /* ACK - OtherData */ 112 case 2: /* ACK - OtherData */
111 if (dn_after(ack, scp->ackrcv_oth)) { 113 if (dn_after(ack, scp->ackrcv_oth)) {
112 scp->ackrcv_oth = ack & 0x0fff; 114 scp->ackrcv_oth = ack & 0x0fff;
113 wakeup |= dn_nsp_check_xmit_queue(sk, skb, &scp->other_xmit_queue, ack); 115 wakeup |= dn_nsp_check_xmit_queue(sk, skb, &scp->other_xmit_queue, ack);
114 } 116 }
115 break; 117 break;
116 case 3: /* NAK - OtherData */ 118 case 3: /* NAK - OtherData */
117 break; 119 break;
118 } 120 }
119 121
120 if (wakeup && !sock_flag(sk, SOCK_DEAD)) 122 if (wakeup && !sock_flag(sk, SOCK_DEAD))
121 sk->sk_state_change(sk); 123 sk->sk_state_change(sk);
122 } 124 }
123 125
124 /* 126 /*
125 * This function is a universal ack processor. 127 * This function is a universal ack processor.
126 */ 128 */
127 static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth) 129 static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth)
128 { 130 {
129 __le16 *ptr = (__le16 *)skb->data; 131 __le16 *ptr = (__le16 *)skb->data;
130 int len = 0; 132 int len = 0;
131 unsigned short ack; 133 unsigned short ack;
132 134
133 if (skb->len < 2) 135 if (skb->len < 2)
134 return len; 136 return len;
135 137
136 if ((ack = dn_ntohs(*ptr)) & 0x8000) { 138 if ((ack = le16_to_cpu(*ptr)) & 0x8000) {
137 skb_pull(skb, 2); 139 skb_pull(skb, 2);
138 ptr++; 140 ptr++;
139 len += 2; 141 len += 2;
140 if ((ack & 0x4000) == 0) { 142 if ((ack & 0x4000) == 0) {
141 if (oth) 143 if (oth)
142 ack ^= 0x2000; 144 ack ^= 0x2000;
143 dn_ack(sk, skb, ack); 145 dn_ack(sk, skb, ack);
144 } 146 }
145 } 147 }
146 148
147 if (skb->len < 2) 149 if (skb->len < 2)
148 return len; 150 return len;
149 151
150 if ((ack = dn_ntohs(*ptr)) & 0x8000) { 152 if ((ack = le16_to_cpu(*ptr)) & 0x8000) {
151 skb_pull(skb, 2); 153 skb_pull(skb, 2);
152 len += 2; 154 len += 2;
153 if ((ack & 0x4000) == 0) { 155 if ((ack & 0x4000) == 0) {
154 if (oth) 156 if (oth)
155 ack ^= 0x2000; 157 ack ^= 0x2000;
156 dn_ack(sk, skb, ack); 158 dn_ack(sk, skb, ack);
157 } 159 }
158 } 160 }
159 161
160 return len; 162 return len;
161 } 163 }
162 164
163 165
/**
 * dn_check_idf - Check an image data field format is correct.
 * @pptr: Pointer to pointer to image data
 * @len: Pointer to length of image data
 * @max: The maximum allowed length of the data in the image data field
 * @follow_on: Check that this many bytes exist beyond the end of the image data
 *
 * On success @pptr is advanced past the field and @len reduced by the
 * bytes consumed; on failure both may be partially updated.
 *
 * Returns: 0 if ok, -1 on error
 */
static inline int dn_check_idf(unsigned char **pptr, int *len, unsigned char max, unsigned char follow_on)
{
	unsigned char *data = *pptr;
	unsigned char field_len = *data++;

	/* The length byte itself has been consumed. */
	--(*len);

	/* The field must respect the protocol limit and fit in the
	 * remaining buffer with @follow_on bytes to spare after it. */
	if (field_len > max || (field_len + follow_on) > *len)
		return -1;

	*len -= field_len;
	*pptr = data + field_len;
	return 0;
}
188 190
189 /* 191 /*
190 * Table of reason codes to pass back to node which sent us a badly 192 * Table of reason codes to pass back to node which sent us a badly
191 * formed message, plus text messages for the log. A zero entry in 193 * formed message, plus text messages for the log. A zero entry in
192 * the reason field means "don't reply" otherwise a disc init is sent with 194 * the reason field means "don't reply" otherwise a disc init is sent with
193 * the specified reason code. 195 * the specified reason code.
194 */ 196 */
195 static struct { 197 static struct {
196 unsigned short reason; 198 unsigned short reason;
197 const char *text; 199 const char *text;
198 } ci_err_table[] = { 200 } ci_err_table[] = {
199 { 0, "CI: Truncated message" }, 201 { 0, "CI: Truncated message" },
200 { NSP_REASON_ID, "CI: Destination username error" }, 202 { NSP_REASON_ID, "CI: Destination username error" },
201 { NSP_REASON_ID, "CI: Destination username type" }, 203 { NSP_REASON_ID, "CI: Destination username type" },
202 { NSP_REASON_US, "CI: Source username error" }, 204 { NSP_REASON_US, "CI: Source username error" },
203 { 0, "CI: Truncated at menuver" }, 205 { 0, "CI: Truncated at menuver" },
204 { 0, "CI: Truncated before access or user data" }, 206 { 0, "CI: Truncated before access or user data" },
205 { NSP_REASON_IO, "CI: Access data format error" }, 207 { NSP_REASON_IO, "CI: Access data format error" },
206 { NSP_REASON_IO, "CI: User data format error" } 208 { NSP_REASON_IO, "CI: User data format error" }
207 }; 209 };
208 210
209 /* 211 /*
210 * This function uses a slightly different lookup method 212 * This function uses a slightly different lookup method
211 * to find its sockets, since it searches on object name/number 213 * to find its sockets, since it searches on object name/number
212 * rather than port numbers. Various tests are done to ensure that 214 * rather than port numbers. Various tests are done to ensure that
213 * the incoming data is in the correct format before it is queued to 215 * the incoming data is in the correct format before it is queued to
214 * a socket. 216 * a socket.
215 */ 217 */
216 static struct sock *dn_find_listener(struct sk_buff *skb, unsigned short *reason) 218 static struct sock *dn_find_listener(struct sk_buff *skb, unsigned short *reason)
217 { 219 {
218 struct dn_skb_cb *cb = DN_SKB_CB(skb); 220 struct dn_skb_cb *cb = DN_SKB_CB(skb);
219 struct nsp_conn_init_msg *msg = (struct nsp_conn_init_msg *)skb->data; 221 struct nsp_conn_init_msg *msg = (struct nsp_conn_init_msg *)skb->data;
220 struct sockaddr_dn dstaddr; 222 struct sockaddr_dn dstaddr;
221 struct sockaddr_dn srcaddr; 223 struct sockaddr_dn srcaddr;
222 unsigned char type = 0; 224 unsigned char type = 0;
223 int dstlen; 225 int dstlen;
224 int srclen; 226 int srclen;
225 unsigned char *ptr; 227 unsigned char *ptr;
226 int len; 228 int len;
227 int err = 0; 229 int err = 0;
228 unsigned char menuver; 230 unsigned char menuver;
229 231
230 memset(&dstaddr, 0, sizeof(struct sockaddr_dn)); 232 memset(&dstaddr, 0, sizeof(struct sockaddr_dn));
231 memset(&srcaddr, 0, sizeof(struct sockaddr_dn)); 233 memset(&srcaddr, 0, sizeof(struct sockaddr_dn));
232 234
233 /* 235 /*
234 * 1. Decode & remove message header 236 * 1. Decode & remove message header
235 */ 237 */
236 cb->src_port = msg->srcaddr; 238 cb->src_port = msg->srcaddr;
237 cb->dst_port = msg->dstaddr; 239 cb->dst_port = msg->dstaddr;
238 cb->services = msg->services; 240 cb->services = msg->services;
239 cb->info = msg->info; 241 cb->info = msg->info;
240 cb->segsize = dn_ntohs(msg->segsize); 242 cb->segsize = le16_to_cpu(msg->segsize);
241 243
242 if (!pskb_may_pull(skb, sizeof(*msg))) 244 if (!pskb_may_pull(skb, sizeof(*msg)))
243 goto err_out; 245 goto err_out;
244 246
245 skb_pull(skb, sizeof(*msg)); 247 skb_pull(skb, sizeof(*msg));
246 248
247 len = skb->len; 249 len = skb->len;
248 ptr = skb->data; 250 ptr = skb->data;
249 251
250 /* 252 /*
251 * 2. Check destination end username format 253 * 2. Check destination end username format
252 */ 254 */
253 dstlen = dn_username2sockaddr(ptr, len, &dstaddr, &type); 255 dstlen = dn_username2sockaddr(ptr, len, &dstaddr, &type);
254 err++; 256 err++;
255 if (dstlen < 0) 257 if (dstlen < 0)
256 goto err_out; 258 goto err_out;
257 259
258 err++; 260 err++;
259 if (type > 1) 261 if (type > 1)
260 goto err_out; 262 goto err_out;
261 263
262 len -= dstlen; 264 len -= dstlen;
263 ptr += dstlen; 265 ptr += dstlen;
264 266
265 /* 267 /*
266 * 3. Check source end username format 268 * 3. Check source end username format
267 */ 269 */
268 srclen = dn_username2sockaddr(ptr, len, &srcaddr, &type); 270 srclen = dn_username2sockaddr(ptr, len, &srcaddr, &type);
269 err++; 271 err++;
270 if (srclen < 0) 272 if (srclen < 0)
271 goto err_out; 273 goto err_out;
272 274
273 len -= srclen; 275 len -= srclen;
274 ptr += srclen; 276 ptr += srclen;
275 err++; 277 err++;
276 if (len < 1) 278 if (len < 1)
277 goto err_out; 279 goto err_out;
278 280
279 menuver = *ptr; 281 menuver = *ptr;
280 ptr++; 282 ptr++;
281 len--; 283 len--;
282 284
283 /* 285 /*
284 * 4. Check that optional data actually exists if menuver says it does 286 * 4. Check that optional data actually exists if menuver says it does
285 */ 287 */
286 err++; 288 err++;
287 if ((menuver & (DN_MENUVER_ACC | DN_MENUVER_USR)) && (len < 1)) 289 if ((menuver & (DN_MENUVER_ACC | DN_MENUVER_USR)) && (len < 1))
288 goto err_out; 290 goto err_out;
289 291
290 /* 292 /*
291 * 5. Check optional access data format 293 * 5. Check optional access data format
292 */ 294 */
293 err++; 295 err++;
294 if (menuver & DN_MENUVER_ACC) { 296 if (menuver & DN_MENUVER_ACC) {
295 if (dn_check_idf(&ptr, &len, 39, 1)) 297 if (dn_check_idf(&ptr, &len, 39, 1))
296 goto err_out; 298 goto err_out;
297 if (dn_check_idf(&ptr, &len, 39, 1)) 299 if (dn_check_idf(&ptr, &len, 39, 1))
298 goto err_out; 300 goto err_out;
299 if (dn_check_idf(&ptr, &len, 39, (menuver & DN_MENUVER_USR) ? 1 : 0)) 301 if (dn_check_idf(&ptr, &len, 39, (menuver & DN_MENUVER_USR) ? 1 : 0))
300 goto err_out; 302 goto err_out;
301 } 303 }
302 304
303 /* 305 /*
304 * 6. Check optional user data format 306 * 6. Check optional user data format
305 */ 307 */
306 err++; 308 err++;
307 if (menuver & DN_MENUVER_USR) { 309 if (menuver & DN_MENUVER_USR) {
308 if (dn_check_idf(&ptr, &len, 16, 0)) 310 if (dn_check_idf(&ptr, &len, 16, 0))
309 goto err_out; 311 goto err_out;
310 } 312 }
311 313
312 /* 314 /*
313 * 7. Look up socket based on destination end username 315 * 7. Look up socket based on destination end username
314 */ 316 */
315 return dn_sklist_find_listener(&dstaddr); 317 return dn_sklist_find_listener(&dstaddr);
316 err_out: 318 err_out:
317 dn_log_martian(skb, ci_err_table[err].text); 319 dn_log_martian(skb, ci_err_table[err].text);
318 *reason = ci_err_table[err].reason; 320 *reason = ci_err_table[err].reason;
319 return NULL; 321 return NULL;
320 } 322 }
321 323
322 324
323 static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb) 325 static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb)
324 { 326 {
325 if (sk_acceptq_is_full(sk)) { 327 if (sk_acceptq_is_full(sk)) {
326 kfree_skb(skb); 328 kfree_skb(skb);
327 return; 329 return;
328 } 330 }
329 331
330 sk->sk_ack_backlog++; 332 sk->sk_ack_backlog++;
331 skb_queue_tail(&sk->sk_receive_queue, skb); 333 skb_queue_tail(&sk->sk_receive_queue, skb);
332 sk->sk_state_change(sk); 334 sk->sk_state_change(sk);
333 } 335 }
334 336
335 static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb) 337 static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb)
336 { 338 {
337 struct dn_skb_cb *cb = DN_SKB_CB(skb); 339 struct dn_skb_cb *cb = DN_SKB_CB(skb);
338 struct dn_scp *scp = DN_SK(sk); 340 struct dn_scp *scp = DN_SK(sk);
339 unsigned char *ptr; 341 unsigned char *ptr;
340 342
341 if (skb->len < 4) 343 if (skb->len < 4)
342 goto out; 344 goto out;
343 345
344 ptr = skb->data; 346 ptr = skb->data;
345 cb->services = *ptr++; 347 cb->services = *ptr++;
346 cb->info = *ptr++; 348 cb->info = *ptr++;
347 cb->segsize = dn_ntohs(*(__le16 *)ptr); 349 cb->segsize = le16_to_cpu(*(__le16 *)ptr);
348 350
349 if ((scp->state == DN_CI) || (scp->state == DN_CD)) { 351 if ((scp->state == DN_CI) || (scp->state == DN_CD)) {
350 scp->persist = 0; 352 scp->persist = 0;
351 scp->addrrem = cb->src_port; 353 scp->addrrem = cb->src_port;
352 sk->sk_state = TCP_ESTABLISHED; 354 sk->sk_state = TCP_ESTABLISHED;
353 scp->state = DN_RUN; 355 scp->state = DN_RUN;
354 scp->services_rem = cb->services; 356 scp->services_rem = cb->services;
355 scp->info_rem = cb->info; 357 scp->info_rem = cb->info;
356 scp->segsize_rem = cb->segsize; 358 scp->segsize_rem = cb->segsize;
357 359
358 if ((scp->services_rem & NSP_FC_MASK) == NSP_FC_NONE) 360 if ((scp->services_rem & NSP_FC_MASK) == NSP_FC_NONE)
359 scp->max_window = decnet_no_fc_max_cwnd; 361 scp->max_window = decnet_no_fc_max_cwnd;
360 362
361 if (skb->len > 0) { 363 if (skb->len > 0) {
362 u16 dlen = *skb->data; 364 u16 dlen = *skb->data;
363 if ((dlen <= 16) && (dlen <= skb->len)) { 365 if ((dlen <= 16) && (dlen <= skb->len)) {
364 scp->conndata_in.opt_optl = dn_htons(dlen); 366 scp->conndata_in.opt_optl = cpu_to_le16(dlen);
365 skb_copy_from_linear_data_offset(skb, 1, 367 skb_copy_from_linear_data_offset(skb, 1,
366 scp->conndata_in.opt_data, dlen); 368 scp->conndata_in.opt_data, dlen);
367 } 369 }
368 } 370 }
369 dn_nsp_send_link(sk, DN_NOCHANGE, 0); 371 dn_nsp_send_link(sk, DN_NOCHANGE, 0);
370 if (!sock_flag(sk, SOCK_DEAD)) 372 if (!sock_flag(sk, SOCK_DEAD))
371 sk->sk_state_change(sk); 373 sk->sk_state_change(sk);
372 } 374 }
373 375
374 out: 376 out:
375 kfree_skb(skb); 377 kfree_skb(skb);
376 } 378 }
377 379
378 static void dn_nsp_conn_ack(struct sock *sk, struct sk_buff *skb) 380 static void dn_nsp_conn_ack(struct sock *sk, struct sk_buff *skb)
379 { 381 {
380 struct dn_scp *scp = DN_SK(sk); 382 struct dn_scp *scp = DN_SK(sk);
381 383
382 if (scp->state == DN_CI) { 384 if (scp->state == DN_CI) {
383 scp->state = DN_CD; 385 scp->state = DN_CD;
384 scp->persist = 0; 386 scp->persist = 0;
385 } 387 }
386 388
387 kfree_skb(skb); 389 kfree_skb(skb);
388 } 390 }
389 391
/* Handle a disconnect-init from the remote end: record the reason and
 * optional disconnect data, move the state machine accordingly and, if
 * we know the remote port, confirm the disconnect.
 */
static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb)
{
	struct dn_scp *scp = DN_SK(sk);
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned short reason;

	if (skb->len < 2)
		goto out;

	reason = le16_to_cpu(*(__le16 *)skb->data);
	skb_pull(skb, 2);

	scp->discdata_in.opt_status = cpu_to_le16(reason);
	scp->discdata_in.opt_optl = 0;
	memset(scp->discdata_in.opt_data, 0, 16);

	/* Optional disconnect data: one length byte + payload. */
	if (skb->len > 0) {
		u16 dlen = *skb->data;

		if (dlen <= 16 && dlen <= skb->len) {
			scp->discdata_in.opt_optl = cpu_to_le16(dlen);
			skb_copy_from_linear_data_offset(skb, 1,
					scp->discdata_in.opt_data, dlen);
		}
	}

	scp->addrrem = cb->src_port;
	sk->sk_state = TCP_CLOSE;

	switch (scp->state) {
	case DN_CI:
	case DN_CD:
		/* Disconnected while still connecting: refused. */
		scp->state = DN_RJ;
		sk->sk_err = ECONNREFUSED;
		break;
	case DN_RUN:
		sk->sk_shutdown |= SHUTDOWN_MASK;
		scp->state = DN_DN;
		break;
	case DN_DI:
		scp->state = DN_DIC;
		break;
	}

	if (!sock_flag(sk, SOCK_DEAD)) {
		if (sk->sk_socket->state != SS_UNCONNECTED)
			sk->sk_socket->state = SS_DISCONNECTING;
		sk->sk_state_change(sk);
	}

	/*
	 * It appears that it is possible for remote machines to send disc
	 * init messages with no port identifier if we are in the CI and
	 * possibly also the CD state. Obviously we shouldn't reply with
	 * a message if we don't know what the end point is.
	 */
	if (scp->addrrem)
		dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);

	scp->persist_fxn = dn_destroy_timer;
	scp->persist = dn_nsp_persist(sk);

out:
	kfree_skb(skb);
}
453 455
454 /* 456 /*
455 * disc_conf messages are also called no_resources or no_link 457 * disc_conf messages are also called no_resources or no_link
456 * messages depending upon the "reason" field. 458 * messages depending upon the "reason" field.
457 */ 459 */
458 static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb) 460 static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb)
459 { 461 {
460 struct dn_scp *scp = DN_SK(sk); 462 struct dn_scp *scp = DN_SK(sk);
461 unsigned short reason; 463 unsigned short reason;
462 464
463 if (skb->len != 2) 465 if (skb->len != 2)
464 goto out; 466 goto out;
465 467
466 reason = dn_ntohs(*(__le16 *)skb->data); 468 reason = le16_to_cpu(*(__le16 *)skb->data);
467 469
468 sk->sk_state = TCP_CLOSE; 470 sk->sk_state = TCP_CLOSE;
469 471
470 switch(scp->state) { 472 switch(scp->state) {
471 case DN_CI: 473 case DN_CI:
472 scp->state = DN_NR; 474 scp->state = DN_NR;
473 break; 475 break;
474 case DN_DR: 476 case DN_DR:
475 if (reason == NSP_REASON_DC) 477 if (reason == NSP_REASON_DC)
476 scp->state = DN_DRC; 478 scp->state = DN_DRC;
477 if (reason == NSP_REASON_NL) 479 if (reason == NSP_REASON_NL)
478 scp->state = DN_CN; 480 scp->state = DN_CN;
479 break; 481 break;
480 case DN_DI: 482 case DN_DI:
481 scp->state = DN_DIC; 483 scp->state = DN_DIC;
482 break; 484 break;
483 case DN_RUN: 485 case DN_RUN:
484 sk->sk_shutdown |= SHUTDOWN_MASK; 486 sk->sk_shutdown |= SHUTDOWN_MASK;
485 case DN_CC: 487 case DN_CC:
486 scp->state = DN_CN; 488 scp->state = DN_CN;
487 } 489 }
488 490
489 if (!sock_flag(sk, SOCK_DEAD)) { 491 if (!sock_flag(sk, SOCK_DEAD)) {
490 if (sk->sk_socket->state != SS_UNCONNECTED) 492 if (sk->sk_socket->state != SS_UNCONNECTED)
491 sk->sk_socket->state = SS_DISCONNECTING; 493 sk->sk_socket->state = SS_DISCONNECTING;
492 sk->sk_state_change(sk); 494 sk->sk_state_change(sk);
493 } 495 }
494 496
495 scp->persist_fxn = dn_destroy_timer; 497 scp->persist_fxn = dn_destroy_timer;
496 scp->persist = dn_nsp_persist(sk); 498 scp->persist = dn_nsp_persist(sk);
497 499
498 out: 500 out:
499 kfree_skb(skb); 501 kfree_skb(skb);
500 } 502 }
501 503
502 static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb) 504 static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb)
503 { 505 {
504 struct dn_scp *scp = DN_SK(sk); 506 struct dn_scp *scp = DN_SK(sk);
505 unsigned short segnum; 507 unsigned short segnum;
506 unsigned char lsflags; 508 unsigned char lsflags;
507 signed char fcval; 509 signed char fcval;
508 int wake_up = 0; 510 int wake_up = 0;
509 char *ptr = skb->data; 511 char *ptr = skb->data;
510 unsigned char fctype = scp->services_rem & NSP_FC_MASK; 512 unsigned char fctype = scp->services_rem & NSP_FC_MASK;
511 513
512 if (skb->len != 4) 514 if (skb->len != 4)
513 goto out; 515 goto out;
514 516
515 segnum = dn_ntohs(*(__le16 *)ptr); 517 segnum = le16_to_cpu(*(__le16 *)ptr);
516 ptr += 2; 518 ptr += 2;
517 lsflags = *(unsigned char *)ptr++; 519 lsflags = *(unsigned char *)ptr++;
518 fcval = *ptr; 520 fcval = *ptr;
519 521
520 /* 522 /*
521 * Here we ignore erronous packets which should really 523 * Here we ignore erronous packets which should really
522 * should cause a connection abort. It is not critical 524 * should cause a connection abort. It is not critical
523 * for now though. 525 * for now though.
524 */ 526 */
525 if (lsflags & 0xf8) 527 if (lsflags & 0xf8)
526 goto out; 528 goto out;
527 529
528 if (seq_next(scp->numoth_rcv, segnum)) { 530 if (seq_next(scp->numoth_rcv, segnum)) {
529 seq_add(&scp->numoth_rcv, 1); 531 seq_add(&scp->numoth_rcv, 1);
530 switch(lsflags & 0x04) { /* FCVAL INT */ 532 switch(lsflags & 0x04) { /* FCVAL INT */
531 case 0x00: /* Normal Request */ 533 case 0x00: /* Normal Request */
532 switch(lsflags & 0x03) { /* FCVAL MOD */ 534 switch(lsflags & 0x03) { /* FCVAL MOD */
533 case 0x00: /* Request count */ 535 case 0x00: /* Request count */
534 if (fcval < 0) { 536 if (fcval < 0) {
535 unsigned char p_fcval = -fcval; 537 unsigned char p_fcval = -fcval;
536 if ((scp->flowrem_dat > p_fcval) && 538 if ((scp->flowrem_dat > p_fcval) &&
537 (fctype == NSP_FC_SCMC)) { 539 (fctype == NSP_FC_SCMC)) {
538 scp->flowrem_dat -= p_fcval; 540 scp->flowrem_dat -= p_fcval;
539 } 541 }
540 } else if (fcval > 0) { 542 } else if (fcval > 0) {
541 scp->flowrem_dat += fcval; 543 scp->flowrem_dat += fcval;
542 wake_up = 1; 544 wake_up = 1;
543 } 545 }
544 break; 546 break;
545 case 0x01: /* Stop outgoing data */ 547 case 0x01: /* Stop outgoing data */
546 scp->flowrem_sw = DN_DONTSEND; 548 scp->flowrem_sw = DN_DONTSEND;
547 break; 549 break;
548 case 0x02: /* Ok to start again */ 550 case 0x02: /* Ok to start again */
549 scp->flowrem_sw = DN_SEND; 551 scp->flowrem_sw = DN_SEND;
550 dn_nsp_output(sk); 552 dn_nsp_output(sk);
551 wake_up = 1; 553 wake_up = 1;
552 } 554 }
553 break; 555 break;
554 case 0x04: /* Interrupt Request */ 556 case 0x04: /* Interrupt Request */
555 if (fcval > 0) { 557 if (fcval > 0) {
556 scp->flowrem_oth += fcval; 558 scp->flowrem_oth += fcval;
557 wake_up = 1; 559 wake_up = 1;
558 } 560 }
559 break; 561 break;
560 } 562 }
561 if (wake_up && !sock_flag(sk, SOCK_DEAD)) 563 if (wake_up && !sock_flag(sk, SOCK_DEAD))
562 sk->sk_state_change(sk); 564 sk->sk_state_change(sk);
563 } 565 }
564 566
565 dn_nsp_send_oth_ack(sk); 567 dn_nsp_send_oth_ack(sk);
566 568
567 out: 569 out:
568 kfree_skb(skb); 570 kfree_skb(skb);
569 } 571 }
570 572
571 /* 573 /*
572 * Copy of sock_queue_rcv_skb (from sock.h) without 574 * Copy of sock_queue_rcv_skb (from sock.h) without
573 * bh_lock_sock() (its already held when this is called) which 575 * bh_lock_sock() (its already held when this is called) which
574 * also allows data and other data to be queued to a socket. 576 * also allows data and other data to be queued to a socket.
575 */ 577 */
576 static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue) 578 static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
577 { 579 {
578 int err; 580 int err;
579 581
580 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces 582 /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
581 number of warnings when compiling with -W --ANK 583 number of warnings when compiling with -W --ANK
582 */ 584 */
583 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 585 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
584 (unsigned)sk->sk_rcvbuf) { 586 (unsigned)sk->sk_rcvbuf) {
585 err = -ENOMEM; 587 err = -ENOMEM;
586 goto out; 588 goto out;
587 } 589 }
588 590
589 err = sk_filter(sk, skb); 591 err = sk_filter(sk, skb);
590 if (err) 592 if (err)
591 goto out; 593 goto out;
592 594
593 skb_set_owner_r(skb, sk); 595 skb_set_owner_r(skb, sk);
594 skb_queue_tail(queue, skb); 596 skb_queue_tail(queue, skb);
595 597
596 /* This code only runs from BH or BH protected context. 598 /* This code only runs from BH or BH protected context.
597 * Therefore the plain read_lock is ok here. -DaveM 599 * Therefore the plain read_lock is ok here. -DaveM
598 */ 600 */
599 read_lock(&sk->sk_callback_lock); 601 read_lock(&sk->sk_callback_lock);
600 if (!sock_flag(sk, SOCK_DEAD)) { 602 if (!sock_flag(sk, SOCK_DEAD)) {
601 struct socket *sock = sk->sk_socket; 603 struct socket *sock = sk->sk_socket;
602 wake_up_interruptible(sk->sk_sleep); 604 wake_up_interruptible(sk->sk_sleep);
603 if (sock && sock->fasync_list && 605 if (sock && sock->fasync_list &&
604 !test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) 606 !test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
605 __kill_fasync(sock->fasync_list, sig, 607 __kill_fasync(sock->fasync_list, sig,
606 (sig == SIGURG) ? POLL_PRI : POLL_IN); 608 (sig == SIGURG) ? POLL_PRI : POLL_IN);
607 } 609 }
608 read_unlock(&sk->sk_callback_lock); 610 read_unlock(&sk->sk_callback_lock);
609 out: 611 out:
610 return err; 612 return err;
611 } 613 }
612 614
613 static void dn_nsp_otherdata(struct sock *sk, struct sk_buff *skb) 615 static void dn_nsp_otherdata(struct sock *sk, struct sk_buff *skb)
614 { 616 {
615 struct dn_scp *scp = DN_SK(sk); 617 struct dn_scp *scp = DN_SK(sk);
616 unsigned short segnum; 618 unsigned short segnum;
617 struct dn_skb_cb *cb = DN_SKB_CB(skb); 619 struct dn_skb_cb *cb = DN_SKB_CB(skb);
618 int queued = 0; 620 int queued = 0;
619 621
620 if (skb->len < 2) 622 if (skb->len < 2)
621 goto out; 623 goto out;
622 624
623 cb->segnum = segnum = dn_ntohs(*(__le16 *)skb->data); 625 cb->segnum = segnum = le16_to_cpu(*(__le16 *)skb->data);
624 skb_pull(skb, 2); 626 skb_pull(skb, 2);
625 627
626 if (seq_next(scp->numoth_rcv, segnum)) { 628 if (seq_next(scp->numoth_rcv, segnum)) {
627 629
628 if (dn_queue_skb(sk, skb, SIGURG, &scp->other_receive_queue) == 0) { 630 if (dn_queue_skb(sk, skb, SIGURG, &scp->other_receive_queue) == 0) {
629 seq_add(&scp->numoth_rcv, 1); 631 seq_add(&scp->numoth_rcv, 1);
630 scp->other_report = 0; 632 scp->other_report = 0;
631 queued = 1; 633 queued = 1;
632 } 634 }
633 } 635 }
634 636
635 dn_nsp_send_oth_ack(sk); 637 dn_nsp_send_oth_ack(sk);
636 out: 638 out:
637 if (!queued) 639 if (!queued)
638 kfree_skb(skb); 640 kfree_skb(skb);
639 } 641 }
640 642
641 static void dn_nsp_data(struct sock *sk, struct sk_buff *skb) 643 static void dn_nsp_data(struct sock *sk, struct sk_buff *skb)
642 { 644 {
643 int queued = 0; 645 int queued = 0;
644 unsigned short segnum; 646 unsigned short segnum;
645 struct dn_skb_cb *cb = DN_SKB_CB(skb); 647 struct dn_skb_cb *cb = DN_SKB_CB(skb);
646 struct dn_scp *scp = DN_SK(sk); 648 struct dn_scp *scp = DN_SK(sk);
647 649
648 if (skb->len < 2) 650 if (skb->len < 2)
649 goto out; 651 goto out;
650 652
651 cb->segnum = segnum = dn_ntohs(*(__le16 *)skb->data); 653 cb->segnum = segnum = le16_to_cpu(*(__le16 *)skb->data);
652 skb_pull(skb, 2); 654 skb_pull(skb, 2);
653 655
654 if (seq_next(scp->numdat_rcv, segnum)) { 656 if (seq_next(scp->numdat_rcv, segnum)) {
655 if (dn_queue_skb(sk, skb, SIGIO, &sk->sk_receive_queue) == 0) { 657 if (dn_queue_skb(sk, skb, SIGIO, &sk->sk_receive_queue) == 0) {
656 seq_add(&scp->numdat_rcv, 1); 658 seq_add(&scp->numdat_rcv, 1);
657 queued = 1; 659 queued = 1;
658 } 660 }
659 661
660 if ((scp->flowloc_sw == DN_SEND) && dn_congested(sk)) { 662 if ((scp->flowloc_sw == DN_SEND) && dn_congested(sk)) {
661 scp->flowloc_sw = DN_DONTSEND; 663 scp->flowloc_sw = DN_DONTSEND;
662 dn_nsp_send_link(sk, DN_DONTSEND, 0); 664 dn_nsp_send_link(sk, DN_DONTSEND, 0);
663 } 665 }
664 } 666 }
665 667
666 dn_nsp_send_data_ack(sk); 668 dn_nsp_send_data_ack(sk);
667 out: 669 out:
668 if (!queued) 670 if (!queued)
669 kfree_skb(skb); 671 kfree_skb(skb);
670 } 672 }
671 673
672 /* 674 /*
673 * If one of our conninit messages is returned, this function 675 * If one of our conninit messages is returned, this function
674 * deals with it. It puts the socket into the NO_COMMUNICATION 676 * deals with it. It puts the socket into the NO_COMMUNICATION
675 * state. 677 * state.
676 */ 678 */
677 static void dn_returned_conn_init(struct sock *sk, struct sk_buff *skb) 679 static void dn_returned_conn_init(struct sock *sk, struct sk_buff *skb)
678 { 680 {
679 struct dn_scp *scp = DN_SK(sk); 681 struct dn_scp *scp = DN_SK(sk);
680 682
681 if (scp->state == DN_CI) { 683 if (scp->state == DN_CI) {
682 scp->state = DN_NC; 684 scp->state = DN_NC;
683 sk->sk_state = TCP_CLOSE; 685 sk->sk_state = TCP_CLOSE;
684 if (!sock_flag(sk, SOCK_DEAD)) 686 if (!sock_flag(sk, SOCK_DEAD))
685 sk->sk_state_change(sk); 687 sk->sk_state_change(sk);
686 } 688 }
687 689
688 kfree_skb(skb); 690 kfree_skb(skb);
689 } 691 }
690 692
691 static int dn_nsp_no_socket(struct sk_buff *skb, unsigned short reason) 693 static int dn_nsp_no_socket(struct sk_buff *skb, unsigned short reason)
692 { 694 {
693 struct dn_skb_cb *cb = DN_SKB_CB(skb); 695 struct dn_skb_cb *cb = DN_SKB_CB(skb);
694 int ret = NET_RX_DROP; 696 int ret = NET_RX_DROP;
695 697
696 /* Must not reply to returned packets */ 698 /* Must not reply to returned packets */
697 if (cb->rt_flags & DN_RT_F_RTS) 699 if (cb->rt_flags & DN_RT_F_RTS)
698 goto out; 700 goto out;
699 701
700 if ((reason != NSP_REASON_OK) && ((cb->nsp_flags & 0x0c) == 0x08)) { 702 if ((reason != NSP_REASON_OK) && ((cb->nsp_flags & 0x0c) == 0x08)) {
701 switch(cb->nsp_flags & 0x70) { 703 switch(cb->nsp_flags & 0x70) {
702 case 0x10: 704 case 0x10:
703 case 0x60: /* (Retransmitted) Connect Init */ 705 case 0x60: /* (Retransmitted) Connect Init */
704 dn_nsp_return_disc(skb, NSP_DISCINIT, reason); 706 dn_nsp_return_disc(skb, NSP_DISCINIT, reason);
705 ret = NET_RX_SUCCESS; 707 ret = NET_RX_SUCCESS;
706 break; 708 break;
707 case 0x20: /* Connect Confirm */ 709 case 0x20: /* Connect Confirm */
708 dn_nsp_return_disc(skb, NSP_DISCCONF, reason); 710 dn_nsp_return_disc(skb, NSP_DISCCONF, reason);
709 ret = NET_RX_SUCCESS; 711 ret = NET_RX_SUCCESS;
710 break; 712 break;
711 } 713 }
712 } 714 }
713 715
714 out: 716 out:
715 kfree_skb(skb); 717 kfree_skb(skb);
716 return ret; 718 return ret;
717 } 719 }
718 720
719 static int dn_nsp_rx_packet(struct sk_buff *skb) 721 static int dn_nsp_rx_packet(struct sk_buff *skb)
720 { 722 {
721 struct dn_skb_cb *cb = DN_SKB_CB(skb); 723 struct dn_skb_cb *cb = DN_SKB_CB(skb);
722 struct sock *sk = NULL; 724 struct sock *sk = NULL;
723 unsigned char *ptr = (unsigned char *)skb->data; 725 unsigned char *ptr = (unsigned char *)skb->data;
724 unsigned short reason = NSP_REASON_NL; 726 unsigned short reason = NSP_REASON_NL;
725 727
726 if (!pskb_may_pull(skb, 2)) 728 if (!pskb_may_pull(skb, 2))
727 goto free_out; 729 goto free_out;
728 730
729 skb_reset_transport_header(skb); 731 skb_reset_transport_header(skb);
730 cb->nsp_flags = *ptr++; 732 cb->nsp_flags = *ptr++;
731 733
732 if (decnet_debug_level & 2) 734 if (decnet_debug_level & 2)
733 printk(KERN_DEBUG "dn_nsp_rx: Message type 0x%02x\n", (int)cb->nsp_flags); 735 printk(KERN_DEBUG "dn_nsp_rx: Message type 0x%02x\n", (int)cb->nsp_flags);
734 736
735 if (cb->nsp_flags & 0x83) 737 if (cb->nsp_flags & 0x83)
736 goto free_out; 738 goto free_out;
737 739
738 /* 740 /*
739 * Filter out conninits and useless packet types 741 * Filter out conninits and useless packet types
740 */ 742 */
741 if ((cb->nsp_flags & 0x0c) == 0x08) { 743 if ((cb->nsp_flags & 0x0c) == 0x08) {
742 switch(cb->nsp_flags & 0x70) { 744 switch(cb->nsp_flags & 0x70) {
743 case 0x00: /* NOP */ 745 case 0x00: /* NOP */
744 case 0x70: /* Reserved */ 746 case 0x70: /* Reserved */
745 case 0x50: /* Reserved, Phase II node init */ 747 case 0x50: /* Reserved, Phase II node init */
746 goto free_out; 748 goto free_out;
747 case 0x10: 749 case 0x10:
748 case 0x60: 750 case 0x60:
749 if (unlikely(cb->rt_flags & DN_RT_F_RTS)) 751 if (unlikely(cb->rt_flags & DN_RT_F_RTS))
750 goto free_out; 752 goto free_out;
751 sk = dn_find_listener(skb, &reason); 753 sk = dn_find_listener(skb, &reason);
752 goto got_it; 754 goto got_it;
753 } 755 }
754 } 756 }
755 757
756 if (!pskb_may_pull(skb, 3)) 758 if (!pskb_may_pull(skb, 3))
757 goto free_out; 759 goto free_out;
758 760
759 /* 761 /*
760 * Grab the destination address. 762 * Grab the destination address.
761 */ 763 */
762 cb->dst_port = *(__le16 *)ptr; 764 cb->dst_port = *(__le16 *)ptr;
763 cb->src_port = 0; 765 cb->src_port = 0;
764 ptr += 2; 766 ptr += 2;
765 767
766 /* 768 /*
767 * If not a connack, grab the source address too. 769 * If not a connack, grab the source address too.
768 */ 770 */
769 if (pskb_may_pull(skb, 5)) { 771 if (pskb_may_pull(skb, 5)) {
770 cb->src_port = *(__le16 *)ptr; 772 cb->src_port = *(__le16 *)ptr;
771 ptr += 2; 773 ptr += 2;
772 skb_pull(skb, 5); 774 skb_pull(skb, 5);
773 } 775 }
774 776
775 /* 777 /*
776 * Returned packets... 778 * Returned packets...
777 * Swap src & dst and look up in the normal way. 779 * Swap src & dst and look up in the normal way.
778 */ 780 */
779 if (unlikely(cb->rt_flags & DN_RT_F_RTS)) { 781 if (unlikely(cb->rt_flags & DN_RT_F_RTS)) {
780 __le16 tmp = cb->dst_port; 782 __le16 tmp = cb->dst_port;
781 cb->dst_port = cb->src_port; 783 cb->dst_port = cb->src_port;
782 cb->src_port = tmp; 784 cb->src_port = tmp;
783 tmp = cb->dst; 785 tmp = cb->dst;
784 cb->dst = cb->src; 786 cb->dst = cb->src;
785 cb->src = tmp; 787 cb->src = tmp;
786 } 788 }
787 789
788 /* 790 /*
789 * Find the socket to which this skb is destined. 791 * Find the socket to which this skb is destined.
790 */ 792 */
791 sk = dn_find_by_skb(skb); 793 sk = dn_find_by_skb(skb);
792 got_it: 794 got_it:
793 if (sk != NULL) { 795 if (sk != NULL) {
794 struct dn_scp *scp = DN_SK(sk); 796 struct dn_scp *scp = DN_SK(sk);
795 797
796 /* Reset backoff */ 798 /* Reset backoff */
797 scp->nsp_rxtshift = 0; 799 scp->nsp_rxtshift = 0;
798 800
799 /* 801 /*
800 * We linearize everything except data segments here. 802 * We linearize everything except data segments here.
801 */ 803 */
802 if (cb->nsp_flags & ~0x60) { 804 if (cb->nsp_flags & ~0x60) {
803 if (unlikely(skb_linearize(skb))) 805 if (unlikely(skb_linearize(skb)))
804 goto free_out; 806 goto free_out;
805 } 807 }
806 808
807 return sk_receive_skb(sk, skb, 0); 809 return sk_receive_skb(sk, skb, 0);
808 } 810 }
809 811
810 return dn_nsp_no_socket(skb, reason); 812 return dn_nsp_no_socket(skb, reason);
811 813
812 free_out: 814 free_out:
813 kfree_skb(skb); 815 kfree_skb(skb);
814 return NET_RX_DROP; 816 return NET_RX_DROP;
815 } 817 }
816 818
817 int dn_nsp_rx(struct sk_buff *skb) 819 int dn_nsp_rx(struct sk_buff *skb)
818 { 820 {
819 return NF_HOOK(PF_DECnet, NF_DN_LOCAL_IN, skb, skb->dev, NULL, dn_nsp_rx_packet); 821 return NF_HOOK(PF_DECnet, NF_DN_LOCAL_IN, skb, skb->dev, NULL, dn_nsp_rx_packet);
820 } 822 }
821 823
822 /* 824 /*
823 * This is the main receive routine for sockets. It is called 825 * This is the main receive routine for sockets. It is called
824 * from the above when the socket is not busy, and also from 826 * from the above when the socket is not busy, and also from
825 * sock_release() when there is a backlog queued up. 827 * sock_release() when there is a backlog queued up.
826 */ 828 */
827 int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb) 829 int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
828 { 830 {
829 struct dn_scp *scp = DN_SK(sk); 831 struct dn_scp *scp = DN_SK(sk);
830 struct dn_skb_cb *cb = DN_SKB_CB(skb); 832 struct dn_skb_cb *cb = DN_SKB_CB(skb);
831 833
832 if (cb->rt_flags & DN_RT_F_RTS) { 834 if (cb->rt_flags & DN_RT_F_RTS) {
833 if (cb->nsp_flags == 0x18 || cb->nsp_flags == 0x68) 835 if (cb->nsp_flags == 0x18 || cb->nsp_flags == 0x68)
834 dn_returned_conn_init(sk, skb); 836 dn_returned_conn_init(sk, skb);
835 else 837 else
836 kfree_skb(skb); 838 kfree_skb(skb);
837 return NET_RX_SUCCESS; 839 return NET_RX_SUCCESS;
838 } 840 }
839 841
840 /* 842 /*
841 * Control packet. 843 * Control packet.
842 */ 844 */
843 if ((cb->nsp_flags & 0x0c) == 0x08) { 845 if ((cb->nsp_flags & 0x0c) == 0x08) {
844 switch(cb->nsp_flags & 0x70) { 846 switch(cb->nsp_flags & 0x70) {
845 case 0x10: 847 case 0x10:
846 case 0x60: 848 case 0x60:
847 dn_nsp_conn_init(sk, skb); 849 dn_nsp_conn_init(sk, skb);
848 break; 850 break;
849 case 0x20: 851 case 0x20:
850 dn_nsp_conn_conf(sk, skb); 852 dn_nsp_conn_conf(sk, skb);
851 break; 853 break;
852 case 0x30: 854 case 0x30:
853 dn_nsp_disc_init(sk, skb); 855 dn_nsp_disc_init(sk, skb);
854 break; 856 break;
855 case 0x40: 857 case 0x40:
856 dn_nsp_disc_conf(sk, skb); 858 dn_nsp_disc_conf(sk, skb);
857 break; 859 break;
858 } 860 }
859 861
860 } else if (cb->nsp_flags == 0x24) { 862 } else if (cb->nsp_flags == 0x24) {
861 /* 863 /*
862 * Special for connacks, 'cos they don't have 864 * Special for connacks, 'cos they don't have
863 * ack data or ack otherdata info. 865 * ack data or ack otherdata info.
864 */ 866 */
865 dn_nsp_conn_ack(sk, skb); 867 dn_nsp_conn_ack(sk, skb);
866 } else { 868 } else {
867 int other = 1; 869 int other = 1;
868 870
869 /* both data and ack frames can kick a CC socket into RUN */ 871 /* both data and ack frames can kick a CC socket into RUN */
870 if ((scp->state == DN_CC) && !sock_flag(sk, SOCK_DEAD)) { 872 if ((scp->state == DN_CC) && !sock_flag(sk, SOCK_DEAD)) {
871 scp->state = DN_RUN; 873 scp->state = DN_RUN;
872 sk->sk_state = TCP_ESTABLISHED; 874 sk->sk_state = TCP_ESTABLISHED;
873 sk->sk_state_change(sk); 875 sk->sk_state_change(sk);
874 } 876 }
875 877
876 if ((cb->nsp_flags & 0x1c) == 0) 878 if ((cb->nsp_flags & 0x1c) == 0)
877 other = 0; 879 other = 0;
878 if (cb->nsp_flags == 0x04) 880 if (cb->nsp_flags == 0x04)
879 other = 0; 881 other = 0;
880 882
881 /* 883 /*
882 * Read out ack data here, this applies equally 884 * Read out ack data here, this applies equally
883 * to data, other data, link service and both 885 * to data, other data, link service and both
884 * ack data and ack otherdata. 886 * ack data and ack otherdata.
885 */ 887 */
886 dn_process_ack(sk, skb, other); 888 dn_process_ack(sk, skb, other);
887 889
888 /* 890 /*
889 * If we've some sort of data here then call a 891 * If we've some sort of data here then call a
890 * suitable routine for dealing with it, otherwise 892 * suitable routine for dealing with it, otherwise
891 * the packet is an ack and can be discarded. 893 * the packet is an ack and can be discarded.
892 */ 894 */
893 if ((cb->nsp_flags & 0x0c) == 0) { 895 if ((cb->nsp_flags & 0x0c) == 0) {
894 896
895 if (scp->state != DN_RUN) 897 if (scp->state != DN_RUN)
896 goto free_out; 898 goto free_out;
897 899
898 switch(cb->nsp_flags) { 900 switch(cb->nsp_flags) {
899 case 0x10: /* LS */ 901 case 0x10: /* LS */
900 dn_nsp_linkservice(sk, skb); 902 dn_nsp_linkservice(sk, skb);
901 break; 903 break;
902 case 0x30: /* OD */ 904 case 0x30: /* OD */
903 dn_nsp_otherdata(sk, skb); 905 dn_nsp_otherdata(sk, skb);
904 break; 906 break;
905 default: 907 default:
906 dn_nsp_data(sk, skb); 908 dn_nsp_data(sk, skb);
907 } 909 }
908 910
909 } else { /* Ack, chuck it out here */ 911 } else { /* Ack, chuck it out here */
910 free_out: 912 free_out:
911 kfree_skb(skb); 913 kfree_skb(skb);
912 } 914 }
913 } 915 }
914 916
915 return NET_RX_SUCCESS; 917 return NET_RX_SUCCESS;
916 } 918 }
917 919
918 920
net/decnet/dn_nsp_out.c
1 1
2 /* 2 /*
3 * DECnet An implementation of the DECnet protocol suite for the LINUX 3 * DECnet An implementation of the DECnet protocol suite for the LINUX
4 * operating system. DECnet is implemented using the BSD Socket 4 * operating system. DECnet is implemented using the BSD Socket
5 * interface as the means of communication with the user level. 5 * interface as the means of communication with the user level.
6 * 6 *
7 * DECnet Network Services Protocol (Output) 7 * DECnet Network Services Protocol (Output)
8 * 8 *
9 * Author: Eduardo Marcelo Serrat <emserrat@geocities.com> 9 * Author: Eduardo Marcelo Serrat <emserrat@geocities.com>
10 * 10 *
11 * Changes: 11 * Changes:
12 * 12 *
13 * Steve Whitehouse: Split into dn_nsp_in.c and dn_nsp_out.c from 13 * Steve Whitehouse: Split into dn_nsp_in.c and dn_nsp_out.c from
14 * original dn_nsp.c. 14 * original dn_nsp.c.
15 * Steve Whitehouse: Updated to work with my new routing architecture. 15 * Steve Whitehouse: Updated to work with my new routing architecture.
16 * Steve Whitehouse: Added changes from Eduardo Serrat's patches. 16 * Steve Whitehouse: Added changes from Eduardo Serrat's patches.
17 * Steve Whitehouse: Now conninits have the "return" bit set. 17 * Steve Whitehouse: Now conninits have the "return" bit set.
18 * Steve Whitehouse: Fixes to check alloc'd skbs are non NULL! 18 * Steve Whitehouse: Fixes to check alloc'd skbs are non NULL!
19 * Moved output state machine into one function 19 * Moved output state machine into one function
20 * Steve Whitehouse: New output state machine 20 * Steve Whitehouse: New output state machine
21 * Paul Koning: Connect Confirm message fix. 21 * Paul Koning: Connect Confirm message fix.
22 * Eduardo Serrat: Fix to stop dn_nsp_do_disc() sending malformed packets. 22 * Eduardo Serrat: Fix to stop dn_nsp_do_disc() sending malformed packets.
23 * Steve Whitehouse: dn_nsp_output() and friends needed a spring clean 23 * Steve Whitehouse: dn_nsp_output() and friends needed a spring clean
24 * Steve Whitehouse: Moved dn_nsp_send() in here from route.h 24 * Steve Whitehouse: Moved dn_nsp_send() in here from route.h
25 */ 25 */
26 26
27 /****************************************************************************** 27 /******************************************************************************
28 (c) 1995-1998 E.M. Serrat emserrat@geocities.com 28 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
29 29
30 This program is free software; you can redistribute it and/or modify 30 This program is free software; you can redistribute it and/or modify
31 it under the terms of the GNU General Public License as published by 31 it under the terms of the GNU General Public License as published by
32 the Free Software Foundation; either version 2 of the License, or 32 the Free Software Foundation; either version 2 of the License, or
33 any later version. 33 any later version.
34 34
35 This program is distributed in the hope that it will be useful, 35 This program is distributed in the hope that it will be useful,
36 but WITHOUT ANY WARRANTY; without even the implied warranty of 36 but WITHOUT ANY WARRANTY; without even the implied warranty of
37 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 37 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
38 GNU General Public License for more details. 38 GNU General Public License for more details.
39 *******************************************************************************/ 39 *******************************************************************************/
40 40
41 #include <linux/errno.h> 41 #include <linux/errno.h>
42 #include <linux/types.h> 42 #include <linux/types.h>
43 #include <linux/socket.h> 43 #include <linux/socket.h>
44 #include <linux/in.h> 44 #include <linux/in.h>
45 #include <linux/kernel.h> 45 #include <linux/kernel.h>
46 #include <linux/timer.h> 46 #include <linux/timer.h>
47 #include <linux/string.h> 47 #include <linux/string.h>
48 #include <linux/sockios.h> 48 #include <linux/sockios.h>
49 #include <linux/net.h> 49 #include <linux/net.h>
50 #include <linux/netdevice.h> 50 #include <linux/netdevice.h>
51 #include <linux/inet.h> 51 #include <linux/inet.h>
52 #include <linux/route.h> 52 #include <linux/route.h>
53 #include <net/sock.h> 53 #include <net/sock.h>
54 #include <asm/system.h> 54 #include <asm/system.h>
55 #include <linux/fcntl.h> 55 #include <linux/fcntl.h>
56 #include <linux/mm.h> 56 #include <linux/mm.h>
57 #include <linux/termios.h> 57 #include <linux/termios.h>
58 #include <linux/interrupt.h> 58 #include <linux/interrupt.h>
59 #include <linux/proc_fs.h> 59 #include <linux/proc_fs.h>
60 #include <linux/stat.h> 60 #include <linux/stat.h>
61 #include <linux/init.h> 61 #include <linux/init.h>
62 #include <linux/poll.h> 62 #include <linux/poll.h>
63 #include <linux/if_packet.h> 63 #include <linux/if_packet.h>
64 #include <net/neighbour.h> 64 #include <net/neighbour.h>
65 #include <net/dst.h> 65 #include <net/dst.h>
66 #include <net/flow.h> 66 #include <net/flow.h>
67 #include <net/dn.h> 67 #include <net/dn.h>
68 #include <net/dn_nsp.h> 68 #include <net/dn_nsp.h>
69 #include <net/dn_dev.h> 69 #include <net/dn_dev.h>
70 #include <net/dn_route.h> 70 #include <net/dn_route.h>
71 71
72 72
73 static int nsp_backoff[NSP_MAXRXTSHIFT + 1] = { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 }; 73 static int nsp_backoff[NSP_MAXRXTSHIFT + 1] = { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };
74 74
75 static void dn_nsp_send(struct sk_buff *skb) 75 static void dn_nsp_send(struct sk_buff *skb)
76 { 76 {
77 struct sock *sk = skb->sk; 77 struct sock *sk = skb->sk;
78 struct dn_scp *scp = DN_SK(sk); 78 struct dn_scp *scp = DN_SK(sk);
79 struct dst_entry *dst; 79 struct dst_entry *dst;
80 struct flowi fl; 80 struct flowi fl;
81 81
82 skb_reset_transport_header(skb); 82 skb_reset_transport_header(skb);
83 scp->stamp = jiffies; 83 scp->stamp = jiffies;
84 84
85 dst = sk_dst_check(sk, 0); 85 dst = sk_dst_check(sk, 0);
86 if (dst) { 86 if (dst) {
87 try_again: 87 try_again:
88 skb->dst = dst; 88 skb->dst = dst;
89 dst_output(skb); 89 dst_output(skb);
90 return; 90 return;
91 } 91 }
92 92
93 memset(&fl, 0, sizeof(fl)); 93 memset(&fl, 0, sizeof(fl));
94 fl.oif = sk->sk_bound_dev_if; 94 fl.oif = sk->sk_bound_dev_if;
95 fl.fld_src = dn_saddr2dn(&scp->addr); 95 fl.fld_src = dn_saddr2dn(&scp->addr);
96 fl.fld_dst = dn_saddr2dn(&scp->peer); 96 fl.fld_dst = dn_saddr2dn(&scp->peer);
97 dn_sk_ports_copy(&fl, scp); 97 dn_sk_ports_copy(&fl, scp);
98 fl.proto = DNPROTO_NSP; 98 fl.proto = DNPROTO_NSP;
99 if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, 0) == 0) { 99 if (dn_route_output_sock(&sk->sk_dst_cache, &fl, sk, 0) == 0) {
100 dst = sk_dst_get(sk); 100 dst = sk_dst_get(sk);
101 sk->sk_route_caps = dst->dev->features; 101 sk->sk_route_caps = dst->dev->features;
102 goto try_again; 102 goto try_again;
103 } 103 }
104 104
105 sk->sk_err = EHOSTUNREACH; 105 sk->sk_err = EHOSTUNREACH;
106 if (!sock_flag(sk, SOCK_DEAD)) 106 if (!sock_flag(sk, SOCK_DEAD))
107 sk->sk_state_change(sk); 107 sk->sk_state_change(sk);
108 } 108 }
109 109
110 110
111 /* 111 /*
112 * If sk == NULL, then we assume that we are supposed to be making 112 * If sk == NULL, then we assume that we are supposed to be making
113 * a routing layer skb. If sk != NULL, then we are supposed to be 113 * a routing layer skb. If sk != NULL, then we are supposed to be
114 * creating an skb for the NSP layer. 114 * creating an skb for the NSP layer.
115 * 115 *
116 * The eventual aim is for each socket to have a cached header size 116 * The eventual aim is for each socket to have a cached header size
117 * for its outgoing packets, and to set hdr from this when sk != NULL. 117 * for its outgoing packets, and to set hdr from this when sk != NULL.
118 */ 118 */
119 struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri) 119 struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri)
120 { 120 {
121 struct sk_buff *skb; 121 struct sk_buff *skb;
122 int hdr = 64; 122 int hdr = 64;
123 123
124 if ((skb = alloc_skb(size + hdr, pri)) == NULL) 124 if ((skb = alloc_skb(size + hdr, pri)) == NULL)
125 return NULL; 125 return NULL;
126 126
127 skb->protocol = htons(ETH_P_DNA_RT); 127 skb->protocol = htons(ETH_P_DNA_RT);
128 skb->pkt_type = PACKET_OUTGOING; 128 skb->pkt_type = PACKET_OUTGOING;
129 129
130 if (sk) 130 if (sk)
131 skb_set_owner_w(skb, sk); 131 skb_set_owner_w(skb, sk);
132 132
133 skb_reserve(skb, hdr); 133 skb_reserve(skb, hdr);
134 134
135 return skb; 135 return skb;
136 } 136 }
137 137
138 /* 138 /*
139 * Calculate persist timer based upon the smoothed round 139 * Calculate persist timer based upon the smoothed round
140 * trip time and the variance. Backoff according to the 140 * trip time and the variance. Backoff according to the
141 * nsp_backoff[] array. 141 * nsp_backoff[] array.
142 */ 142 */
143 unsigned long dn_nsp_persist(struct sock *sk) 143 unsigned long dn_nsp_persist(struct sock *sk)
144 { 144 {
145 struct dn_scp *scp = DN_SK(sk); 145 struct dn_scp *scp = DN_SK(sk);
146 146
147 unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1; 147 unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1;
148 148
149 t *= nsp_backoff[scp->nsp_rxtshift]; 149 t *= nsp_backoff[scp->nsp_rxtshift];
150 150
151 if (t < HZ) t = HZ; 151 if (t < HZ) t = HZ;
152 if (t > (600*HZ)) t = (600*HZ); 152 if (t > (600*HZ)) t = (600*HZ);
153 153
154 if (scp->nsp_rxtshift < NSP_MAXRXTSHIFT) 154 if (scp->nsp_rxtshift < NSP_MAXRXTSHIFT)
155 scp->nsp_rxtshift++; 155 scp->nsp_rxtshift++;
156 156
157 /* printk(KERN_DEBUG "rxtshift %lu, t=%lu\n", scp->nsp_rxtshift, t); */ 157 /* printk(KERN_DEBUG "rxtshift %lu, t=%lu\n", scp->nsp_rxtshift, t); */
158 158
159 return t; 159 return t;
160 } 160 }
161 161
162 /* 162 /*
163 * This is called each time we get an estimate for the rtt 163 * This is called each time we get an estimate for the rtt
164 * on the link. 164 * on the link.
165 */ 165 */
166 static void dn_nsp_rtt(struct sock *sk, long rtt) 166 static void dn_nsp_rtt(struct sock *sk, long rtt)
167 { 167 {
168 struct dn_scp *scp = DN_SK(sk); 168 struct dn_scp *scp = DN_SK(sk);
169 long srtt = (long)scp->nsp_srtt; 169 long srtt = (long)scp->nsp_srtt;
170 long rttvar = (long)scp->nsp_rttvar; 170 long rttvar = (long)scp->nsp_rttvar;
171 long delta; 171 long delta;
172 172
173 /* 173 /*
174 * If the jiffies clock flips over in the middle of timestamp 174 * If the jiffies clock flips over in the middle of timestamp
175 * gathering this value might turn out negative, so we make sure 175 * gathering this value might turn out negative, so we make sure
176 * that it is always positive here. 176 * that it is always positive here.
177 */ 177 */
178 if (rtt < 0) 178 if (rtt < 0)
179 rtt = -rtt; 179 rtt = -rtt;
180 /* 180 /*
181 * Add new rtt to smoothed average 181 * Add new rtt to smoothed average
182 */ 182 */
183 delta = ((rtt << 3) - srtt); 183 delta = ((rtt << 3) - srtt);
184 srtt += (delta >> 3); 184 srtt += (delta >> 3);
185 if (srtt >= 1) 185 if (srtt >= 1)
186 scp->nsp_srtt = (unsigned long)srtt; 186 scp->nsp_srtt = (unsigned long)srtt;
187 else 187 else
188 scp->nsp_srtt = 1; 188 scp->nsp_srtt = 1;
189 189
190 /* 190 /*
191 * Add new rtt variance to smoothed variance 191 * Add new rtt variance to smoothed variance
192 */ 192 */
193 delta >>= 1; 193 delta >>= 1;
194 rttvar += ((((delta>0)?(delta):(-delta)) - rttvar) >> 2); 194 rttvar += ((((delta>0)?(delta):(-delta)) - rttvar) >> 2);
195 if (rttvar >= 1) 195 if (rttvar >= 1)
196 scp->nsp_rttvar = (unsigned long)rttvar; 196 scp->nsp_rttvar = (unsigned long)rttvar;
197 else 197 else
198 scp->nsp_rttvar = 1; 198 scp->nsp_rttvar = 1;
199 199
200 /* printk(KERN_DEBUG "srtt=%lu rttvar=%lu\n", scp->nsp_srtt, scp->nsp_rttvar); */ 200 /* printk(KERN_DEBUG "srtt=%lu rttvar=%lu\n", scp->nsp_srtt, scp->nsp_rttvar); */
201 } 201 }
202 202
203 /** 203 /**
204 * dn_nsp_clone_and_send - Send a data packet by cloning it 204 * dn_nsp_clone_and_send - Send a data packet by cloning it
205 * @skb: The packet to clone and transmit 205 * @skb: The packet to clone and transmit
206 * @gfp: memory allocation flag 206 * @gfp: memory allocation flag
207 * 207 *
208 * Clone a queued data or other data packet and transmit it. 208 * Clone a queued data or other data packet and transmit it.
209 * 209 *
210 * Returns: The number of times the packet has been sent previously 210 * Returns: The number of times the packet has been sent previously
211 */ 211 */
212 static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, 212 static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb,
213 gfp_t gfp) 213 gfp_t gfp)
214 { 214 {
215 struct dn_skb_cb *cb = DN_SKB_CB(skb); 215 struct dn_skb_cb *cb = DN_SKB_CB(skb);
216 struct sk_buff *skb2; 216 struct sk_buff *skb2;
217 int ret = 0; 217 int ret = 0;
218 218
219 if ((skb2 = skb_clone(skb, gfp)) != NULL) { 219 if ((skb2 = skb_clone(skb, gfp)) != NULL) {
220 ret = cb->xmit_count; 220 ret = cb->xmit_count;
221 cb->xmit_count++; 221 cb->xmit_count++;
222 cb->stamp = jiffies; 222 cb->stamp = jiffies;
223 skb2->sk = skb->sk; 223 skb2->sk = skb->sk;
224 dn_nsp_send(skb2); 224 dn_nsp_send(skb2);
225 } 225 }
226 226
227 return ret; 227 return ret;
228 } 228 }
229 229
230 /** 230 /**
231 * dn_nsp_output - Try and send something from socket queues 231 * dn_nsp_output - Try and send something from socket queues
232 * @sk: The socket whose queues are to be investigated 232 * @sk: The socket whose queues are to be investigated
233 * 233 *
234 * Try and send the packet on the end of the data and other data queues. 234 * Try and send the packet on the end of the data and other data queues.
235 * Other data gets priority over data, and if we retransmit a packet we 235 * Other data gets priority over data, and if we retransmit a packet we
236 * reduce the window by dividing it in two. 236 * reduce the window by dividing it in two.
237 * 237 *
238 */ 238 */
239 void dn_nsp_output(struct sock *sk) 239 void dn_nsp_output(struct sock *sk)
240 { 240 {
241 struct dn_scp *scp = DN_SK(sk); 241 struct dn_scp *scp = DN_SK(sk);
242 struct sk_buff *skb; 242 struct sk_buff *skb;
243 unsigned reduce_win = 0; 243 unsigned reduce_win = 0;
244 244
245 /* 245 /*
246 * First we check for otherdata/linkservice messages 246 * First we check for otherdata/linkservice messages
247 */ 247 */
248 if ((skb = skb_peek(&scp->other_xmit_queue)) != NULL) 248 if ((skb = skb_peek(&scp->other_xmit_queue)) != NULL)
249 reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC); 249 reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC);
250 250
251 /* 251 /*
252 * If we may not send any data, we don't. 252 * If we may not send any data, we don't.
253 * If we are still trying to get some other data down the 253 * If we are still trying to get some other data down the
254 * channel, we don't try and send any data. 254 * channel, we don't try and send any data.
255 */ 255 */
256 if (reduce_win || (scp->flowrem_sw != DN_SEND)) 256 if (reduce_win || (scp->flowrem_sw != DN_SEND))
257 goto recalc_window; 257 goto recalc_window;
258 258
259 if ((skb = skb_peek(&scp->data_xmit_queue)) != NULL) 259 if ((skb = skb_peek(&scp->data_xmit_queue)) != NULL)
260 reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC); 260 reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC);
261 261
262 /* 262 /*
263 * If we've sent any frame more than once, we cut the 263 * If we've sent any frame more than once, we cut the
264 * send window size in half. There is always a minimum 264 * send window size in half. There is always a minimum
265 * window size of one available. 265 * window size of one available.
266 */ 266 */
267 recalc_window: 267 recalc_window:
268 if (reduce_win) { 268 if (reduce_win) {
269 scp->snd_window >>= 1; 269 scp->snd_window >>= 1;
270 if (scp->snd_window < NSP_MIN_WINDOW) 270 if (scp->snd_window < NSP_MIN_WINDOW)
271 scp->snd_window = NSP_MIN_WINDOW; 271 scp->snd_window = NSP_MIN_WINDOW;
272 } 272 }
273 } 273 }
274 274
275 int dn_nsp_xmit_timeout(struct sock *sk) 275 int dn_nsp_xmit_timeout(struct sock *sk)
276 { 276 {
277 struct dn_scp *scp = DN_SK(sk); 277 struct dn_scp *scp = DN_SK(sk);
278 278
279 dn_nsp_output(sk); 279 dn_nsp_output(sk);
280 280
281 if (!skb_queue_empty(&scp->data_xmit_queue) || 281 if (!skb_queue_empty(&scp->data_xmit_queue) ||
282 !skb_queue_empty(&scp->other_xmit_queue)) 282 !skb_queue_empty(&scp->other_xmit_queue))
283 scp->persist = dn_nsp_persist(sk); 283 scp->persist = dn_nsp_persist(sk);
284 284
285 return 0; 285 return 0;
286 } 286 }
287 287
288 static inline __le16 *dn_mk_common_header(struct dn_scp *scp, struct sk_buff *skb, unsigned char msgflag, int len) 288 static inline __le16 *dn_mk_common_header(struct dn_scp *scp, struct sk_buff *skb, unsigned char msgflag, int len)
289 { 289 {
290 unsigned char *ptr = skb_push(skb, len); 290 unsigned char *ptr = skb_push(skb, len);
291 291
292 BUG_ON(len < 5); 292 BUG_ON(len < 5);
293 293
294 *ptr++ = msgflag; 294 *ptr++ = msgflag;
295 *((__le16 *)ptr) = scp->addrrem; 295 *((__le16 *)ptr) = scp->addrrem;
296 ptr += 2; 296 ptr += 2;
297 *((__le16 *)ptr) = scp->addrloc; 297 *((__le16 *)ptr) = scp->addrloc;
298 ptr += 2; 298 ptr += 2;
299 return (__le16 __force *)ptr; 299 return (__le16 __force *)ptr;
300 } 300 }
301 301
302 static __le16 *dn_mk_ack_header(struct sock *sk, struct sk_buff *skb, unsigned char msgflag, int hlen, int other) 302 static __le16 *dn_mk_ack_header(struct sock *sk, struct sk_buff *skb, unsigned char msgflag, int hlen, int other)
303 { 303 {
304 struct dn_scp *scp = DN_SK(sk); 304 struct dn_scp *scp = DN_SK(sk);
305 unsigned short acknum = scp->numdat_rcv & 0x0FFF; 305 unsigned short acknum = scp->numdat_rcv & 0x0FFF;
306 unsigned short ackcrs = scp->numoth_rcv & 0x0FFF; 306 unsigned short ackcrs = scp->numoth_rcv & 0x0FFF;
307 __le16 *ptr; 307 __le16 *ptr;
308 308
309 BUG_ON(hlen < 9); 309 BUG_ON(hlen < 9);
310 310
311 scp->ackxmt_dat = acknum; 311 scp->ackxmt_dat = acknum;
312 scp->ackxmt_oth = ackcrs; 312 scp->ackxmt_oth = ackcrs;
313 acknum |= 0x8000; 313 acknum |= 0x8000;
314 ackcrs |= 0x8000; 314 ackcrs |= 0x8000;
315 315
316 /* If this is an "other data/ack" message, swap acknum and ackcrs */ 316 /* If this is an "other data/ack" message, swap acknum and ackcrs */
317 if (other) { 317 if (other) {
318 unsigned short tmp = acknum; 318 unsigned short tmp = acknum;
319 acknum = ackcrs; 319 acknum = ackcrs;
320 ackcrs = tmp; 320 ackcrs = tmp;
321 } 321 }
322 322
323 /* Set "cross subchannel" bit in ackcrs */ 323 /* Set "cross subchannel" bit in ackcrs */
324 ackcrs |= 0x2000; 324 ackcrs |= 0x2000;
325 325
326 ptr = (__le16 *)dn_mk_common_header(scp, skb, msgflag, hlen); 326 ptr = (__le16 *)dn_mk_common_header(scp, skb, msgflag, hlen);
327 327
328 *ptr++ = dn_htons(acknum); 328 *ptr++ = cpu_to_le16(acknum);
329 *ptr++ = dn_htons(ackcrs); 329 *ptr++ = cpu_to_le16(ackcrs);
330 330
331 return ptr; 331 return ptr;
332 } 332 }
333 333
334 static __le16 *dn_nsp_mk_data_header(struct sock *sk, struct sk_buff *skb, int oth) 334 static __le16 *dn_nsp_mk_data_header(struct sock *sk, struct sk_buff *skb, int oth)
335 { 335 {
336 struct dn_scp *scp = DN_SK(sk); 336 struct dn_scp *scp = DN_SK(sk);
337 struct dn_skb_cb *cb = DN_SKB_CB(skb); 337 struct dn_skb_cb *cb = DN_SKB_CB(skb);
338 __le16 *ptr = dn_mk_ack_header(sk, skb, cb->nsp_flags, 11, oth); 338 __le16 *ptr = dn_mk_ack_header(sk, skb, cb->nsp_flags, 11, oth);
339 339
340 if (unlikely(oth)) { 340 if (unlikely(oth)) {
341 cb->segnum = scp->numoth; 341 cb->segnum = scp->numoth;
342 seq_add(&scp->numoth, 1); 342 seq_add(&scp->numoth, 1);
343 } else { 343 } else {
344 cb->segnum = scp->numdat; 344 cb->segnum = scp->numdat;
345 seq_add(&scp->numdat, 1); 345 seq_add(&scp->numdat, 1);
346 } 346 }
347 *(ptr++) = dn_htons(cb->segnum); 347 *(ptr++) = cpu_to_le16(cb->segnum);
348 348
349 return ptr; 349 return ptr;
350 } 350 }
351 351
352 void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, 352 void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb,
353 gfp_t gfp, int oth) 353 gfp_t gfp, int oth)
354 { 354 {
355 struct dn_scp *scp = DN_SK(sk); 355 struct dn_scp *scp = DN_SK(sk);
356 struct dn_skb_cb *cb = DN_SKB_CB(skb); 356 struct dn_skb_cb *cb = DN_SKB_CB(skb);
357 unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1; 357 unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1;
358 358
359 cb->xmit_count = 0; 359 cb->xmit_count = 0;
360 dn_nsp_mk_data_header(sk, skb, oth); 360 dn_nsp_mk_data_header(sk, skb, oth);
361 361
362 /* 362 /*
363 * Slow start: If we have been idle for more than 363 * Slow start: If we have been idle for more than
364 * one RTT, then reset window to min size. 364 * one RTT, then reset window to min size.
365 */ 365 */
366 if ((jiffies - scp->stamp) > t) 366 if ((jiffies - scp->stamp) > t)
367 scp->snd_window = NSP_MIN_WINDOW; 367 scp->snd_window = NSP_MIN_WINDOW;
368 368
369 if (oth) 369 if (oth)
370 skb_queue_tail(&scp->other_xmit_queue, skb); 370 skb_queue_tail(&scp->other_xmit_queue, skb);
371 else 371 else
372 skb_queue_tail(&scp->data_xmit_queue, skb); 372 skb_queue_tail(&scp->data_xmit_queue, skb);
373 373
374 if (scp->flowrem_sw != DN_SEND) 374 if (scp->flowrem_sw != DN_SEND)
375 return; 375 return;
376 376
377 dn_nsp_clone_and_send(skb, gfp); 377 dn_nsp_clone_and_send(skb, gfp);
378 } 378 }
379 379
380 380
/*
 * Walk transmit queue @q and drop every packet whose sequence number is
 * covered by @acknum. While doing so, gather RTT samples and grow the
 * send window (packets acked on first transmission), or arrange a
 * retransmission (last acked packet had been sent more than once —
 * assumes go-back-N at the remote end). Returns non-zero if the
 * sending process should be woken because queue space was freed.
 *
 * NOTE(review): the queue is walked by hand (q->next / skb->next) with
 * the acked skb unlinked *after* the cursor has been advanced past it —
 * the ordering of the statements in the loop is load-bearing.
 */
int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff *skb2, *list, *ack = NULL;
	int wakeup = 0;
	int try_retrans = 0;
	unsigned long reftime = cb->stamp;	/* arrival time of the ack packet */
	unsigned long pkttime;
	unsigned short xmit_count;
	unsigned short segnum;

	skb2 = q->next;
	list = (struct sk_buff *)q;	/* queue head acts as list terminator */
	while(list != skb2) {
		struct dn_skb_cb *cb2 = DN_SKB_CB(skb2);

		if (dn_before_or_equal(cb2->segnum, acknum))
			ack = skb2;

		/* printk(KERN_DEBUG "ack: %s %04x %04x\n", ack ? "ACK" : "SKIP", (int)cb2->segnum, (int)acknum); */

		/* Advance before unlinking, so the cursor stays valid */
		skb2 = skb2->next;

		if (ack == NULL)
			continue;

		/* printk(KERN_DEBUG "check_xmit_queue: %04x, %d\n", acknum, cb2->xmit_count); */

		/* Does _last_ packet acked have xmit_count > 1 */
		try_retrans = 0;
		/* Remember to wake up the sending process */
		wakeup = 1;
		/* Keep various statistics */
		pkttime = cb2->stamp;
		xmit_count = cb2->xmit_count;
		segnum = cb2->segnum;
		/* Remove and drop ack'ed packet */
		skb_unlink(ack, q);
		kfree_skb(ack);
		ack = NULL;

		/*
		 * We don't expect to see acknowledgements for packets we
		 * haven't sent yet.
		 */
		WARN_ON(xmit_count == 0);

		/*
		 * If the packet has only been sent once, we can use it
		 * to calculate the RTT and also open the window a little
		 * further.
		 */
		if (xmit_count == 1) {
			if (dn_equal(segnum, acknum))
				dn_nsp_rtt(sk, (long)(pkttime - reftime));

			if (scp->snd_window < scp->max_window)
				scp->snd_window++;
		}

		/*
		 * Packet has been sent more than once. If this is the last
		 * packet to be acknowledged then we want to send the next
		 * packet in the send queue again (assumes the remote host does
		 * go-back-N error control).
		 */
		if (xmit_count > 1)
			try_retrans = 1;
	}

	if (try_retrans)
		dn_nsp_output(sk);

	return wakeup;
}
457 457
458 void dn_nsp_send_data_ack(struct sock *sk) 458 void dn_nsp_send_data_ack(struct sock *sk)
459 { 459 {
460 struct sk_buff *skb = NULL; 460 struct sk_buff *skb = NULL;
461 461
462 if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL) 462 if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
463 return; 463 return;
464 464
465 skb_reserve(skb, 9); 465 skb_reserve(skb, 9);
466 dn_mk_ack_header(sk, skb, 0x04, 9, 0); 466 dn_mk_ack_header(sk, skb, 0x04, 9, 0);
467 dn_nsp_send(skb); 467 dn_nsp_send(skb);
468 } 468 }
469 469
470 void dn_nsp_send_oth_ack(struct sock *sk) 470 void dn_nsp_send_oth_ack(struct sock *sk)
471 { 471 {
472 struct sk_buff *skb = NULL; 472 struct sk_buff *skb = NULL;
473 473
474 if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL) 474 if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
475 return; 475 return;
476 476
477 skb_reserve(skb, 9); 477 skb_reserve(skb, 9);
478 dn_mk_ack_header(sk, skb, 0x14, 9, 1); 478 dn_mk_ack_header(sk, skb, 0x14, 9, 1);
479 dn_nsp_send(skb); 479 dn_nsp_send(skb);
480 } 480 }
481 481
482 482
483 void dn_send_conn_ack (struct sock *sk) 483 void dn_send_conn_ack (struct sock *sk)
484 { 484 {
485 struct dn_scp *scp = DN_SK(sk); 485 struct dn_scp *scp = DN_SK(sk);
486 struct sk_buff *skb = NULL; 486 struct sk_buff *skb = NULL;
487 struct nsp_conn_ack_msg *msg; 487 struct nsp_conn_ack_msg *msg;
488 488
489 if ((skb = dn_alloc_skb(sk, 3, sk->sk_allocation)) == NULL) 489 if ((skb = dn_alloc_skb(sk, 3, sk->sk_allocation)) == NULL)
490 return; 490 return;
491 491
492 msg = (struct nsp_conn_ack_msg *)skb_put(skb, 3); 492 msg = (struct nsp_conn_ack_msg *)skb_put(skb, 3);
493 msg->msgflg = 0x24; 493 msg->msgflg = 0x24;
494 msg->dstaddr = scp->addrrem; 494 msg->dstaddr = scp->addrrem;
495 495
496 dn_nsp_send(skb); 496 dn_nsp_send(skb);
497 } 497 }
498 498
499 void dn_nsp_delayed_ack(struct sock *sk) 499 void dn_nsp_delayed_ack(struct sock *sk)
500 { 500 {
501 struct dn_scp *scp = DN_SK(sk); 501 struct dn_scp *scp = DN_SK(sk);
502 502
503 if (scp->ackxmt_oth != scp->numoth_rcv) 503 if (scp->ackxmt_oth != scp->numoth_rcv)
504 dn_nsp_send_oth_ack(sk); 504 dn_nsp_send_oth_ack(sk);
505 505
506 if (scp->ackxmt_dat != scp->numdat_rcv) 506 if (scp->ackxmt_dat != scp->numdat_rcv)
507 dn_nsp_send_data_ack(sk); 507 dn_nsp_send_data_ack(sk);
508 } 508 }
509 509
510 static int dn_nsp_retrans_conn_conf(struct sock *sk) 510 static int dn_nsp_retrans_conn_conf(struct sock *sk)
511 { 511 {
512 struct dn_scp *scp = DN_SK(sk); 512 struct dn_scp *scp = DN_SK(sk);
513 513
514 if (scp->state == DN_CC) 514 if (scp->state == DN_CC)
515 dn_send_conn_conf(sk, GFP_ATOMIC); 515 dn_send_conn_conf(sk, GFP_ATOMIC);
516 516
517 return 0; 517 return 0;
518 } 518 }
519 519
520 void dn_send_conn_conf(struct sock *sk, gfp_t gfp) 520 void dn_send_conn_conf(struct sock *sk, gfp_t gfp)
521 { 521 {
522 struct dn_scp *scp = DN_SK(sk); 522 struct dn_scp *scp = DN_SK(sk);
523 struct sk_buff *skb = NULL; 523 struct sk_buff *skb = NULL;
524 struct nsp_conn_init_msg *msg; 524 struct nsp_conn_init_msg *msg;
525 __u8 len = (__u8)dn_ntohs(scp->conndata_out.opt_optl); 525 __u8 len = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);
526 526
527 if ((skb = dn_alloc_skb(sk, 50 + len, gfp)) == NULL) 527 if ((skb = dn_alloc_skb(sk, 50 + len, gfp)) == NULL)
528 return; 528 return;
529 529
530 msg = (struct nsp_conn_init_msg *)skb_put(skb, sizeof(*msg)); 530 msg = (struct nsp_conn_init_msg *)skb_put(skb, sizeof(*msg));
531 msg->msgflg = 0x28; 531 msg->msgflg = 0x28;
532 msg->dstaddr = scp->addrrem; 532 msg->dstaddr = scp->addrrem;
533 msg->srcaddr = scp->addrloc; 533 msg->srcaddr = scp->addrloc;
534 msg->services = scp->services_loc; 534 msg->services = scp->services_loc;
535 msg->info = scp->info_loc; 535 msg->info = scp->info_loc;
536 msg->segsize = dn_htons(scp->segsize_loc); 536 msg->segsize = cpu_to_le16(scp->segsize_loc);
537 537
538 *skb_put(skb,1) = len; 538 *skb_put(skb,1) = len;
539 539
540 if (len > 0) 540 if (len > 0)
541 memcpy(skb_put(skb, len), scp->conndata_out.opt_data, len); 541 memcpy(skb_put(skb, len), scp->conndata_out.opt_data, len);
542 542
543 543
544 dn_nsp_send(skb); 544 dn_nsp_send(skb);
545 545
546 scp->persist = dn_nsp_persist(sk); 546 scp->persist = dn_nsp_persist(sk);
547 scp->persist_fxn = dn_nsp_retrans_conn_conf; 547 scp->persist_fxn = dn_nsp_retrans_conn_conf;
548 } 548 }
549 549
550 550
/*
 * Build and transmit a disconnect message (initiate or confirm).
 * Wire format: msgflag, remote addr, local addr, reason code, then —
 * for disconnect-initiate only — a length byte and @ddl bytes of
 * user disconnect data. Sent straight via dst_output() (not
 * dn_nsp_send()) so packets can go out with no socket attached.
 */
static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
			unsigned short reason, gfp_t gfp,
			struct dst_entry *dst,
			int ddl, unsigned char *dd, __le16 rem, __le16 loc)
{
	struct sk_buff *skb = NULL;
	/* 7 fixed bytes, optional data, plus its length byte for DISCINIT */
	int size = 7 + ddl + ((msgflg == NSP_DISCINIT) ? 1 : 0);
	unsigned char *msg;

	if ((dst == NULL) || (rem == 0)) {
		if (net_ratelimit())
			printk(KERN_DEBUG "DECnet: dn_nsp_do_disc: BUG! Please report this to SteveW@ACM.org rem=%u dst=%p\n", le16_to_cpu(rem), dst);
		return;
	}

	if ((skb = dn_alloc_skb(sk, size, gfp)) == NULL)
		return;

	msg = skb_put(skb, size);
	*msg++ = msgflg;
	*(__le16 *)msg = rem;		/* remote link address */
	msg += 2;
	*(__le16 *)msg = loc;		/* local link address */
	msg += 2;
	*(__le16 *)msg = cpu_to_le16(reason);
	msg += 2;
	if (msgflg == NSP_DISCINIT)
		*msg++ = ddl;

	if (ddl) {
		memcpy(msg, dd, ddl);
	}

	/*
	 * This doesn't go via the dn_nsp_send() function since we need
	 * to be able to send disc packets out which have no socket
	 * associations.
	 */
	skb->dst = dst_clone(dst);
	dst_output(skb);
}
592 592
593 593
594 void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg, 594 void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg,
595 unsigned short reason, gfp_t gfp) 595 unsigned short reason, gfp_t gfp)
596 { 596 {
597 struct dn_scp *scp = DN_SK(sk); 597 struct dn_scp *scp = DN_SK(sk);
598 int ddl = 0; 598 int ddl = 0;
599 599
600 if (msgflg == NSP_DISCINIT) 600 if (msgflg == NSP_DISCINIT)
601 ddl = dn_ntohs(scp->discdata_out.opt_optl); 601 ddl = le16_to_cpu(scp->discdata_out.opt_optl);
602 602
603 if (reason == 0) 603 if (reason == 0)
604 reason = dn_ntohs(scp->discdata_out.opt_status); 604 reason = le16_to_cpu(scp->discdata_out.opt_status);
605 605
606 dn_nsp_do_disc(sk, msgflg, reason, gfp, sk->sk_dst_cache, ddl, 606 dn_nsp_do_disc(sk, msgflg, reason, gfp, sk->sk_dst_cache, ddl,
607 scp->discdata_out.opt_data, scp->addrrem, scp->addrloc); 607 scp->discdata_out.opt_data, scp->addrrem, scp->addrloc);
608 } 608 }
609 609
610 610
611 void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg, 611 void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg,
612 unsigned short reason) 612 unsigned short reason)
613 { 613 {
614 struct dn_skb_cb *cb = DN_SKB_CB(skb); 614 struct dn_skb_cb *cb = DN_SKB_CB(skb);
615 int ddl = 0; 615 int ddl = 0;
616 gfp_t gfp = GFP_ATOMIC; 616 gfp_t gfp = GFP_ATOMIC;
617 617
618 dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl, 618 dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl,
619 NULL, cb->src_port, cb->dst_port); 619 NULL, cb->src_port, cb->dst_port);
620 } 620 }
621 621
622 622
623 void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval) 623 void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval)
624 { 624 {
625 struct dn_scp *scp = DN_SK(sk); 625 struct dn_scp *scp = DN_SK(sk);
626 struct sk_buff *skb; 626 struct sk_buff *skb;
627 unsigned char *ptr; 627 unsigned char *ptr;
628 gfp_t gfp = GFP_ATOMIC; 628 gfp_t gfp = GFP_ATOMIC;
629 629
630 if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL) 630 if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL)
631 return; 631 return;
632 632
633 skb_reserve(skb, DN_MAX_NSP_DATA_HEADER); 633 skb_reserve(skb, DN_MAX_NSP_DATA_HEADER);
634 ptr = skb_put(skb, 2); 634 ptr = skb_put(skb, 2);
635 DN_SKB_CB(skb)->nsp_flags = 0x10; 635 DN_SKB_CB(skb)->nsp_flags = 0x10;
636 *ptr++ = lsflags; 636 *ptr++ = lsflags;
637 *ptr = fcval; 637 *ptr = fcval;
638 638
639 dn_nsp_queue_xmit(sk, skb, gfp, 1); 639 dn_nsp_queue_xmit(sk, skb, gfp, 1);
640 640
641 scp->persist = dn_nsp_persist(sk); 641 scp->persist = dn_nsp_persist(sk);
642 scp->persist_fxn = dn_nsp_xmit_timeout; 642 scp->persist_fxn = dn_nsp_xmit_timeout;
643 } 643 }
644 644
645 static int dn_nsp_retrans_conninit(struct sock *sk) 645 static int dn_nsp_retrans_conninit(struct sock *sk)
646 { 646 {
647 struct dn_scp *scp = DN_SK(sk); 647 struct dn_scp *scp = DN_SK(sk);
648 648
649 if (scp->state == DN_CI) 649 if (scp->state == DN_CI)
650 dn_nsp_send_conninit(sk, NSP_RCI); 650 dn_nsp_send_conninit(sk, NSP_RCI);
651 651
652 return 0; 652 return 0;
653 } 653 }
654 654
/*
 * Build and send a connect-initiate (or retransmitted connect-initiate)
 * message: fixed header, destination/source usernames, menu version,
 * access control strings (user/password/account) and optional connect
 * data — each variable field preceded by a one byte length. Arms the
 * persist timer for retransmission until the connection progresses.
 */
void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
{
	struct dn_scp *scp = DN_SK(sk);
	struct nsp_conn_init_msg *msg;
	unsigned char aux;
	unsigned char menuver;
	struct dn_skb_cb *cb;
	unsigned char type = 1;
	/* Retransmissions may run from timer context: use GFP_ATOMIC then */
	gfp_t allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC;
	struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation);

	if (!skb)
		return;

	cb = DN_SKB_CB(skb);
	msg = (struct nsp_conn_init_msg *)skb_put(skb,sizeof(*msg));

	msg->msgflg = msgflg;
	msg->dstaddr = 0x0000;		/* Remote Node will assign it*/

	msg->srcaddr = scp->addrloc;
	msg->services = scp->services_loc;	/* Requested flow control */
	msg->info = scp->info_loc;	/* Version Number */
	msg->segsize = cpu_to_le16(scp->segsize_loc);	/* Max segment size */

	/* Object-number addressing uses format type 0 instead of 1 */
	if (scp->peer.sdn_objnum)
		type = 0;

	skb_put(skb, dn_sockaddr2username(&scp->peer,
					  skb_tail_pointer(skb), type));
	skb_put(skb, dn_sockaddr2username(&scp->addr,
					  skb_tail_pointer(skb), 2));

	menuver = DN_MENUVER_ACC | DN_MENUVER_USR;
	if (scp->peer.sdn_flags & SDF_PROXY)
		menuver |= DN_MENUVER_PRX;
	if (scp->peer.sdn_flags & SDF_UICPROXY)
		menuver |= DN_MENUVER_UIC;

	*skb_put(skb, 1) = menuver;	/* Menu Version */

	/* Access control: user, password, account — length byte + data */
	aux = scp->accessdata.acc_userl;
	*skb_put(skb, 1) = aux;
	if (aux > 0)
		memcpy(skb_put(skb, aux), scp->accessdata.acc_user, aux);

	aux = scp->accessdata.acc_passl;
	*skb_put(skb, 1) = aux;
	if (aux > 0)
		memcpy(skb_put(skb, aux), scp->accessdata.acc_pass, aux);

	aux = scp->accessdata.acc_accl;
	*skb_put(skb, 1) = aux;
	if (aux > 0)
		memcpy(skb_put(skb, aux), scp->accessdata.acc_acc, aux);

	/* Optional connect data supplied by the user */
	aux = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);
	*skb_put(skb, 1) = aux;
	if (aux > 0)
		memcpy(skb_put(skb,aux), scp->conndata_out.opt_data, aux);

	scp->persist = dn_nsp_persist(sk);
	scp->persist_fxn = dn_nsp_retrans_conninit;

	/* Ask for the packet back if it cannot be delivered */
	cb->rt_flags = DN_RT_F_RQR;

	dn_nsp_send(skb);
}
723 723
724 724
net/decnet/dn_route.c
1 /* 1 /*
2 * DECnet An implementation of the DECnet protocol suite for the LINUX 2 * DECnet An implementation of the DECnet protocol suite for the LINUX
3 * operating system. DECnet is implemented using the BSD Socket 3 * operating system. DECnet is implemented using the BSD Socket
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * DECnet Routing Functions (Endnode and Router) 6 * DECnet Routing Functions (Endnode and Router)
7 * 7 *
8 * Authors: Steve Whitehouse <SteveW@ACM.org> 8 * Authors: Steve Whitehouse <SteveW@ACM.org>
9 * Eduardo Marcelo Serrat <emserrat@geocities.com> 9 * Eduardo Marcelo Serrat <emserrat@geocities.com>
10 * 10 *
11 * Changes: 11 * Changes:
12 * Steve Whitehouse : Fixes to allow "intra-ethernet" and 12 * Steve Whitehouse : Fixes to allow "intra-ethernet" and
13 * "return-to-sender" bits on outgoing 13 * "return-to-sender" bits on outgoing
14 * packets. 14 * packets.
15 * Steve Whitehouse : Timeouts for cached routes. 15 * Steve Whitehouse : Timeouts for cached routes.
16 * Steve Whitehouse : Use dst cache for input routes too. 16 * Steve Whitehouse : Use dst cache for input routes too.
17 * Steve Whitehouse : Fixed error values in dn_send_skb. 17 * Steve Whitehouse : Fixed error values in dn_send_skb.
18 * Steve Whitehouse : Rework routing functions to better fit 18 * Steve Whitehouse : Rework routing functions to better fit
19 * DECnet routing design 19 * DECnet routing design
20 * Alexey Kuznetsov : New SMP locking 20 * Alexey Kuznetsov : New SMP locking
21 * Steve Whitehouse : More SMP locking changes & dn_cache_dump() 21 * Steve Whitehouse : More SMP locking changes & dn_cache_dump()
22 * Steve Whitehouse : Prerouting NF hook, now really is prerouting. 22 * Steve Whitehouse : Prerouting NF hook, now really is prerouting.
23 * Fixed possible skb leak in rtnetlink funcs. 23 * Fixed possible skb leak in rtnetlink funcs.
24 * Steve Whitehouse : Dave Miller's dynamic hash table sizing and 24 * Steve Whitehouse : Dave Miller's dynamic hash table sizing and
25 * Alexey Kuznetsov's finer grained locking 25 * Alexey Kuznetsov's finer grained locking
26 * from ipv4/route.c. 26 * from ipv4/route.c.
27 * Steve Whitehouse : Routing is now starting to look like a 27 * Steve Whitehouse : Routing is now starting to look like a
28 * sensible set of code now, mainly due to 28 * sensible set of code now, mainly due to
29 * my copying the IPv4 routing code. The 29 * my copying the IPv4 routing code. The
30 * hooks here are modified and will continue 30 * hooks here are modified and will continue
31 * to evolve for a while. 31 * to evolve for a while.
32 * Steve Whitehouse : Real SMP at last :-) Also new netfilter 32 * Steve Whitehouse : Real SMP at last :-) Also new netfilter
33 * stuff. Look out raw sockets your days 33 * stuff. Look out raw sockets your days
34 * are numbered! 34 * are numbered!
35 * Steve Whitehouse : Added return-to-sender functions. Added 35 * Steve Whitehouse : Added return-to-sender functions. Added
36 * backlog congestion level return codes. 36 * backlog congestion level return codes.
37 * Steve Whitehouse : Fixed bug where routes were set up with 37 * Steve Whitehouse : Fixed bug where routes were set up with
38 * no ref count on net devices. 38 * no ref count on net devices.
39 * Steve Whitehouse : RCU for the route cache 39 * Steve Whitehouse : RCU for the route cache
40 * Steve Whitehouse : Preparations for the flow cache 40 * Steve Whitehouse : Preparations for the flow cache
41 * Steve Whitehouse : Prepare for nonlinear skbs 41 * Steve Whitehouse : Prepare for nonlinear skbs
42 */ 42 */
43 43
44 /****************************************************************************** 44 /******************************************************************************
45 (c) 1995-1998 E.M. Serrat emserrat@geocities.com 45 (c) 1995-1998 E.M. Serrat emserrat@geocities.com
46 46
47 This program is free software; you can redistribute it and/or modify 47 This program is free software; you can redistribute it and/or modify
48 it under the terms of the GNU General Public License as published by 48 it under the terms of the GNU General Public License as published by
49 the Free Software Foundation; either version 2 of the License, or 49 the Free Software Foundation; either version 2 of the License, or
50 any later version. 50 any later version.
51 51
52 This program is distributed in the hope that it will be useful, 52 This program is distributed in the hope that it will be useful,
53 but WITHOUT ANY WARRANTY; without even the implied warranty of 53 but WITHOUT ANY WARRANTY; without even the implied warranty of
54 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 54 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
55 GNU General Public License for more details. 55 GNU General Public License for more details.
56 *******************************************************************************/ 56 *******************************************************************************/
57 57
58 #include <linux/errno.h> 58 #include <linux/errno.h>
59 #include <linux/types.h> 59 #include <linux/types.h>
60 #include <linux/socket.h> 60 #include <linux/socket.h>
61 #include <linux/in.h> 61 #include <linux/in.h>
62 #include <linux/kernel.h> 62 #include <linux/kernel.h>
63 #include <linux/sockios.h> 63 #include <linux/sockios.h>
64 #include <linux/net.h> 64 #include <linux/net.h>
65 #include <linux/netdevice.h> 65 #include <linux/netdevice.h>
66 #include <linux/inet.h> 66 #include <linux/inet.h>
67 #include <linux/route.h> 67 #include <linux/route.h>
68 #include <linux/in_route.h> 68 #include <linux/in_route.h>
69 #include <net/sock.h> 69 #include <net/sock.h>
70 #include <linux/mm.h> 70 #include <linux/mm.h>
71 #include <linux/proc_fs.h> 71 #include <linux/proc_fs.h>
72 #include <linux/seq_file.h> 72 #include <linux/seq_file.h>
73 #include <linux/init.h> 73 #include <linux/init.h>
74 #include <linux/rtnetlink.h> 74 #include <linux/rtnetlink.h>
75 #include <linux/string.h> 75 #include <linux/string.h>
76 #include <linux/netfilter_decnet.h> 76 #include <linux/netfilter_decnet.h>
77 #include <linux/rcupdate.h> 77 #include <linux/rcupdate.h>
78 #include <linux/times.h> 78 #include <linux/times.h>
79 #include <asm/errno.h> 79 #include <asm/errno.h>
80 #include <net/net_namespace.h> 80 #include <net/net_namespace.h>
81 #include <net/netlink.h> 81 #include <net/netlink.h>
82 #include <net/neighbour.h> 82 #include <net/neighbour.h>
83 #include <net/dst.h> 83 #include <net/dst.h>
84 #include <net/flow.h> 84 #include <net/flow.h>
85 #include <net/fib_rules.h> 85 #include <net/fib_rules.h>
86 #include <net/dn.h> 86 #include <net/dn.h>
87 #include <net/dn_dev.h> 87 #include <net/dn_dev.h>
88 #include <net/dn_nsp.h> 88 #include <net/dn_nsp.h>
89 #include <net/dn_route.h> 89 #include <net/dn_route.h>
90 #include <net/dn_neigh.h> 90 #include <net/dn_neigh.h>
91 #include <net/dn_fib.h> 91 #include <net/dn_fib.h>
92 92
/* One bucket of the DECnet route cache hash table. */
struct dn_rt_hash_bucket
{
	struct dn_route *chain;	/* head of this bucket's route chain */
	spinlock_t lock;	/* protects modifications to the chain */
};
98 98
extern struct neigh_table dn_neigh_table;


/* HIORD prefix from which DECnet ethernet MAC addresses are formed */
static unsigned char dn_hiord_addr[6] = {0xAA,0x00,0x04,0x00,0x00,0x00};

/* Bounds on how long a pending route cache flush may be delayed */
static const int dn_rt_min_delay = 2 * HZ;
static const int dn_rt_max_delay = 10 * HZ;
/* Lifetime of a learned path MTU before it is re-checked */
static const int dn_rt_mtu_expires = 10 * 60 * HZ;

/* Deadline (in jiffies) by which a scheduled flush must run */
static unsigned long dn_rt_deadline;

static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);

/* The route cache: an array of dn_rt_hash_mask + 1 buckets */
static struct dn_rt_hash_bucket *dn_rt_hash_table;
static unsigned dn_rt_hash_mask;

static struct timer_list dn_route_timer;
static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
/* Garbage collection interval, in seconds; tunable via sysctl */
int decnet_dst_gc_interval = 2;
124 124
/* dst_ops vector wiring the generic dst cache machinery to the
 * DECnet-specific callbacks defined in this file. */
static struct dst_ops dn_dst_ops = {
	.family =		PF_DECnet,
	.protocol =		__constant_htons(ETH_P_DNA_RT),
	.gc_thresh =		128,
	.gc =			dn_dst_gc,
	.check =		dn_dst_check,
	.negative_advice =	dn_dst_negative_advice,
	.link_failure =		dn_dst_link_failure,
	.update_pmtu =		dn_dst_update_pmtu,
	.entries =		ATOMIC_INIT(0),
};
136 136
137 static __inline__ unsigned dn_hash(__le16 src, __le16 dst) 137 static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
138 { 138 {
139 __u16 tmp = (__u16 __force)(src ^ dst); 139 __u16 tmp = (__u16 __force)(src ^ dst);
140 tmp ^= (tmp >> 3); 140 tmp ^= (tmp >> 3);
141 tmp ^= (tmp >> 5); 141 tmp ^= (tmp >> 5);
142 tmp ^= (tmp >> 10); 142 tmp ^= (tmp >> 10);
143 return dn_rt_hash_mask & (unsigned)tmp; 143 return dn_rt_hash_mask & (unsigned)tmp;
144 } 144 }
145 145
/* Free a cached route once an RCU grace period has elapsed, since
 * readers may still be walking the chain it was just unlinked from. */
static inline void dnrt_free(struct dn_route *rt)
{
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}
150 150
/* Release our reference on a cached route and schedule it for
 * RCU-deferred freeing (used when discarding a duplicate entry). */
static inline void dnrt_drop(struct dn_route *rt)
{
	dst_release(&rt->u.dst);
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}
156 156
157 static void dn_dst_check_expire(unsigned long dummy) 157 static void dn_dst_check_expire(unsigned long dummy)
158 { 158 {
159 int i; 159 int i;
160 struct dn_route *rt, **rtp; 160 struct dn_route *rt, **rtp;
161 unsigned long now = jiffies; 161 unsigned long now = jiffies;
162 unsigned long expire = 120 * HZ; 162 unsigned long expire = 120 * HZ;
163 163
164 for(i = 0; i <= dn_rt_hash_mask; i++) { 164 for(i = 0; i <= dn_rt_hash_mask; i++) {
165 rtp = &dn_rt_hash_table[i].chain; 165 rtp = &dn_rt_hash_table[i].chain;
166 166
167 spin_lock(&dn_rt_hash_table[i].lock); 167 spin_lock(&dn_rt_hash_table[i].lock);
168 while((rt=*rtp) != NULL) { 168 while((rt=*rtp) != NULL) {
169 if (atomic_read(&rt->u.dst.__refcnt) || 169 if (atomic_read(&rt->u.dst.__refcnt) ||
170 (now - rt->u.dst.lastuse) < expire) { 170 (now - rt->u.dst.lastuse) < expire) {
171 rtp = &rt->u.dst.dn_next; 171 rtp = &rt->u.dst.dn_next;
172 continue; 172 continue;
173 } 173 }
174 *rtp = rt->u.dst.dn_next; 174 *rtp = rt->u.dst.dn_next;
175 rt->u.dst.dn_next = NULL; 175 rt->u.dst.dn_next = NULL;
176 dnrt_free(rt); 176 dnrt_free(rt);
177 } 177 }
178 spin_unlock(&dn_rt_hash_table[i].lock); 178 spin_unlock(&dn_rt_hash_table[i].lock);
179 179
180 if ((jiffies - now) > 0) 180 if ((jiffies - now) > 0)
181 break; 181 break;
182 } 182 }
183 183
184 mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ); 184 mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
185 } 185 }
186 186
/*
 * Garbage-collection callback for dn_dst_ops: scan every bucket and
 * reclaim unreferenced entries idle for 10 seconds or more.  Note the
 * "break" after dnrt_drop() — at most ONE route is reclaimed from each
 * chain per invocation, keeping the work under lock small.
 * Always returns 0.
 */
static int dn_dst_gc(struct dst_ops *ops)
{
	struct dn_route *rt, **rtp;
	int i;
	unsigned long now = jiffies;
	unsigned long expire = 10 * HZ;

	for(i = 0; i <= dn_rt_hash_mask; i++) {

		spin_lock_bh(&dn_rt_hash_table[i].lock);
		rtp = &dn_rt_hash_table[i].chain;

		while((rt=*rtp) != NULL) {
			if (atomic_read(&rt->u.dst.__refcnt) ||
					(now - rt->u.dst.lastuse) < expire) {
				rtp = &rt->u.dst.dn_next;
				continue;
			}
			/* Unlink, drop and move on to the next bucket */
			*rtp = rt->u.dst.dn_next;
			rt->u.dst.dn_next = NULL;
			dnrt_drop(rt);
			break;
		}
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}

	return 0;
}
215 215
216 /* 216 /*
217 * The decnet standards don't impose a particular minimum mtu, what they 217 * The decnet standards don't impose a particular minimum mtu, what they
218 * do insist on is that the routing layer accepts a datagram of at least 218 * do insist on is that the routing layer accepts a datagram of at least
219 * 230 bytes long. Here we have to subtract the routing header length from 219 * 230 bytes long. Here we have to subtract the routing header length from
220 * 230 to get the minimum acceptable mtu. If there is no neighbour, then we 220 * 230 to get the minimum acceptable mtu. If there is no neighbour, then we
221 * assume the worst and use a long header size. 221 * assume the worst and use a long header size.
222 * 222 *
223 * We update both the mtu and the advertised mss (i.e. the segment size we 223 * We update both the mtu and the advertised mss (i.e. the segment size we
224 * advertise to the other end). 224 * advertise to the other end).
225 */ 225 */
226 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu) 226 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
227 { 227 {
228 u32 min_mtu = 230; 228 u32 min_mtu = 230;
229 struct dn_dev *dn = dst->neighbour ? 229 struct dn_dev *dn = dst->neighbour ?
230 (struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL; 230 (struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL;
231 231
232 if (dn && dn->use_long == 0) 232 if (dn && dn->use_long == 0)
233 min_mtu -= 6; 233 min_mtu -= 6;
234 else 234 else
235 min_mtu -= 21; 235 min_mtu -= 21;
236 236
237 if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) { 237 if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
238 if (!(dst_metric_locked(dst, RTAX_MTU))) { 238 if (!(dst_metric_locked(dst, RTAX_MTU))) {
239 dst->metrics[RTAX_MTU-1] = mtu; 239 dst->metrics[RTAX_MTU-1] = mtu;
240 dst_set_expires(dst, dn_rt_mtu_expires); 240 dst_set_expires(dst, dn_rt_mtu_expires);
241 } 241 }
242 if (!(dst_metric_locked(dst, RTAX_ADVMSS))) { 242 if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
243 u32 mss = mtu - DN_MAX_NSP_DATA_HEADER; 243 u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
244 if (dst_metric(dst, RTAX_ADVMSS) > mss) 244 if (dst_metric(dst, RTAX_ADVMSS) > mss)
245 dst->metrics[RTAX_ADVMSS-1] = mss; 245 dst->metrics[RTAX_ADVMSS-1] = mss;
246 } 246 }
247 } 247 }
248 } 248 }
249 249
/*
 * When a route has been marked obsolete (e.g. routing cache flush),
 * report it as invalid (NULL) so the caller performs a fresh lookup.
 */
static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
{
	return NULL;
}
257 257
/* A user of this route reported trouble with it: drop the reference
 * and return NULL so the caller forgets the cached entry. */
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
{
	dst_release(dst);
	return NULL;
}
263 263
/*
 * Link-failure callback for dn_dst_ops.  DECnet takes no action on
 * link failure; the hook merely has to exist.
 */
static void dn_dst_link_failure(struct sk_buff *skb)
{
}
268 268
269 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 269 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
270 { 270 {
271 return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) | 271 return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) |
272 (fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) | 272 (fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) |
273 (fl1->mark ^ fl2->mark) | 273 (fl1->mark ^ fl2->mark) |
274 (fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) | 274 (fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) |
275 (fl1->oif ^ fl2->oif) | 275 (fl1->oif ^ fl2->oif) |
276 (fl1->iif ^ fl2->iif)) == 0; 276 (fl1->iif ^ fl2->iif)) == 0;
277 } 277 }
278 278
/*
 * dn_insert_route - publish a route into the cache
 * @rt: the freshly created route
 * @hash: bucket index (from dn_hash())
 * @rp: output; set to the route actually cached
 *
 * If an entry with the same flow key is already on the chain it is
 * moved to the front, its use count is bumped, and @rt is dropped in
 * favour of the existing entry.  Otherwise @rt is published at the
 * head of the chain via rcu_assign_pointer() so concurrent RCU readers
 * see a consistent list.  Always returns 0.
 */
static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
{
	struct dn_route *rth, **rthp;
	unsigned long now = jiffies;

	rthp = &dn_rt_hash_table[hash].chain;

	spin_lock_bh(&dn_rt_hash_table[hash].lock);
	while((rth = *rthp) != NULL) {
		if (compare_keys(&rth->fl, &rt->fl)) {
			/* Put it first */
			*rthp = rth->u.dst.dn_next;
			rcu_assign_pointer(rth->u.dst.dn_next,
					   dn_rt_hash_table[hash].chain);
			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);

			dst_use(&rth->u.dst, now);
			spin_unlock_bh(&dn_rt_hash_table[hash].lock);

			dnrt_drop(rt);
			*rp = rth;
			return 0;
		}
		rthp = &rth->u.dst.dn_next;
	}

	rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain);
	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);

	dst_use(&rt->u.dst, now);
	spin_unlock_bh(&dn_rt_hash_table[hash].lock);
	*rp = rt;
	return 0;
}
313 313
314 void dn_run_flush(unsigned long dummy) 314 void dn_run_flush(unsigned long dummy)
315 { 315 {
316 int i; 316 int i;
317 struct dn_route *rt, *next; 317 struct dn_route *rt, *next;
318 318
319 for(i = 0; i < dn_rt_hash_mask; i++) { 319 for(i = 0; i < dn_rt_hash_mask; i++) {
320 spin_lock_bh(&dn_rt_hash_table[i].lock); 320 spin_lock_bh(&dn_rt_hash_table[i].lock);
321 321
322 if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL) 322 if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL)
323 goto nothing_to_declare; 323 goto nothing_to_declare;
324 324
325 for(; rt; rt=next) { 325 for(; rt; rt=next) {
326 next = rt->u.dst.dn_next; 326 next = rt->u.dst.dn_next;
327 rt->u.dst.dn_next = NULL; 327 rt->u.dst.dn_next = NULL;
328 dst_free((struct dst_entry *)rt); 328 dst_free((struct dst_entry *)rt);
329 } 329 }
330 330
331 nothing_to_declare: 331 nothing_to_declare:
332 spin_unlock_bh(&dn_rt_hash_table[i].lock); 332 spin_unlock_bh(&dn_rt_hash_table[i].lock);
333 } 333 }
334 } 334 }
335 335
336 static DEFINE_SPINLOCK(dn_rt_flush_lock); 336 static DEFINE_SPINLOCK(dn_rt_flush_lock);
337 337
/*
 * dn_rt_cache_flush - flush the route cache, possibly deferred
 * @delay: jiffies to wait before flushing; negative means use the
 *         minimum delay, and <= 0 after clamping flushes synchronously.
 *
 * Batches bursts of flush requests through dn_rt_flush_timer.
 * dn_rt_deadline records the latest time the flush may be postponed to
 * (dn_rt_max_delay after the first request), so repeated requests
 * cannot defer it forever.
 */
void dn_rt_cache_flush(int delay)
{
	unsigned long now = jiffies;
	int user_mode = !in_interrupt();

	if (delay < 0)
		delay = dn_rt_min_delay;

	spin_lock_bh(&dn_rt_flush_lock);

	if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
		long tmo = (long)(dn_rt_deadline - now);

		/* From process context, flush immediately once we are
		 * within dn_rt_min_delay of the deadline */
		if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
			tmo = 0;

		if (delay > tmo)
			delay = tmo;
	}

	if (delay <= 0) {
		spin_unlock_bh(&dn_rt_flush_lock);
		dn_run_flush(0);
		return;
	}

	/* First deferred request: fix the hard deadline */
	if (dn_rt_deadline == 0)
		dn_rt_deadline = now + dn_rt_max_delay;

	dn_rt_flush_timer.expires = now + delay;
	add_timer(&dn_rt_flush_timer);
	spin_unlock_bh(&dn_rt_flush_lock);
}
371 371
/**
 * dn_return_short - Return a short packet to its sender
 * @skb: The packet to return
 *
 * Rewrites the short-format routing header in place: clears the
 * return-request flag, sets return-to-sender, swaps the source and
 * destination addresses, zeroes the hop count and retransmits.
 */
static int dn_return_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	__le16 *src;
	__le16 *dst;
	__le16 tmp;

	/* Add back headers */
	skb_push(skb, skb->data - skb_network_header(skb));

	/* We are about to modify the header: get a private copy */
	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Skip packet length and point to flags */
	ptr = skb->data + 2;
	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;

	dst = (__le16 *)ptr;
	ptr += 2;
	src = (__le16 *)ptr;
	ptr += 2;
	*ptr = 0; /* Zero hop count */

	/* Swap source and destination */
	tmp = *src;
	*src = *dst;
	*dst = tmp;

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, NULL, NULL);
	return NET_RX_SUCCESS;
}
411 411
/**
 * dn_return_long - Return a long packet to its sender
 * @skb: The long format packet to return
 *
 * Long-format counterpart of dn_return_short(): flips the routing
 * flags, swaps the 6-byte source and destination addresses, zeroes the
 * hop count and retransmits the packet towards its originator.
 */
static int dn_return_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	unsigned char *src_addr, *dst_addr;
	unsigned char tmp[ETH_ALEN];

	/* Add back all headers */
	skb_push(skb, skb->data - skb_network_header(skb));

	/* We are about to modify the header: get a private copy */
	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Ignore packet length and point to flags */
	ptr = skb->data + 2;

	/* Skip padding */
	if (*ptr & DN_RT_F_PF) {
		char padlen = (*ptr & ~DN_RT_F_PF);
		ptr += padlen;
	}

	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
	ptr += 2;	/* skip 2 bytes before the destination address
			 * (presumably area/subarea fields — confirm) */
	dst_addr = ptr;
	ptr += 8;	/* 6-byte address plus 2 more bytes before the
			 * source address — layout per long header format */
	src_addr = ptr;
	ptr += 6;
	*ptr = 0; /* Zero hop count */

	/* Swap source and destination */
	memcpy(tmp, src_addr, ETH_ALEN);
	memcpy(src_addr, dst_addr, ETH_ALEN);
	memcpy(dst_addr, tmp, ETH_ALEN);

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, dst_addr, src_addr);
	return NET_RX_SUCCESS;
}
457 457
458 /** 458 /**
459 * dn_route_rx_packet - Try and find a route for an incoming packet 459 * dn_route_rx_packet - Try and find a route for an incoming packet
460 * @skb: The packet to find a route for 460 * @skb: The packet to find a route for
461 * 461 *
462 * Returns: result of input function if route is found, error code otherwise 462 * Returns: result of input function if route is found, error code otherwise
463 */ 463 */
464 static int dn_route_rx_packet(struct sk_buff *skb) 464 static int dn_route_rx_packet(struct sk_buff *skb)
465 { 465 {
466 struct dn_skb_cb *cb = DN_SKB_CB(skb); 466 struct dn_skb_cb *cb = DN_SKB_CB(skb);
467 int err; 467 int err;
468 468
469 if ((err = dn_route_input(skb)) == 0) 469 if ((err = dn_route_input(skb)) == 0)
470 return dst_input(skb); 470 return dst_input(skb);
471 471
472 if (decnet_debug_level & 4) { 472 if (decnet_debug_level & 4) {
473 char *devname = skb->dev ? skb->dev->name : "???"; 473 char *devname = skb->dev ? skb->dev->name : "???";
474 struct dn_skb_cb *cb = DN_SKB_CB(skb); 474 struct dn_skb_cb *cb = DN_SKB_CB(skb);
475 printk(KERN_DEBUG 475 printk(KERN_DEBUG
476 "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n", 476 "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
477 (int)cb->rt_flags, devname, skb->len, 477 (int)cb->rt_flags, devname, skb->len,
478 dn_ntohs(cb->src), dn_ntohs(cb->dst), 478 le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
479 err, skb->pkt_type); 479 err, skb->pkt_type);
480 } 480 }
481 481
482 if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) { 482 if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
483 switch(cb->rt_flags & DN_RT_PKT_MSK) { 483 switch(cb->rt_flags & DN_RT_PKT_MSK) {
484 case DN_RT_PKT_SHORT: 484 case DN_RT_PKT_SHORT:
485 return dn_return_short(skb); 485 return dn_return_short(skb);
486 case DN_RT_PKT_LONG: 486 case DN_RT_PKT_LONG:
487 return dn_return_long(skb); 487 return dn_return_long(skb);
488 } 488 }
489 } 489 }
490 490
491 kfree_skb(skb); 491 kfree_skb(skb);
492 return NET_RX_DROP; 492 return NET_RX_DROP;
493 } 493 }
494 494
/*
 * Parse the long-format (20 byte) routing header of an incoming packet,
 * fill in the skb control block and hand the packet to the PRE_ROUTING
 * netfilter hook.  Both addresses must carry the DECnet "Hi-Ord"
 * Ethernet prefix or the packet is dropped.
 */
static int dn_route_rx_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 20);
	skb_reset_transport_header(skb);

	/* Destination info */
	ptr += 2;
	cb->dst = dn_eth2dn(ptr);
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;


	/* Source info */
	ptr += 2;
	cb->src = dn_eth2dn(ptr);
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;
	/* Other junk */
	ptr++;
	cb->hops = *ptr++; /* Visit Count */

	return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}
530 530
531 531
532 532
/*
 * Parse the short-format (5 byte) routing header of an incoming packet,
 * record source, destination and hop count in the control block and
 * hand the packet to the PRE_ROUTING netfilter hook.
 */
static int dn_route_rx_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 5);
	skb_reset_transport_header(skb);

	/* Addresses are kept in on-wire little-endian form (__le16) */
	cb->dst = *(__le16 *)ptr;
	ptr += 2;
	cb->src = *(__le16 *)ptr;
	ptr += 2;
	cb->hops = *ptr & 0x3f;	/* low 6 bits hold the visit count */

	return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}
556 556
/* Netfilter okfn that deliberately discards routing control messages
 * this host does not process (L1/L2 routing messages). */
static int dn_route_discard(struct sk_buff *skb)
{
	/*
	 * I know we drop the packet here, but thats considered success in
	 * this case
	 */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
566 566
/* Handle a hello received on a point-to-point link: notify both the
 * device layer and the neighbour cache. */
static int dn_route_ptp_hello(struct sk_buff *skb)
{
	dn_dev_hello(skb);
	dn_neigh_pointopoint_hello(skb);
	return NET_RX_SUCCESS;
}
573 573
/*
 * dn_route_rcv - packet_type receive handler for ETH_P_DNA_RT frames
 *
 * Validates and strips the common DECnet framing (16-bit length word,
 * optional padding, flags byte), then dispatches control packets to the
 * hello/routing handlers via the appropriate netfilter hooks, and data
 * packets to the long/short header parsers.  Returns NET_RX_DROP for
 * anything it consumes without dispatching.
 */
int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct dn_skb_cb *cb;
	unsigned char flags = 0;
	/* NOTE(review): the length word is read before the
	 * pskb_may_pull(skb, 3) check below — presumably the receive path
	 * guarantees at least 2 linear bytes; confirm. */
	__u16 len = le16_to_cpu(*(__le16 *)skb->data);
	struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr;
	unsigned char padlen = 0;

	if (!net_eq(dev_net(dev), &init_net))
		goto dump_it;

	/* Device is not configured for DECnet */
	if (dn == NULL)
		goto dump_it;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto out;

	if (!pskb_may_pull(skb, 3))
		goto dump_it;

	skb_pull(skb, 2);

	if (len > skb->len)
		goto dump_it;

	skb_trim(skb, len);

	flags = *skb->data;

	cb = DN_SKB_CB(skb);
	cb->stamp = jiffies;
	cb->iif = dev->ifindex;

	/*
	 * If we have padding, remove it.
	 */
	if (flags & DN_RT_F_PF) {
		padlen = flags & ~DN_RT_F_PF;
		if (!pskb_may_pull(skb, padlen + 1))
			goto dump_it;
		skb_pull(skb, padlen);
		flags = *skb->data;
	}

	skb_reset_network_header(skb);

	/*
	 * Weed out future version DECnet
	 */
	if (flags & DN_RT_F_VER)
		goto dump_it;

	cb->rt_flags = flags;

	if (decnet_debug_level & 1)
		printk(KERN_DEBUG
			"dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
			(int)flags, (dev) ? dev->name : "???", len, skb->len,
			padlen);

	if (flags & DN_RT_PKT_CNTL) {
		/* Control handlers expect linear data */
		if (unlikely(skb_linearize(skb)))
			goto dump_it;

		switch(flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_INIT:
			dn_dev_init_pkt(skb);
			break;
		case DN_RT_PKT_VERI:
			dn_dev_veri_pkt(skb);
			break;
		}

		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		switch(flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_HELO:
			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello);

		case DN_RT_PKT_L1RT:
		case DN_RT_PKT_L2RT:
			return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
		case DN_RT_PKT_ERTH:
			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello);

		case DN_RT_PKT_EEDH:
			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello);
		}
	} else {
		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		skb_pull(skb, 1); /* Pull flags */

		switch(flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_LONG:
			return dn_route_rx_long(skb);
		case DN_RT_PKT_SHORT:
			return dn_route_rx_short(skb);
		}
	}

dump_it:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}
682 682
/*
 * Output hook for locally originated packets: stamps the control block
 * with the route's source/destination, forces the Intra-Ethernet flag,
 * zeroes the hop count and passes the skb through the LOCAL_OUT
 * netfilter hook to the neighbour output function.  Returns -EINVAL
 * (and frees the skb) if the route has no neighbour attached.
 */
static int dn_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct dn_route *rt = (struct dn_route *)dst;
	struct net_device *dev = dst->dev;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct neighbour *neigh;

	int err = -EINVAL;

	if ((neigh = dst->neighbour) == NULL)
		goto error;

	skb->dev = dev;

	cb->src = rt->rt_saddr;
	cb->dst = rt->rt_daddr;

	/*
	 * Always set the Intra-Ethernet bit on all outgoing packets
	 * originated on this node. Only valid flag from upper layers
	 * is return-to-sender-requested. Set hop count to 0 too.
	 */
	cb->rt_flags &= ~DN_RT_F_RQR;
	cb->rt_flags |= DN_RT_F_IE;
	cb->hops = 0;

	return NF_HOOK(PF_DECnet, NF_DN_LOCAL_OUT, skb, NULL, dev, neigh->output);

error:
	if (net_ratelimit())
		printk(KERN_DEBUG "dn_output: This should not happen\n");

	kfree_skb(skb);

	return err;
}
720 720
/*
 * Forward a packet not addressed to this node: ensure headroom for the
 * outgoing routing header, enforce the hop-count limit (30), manage the
 * Intra-Ethernet flag and pass the skb through the NF_DN_FORWARD
 * netfilter hook to the neighbour output function.
 */
static int dn_forward(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dst_entry *dst = skb->dst;
	struct dn_dev *dn_db = dst->dev->dn_ptr;
	struct dn_route *rt;
	struct neighbour *neigh = dst->neighbour;
	int header_len;
#ifdef CONFIG_NETFILTER
	struct net_device *dev = skb->dev;	/* inbound device, for the hook */
#endif

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	/* Ensure that we have enough space for headers */
	rt = (struct dn_route *)skb->dst;
	header_len = dn_db->use_long ? 21 : 6;
	if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len))
		goto drop;

	/*
	 * Hop count exceeded.
	 */
	if (++cb->hops > 30)
		goto drop;

	skb->dev = rt->u.dst.dev;

	/*
	 * If packet goes out same interface it came in on, then set
	 * the Intra-Ethernet bit. This has no effect for short
	 * packets, so we don't need to test for them here.
	 */
	cb->rt_flags &= ~DN_RT_F_IE;
	if (rt->rt_flags & RTCF_DOREDIRECT)
		cb->rt_flags |= DN_RT_F_IE;

	return NF_HOOK(PF_DECnet, NF_DN_FORWARD, skb, dev, skb->dev, neigh->output);

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
765 765
766 /* 766 /*
767 * Used to catch bugs. This should never normally get 767 * Used to catch bugs. This should never normally get
768 * called. 768 * called.
769 */ 769 */
770 static int dn_rt_bug(struct sk_buff *skb) 770 static int dn_rt_bug(struct sk_buff *skb)
771 { 771 {
772 if (net_ratelimit()) { 772 if (net_ratelimit()) {
773 struct dn_skb_cb *cb = DN_SKB_CB(skb); 773 struct dn_skb_cb *cb = DN_SKB_CB(skb);
774 774
775 printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n", 775 printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n",
776 dn_ntohs(cb->src), dn_ntohs(cb->dst)); 776 le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
777 } 777 }
778 778
779 kfree_skb(skb); 779 kfree_skb(skb);
780 780
781 return NET_RX_BAD; 781 return NET_RX_BAD;
782 } 782 }
783 783
784 static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) 784 static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
785 { 785 {
786 struct dn_fib_info *fi = res->fi; 786 struct dn_fib_info *fi = res->fi;
787 struct net_device *dev = rt->u.dst.dev; 787 struct net_device *dev = rt->u.dst.dev;
788 struct neighbour *n; 788 struct neighbour *n;
789 unsigned mss; 789 unsigned mss;
790 790
791 if (fi) { 791 if (fi) {
792 if (DN_FIB_RES_GW(*res) && 792 if (DN_FIB_RES_GW(*res) &&
793 DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) 793 DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
794 rt->rt_gateway = DN_FIB_RES_GW(*res); 794 rt->rt_gateway = DN_FIB_RES_GW(*res);
795 memcpy(rt->u.dst.metrics, fi->fib_metrics, 795 memcpy(rt->u.dst.metrics, fi->fib_metrics,
796 sizeof(rt->u.dst.metrics)); 796 sizeof(rt->u.dst.metrics));
797 } 797 }
798 rt->rt_type = res->type; 798 rt->rt_type = res->type;
799 799
800 if (dev != NULL && rt->u.dst.neighbour == NULL) { 800 if (dev != NULL && rt->u.dst.neighbour == NULL) {
801 n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev); 801 n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
802 if (IS_ERR(n)) 802 if (IS_ERR(n))
803 return PTR_ERR(n); 803 return PTR_ERR(n);
804 rt->u.dst.neighbour = n; 804 rt->u.dst.neighbour = n;
805 } 805 }
806 806
807 if (dst_metric(&rt->u.dst, RTAX_MTU) == 0 || 807 if (dst_metric(&rt->u.dst, RTAX_MTU) == 0 ||
808 dst_metric(&rt->u.dst, RTAX_MTU) > rt->u.dst.dev->mtu) 808 dst_metric(&rt->u.dst, RTAX_MTU) > rt->u.dst.dev->mtu)
809 rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu; 809 rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
810 mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst)); 810 mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst));
811 if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0 || 811 if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0 ||
812 dst_metric(&rt->u.dst, RTAX_ADVMSS) > mss) 812 dst_metric(&rt->u.dst, RTAX_ADVMSS) > mss)
813 rt->u.dst.metrics[RTAX_ADVMSS-1] = mss; 813 rt->u.dst.metrics[RTAX_ADVMSS-1] = mss;
814 return 0; 814 return 0;
815 } 815 }
816 816
817 static inline int dn_match_addr(__le16 addr1, __le16 addr2) 817 static inline int dn_match_addr(__le16 addr1, __le16 addr2)
818 { 818 {
819 __u16 tmp = dn_ntohs(addr1) ^ dn_ntohs(addr2); 819 __u16 tmp = le16_to_cpu(addr1) ^ le16_to_cpu(addr2);
820 int match = 16; 820 int match = 16;
821 while(tmp) { 821 while(tmp) {
822 tmp >>= 1; 822 tmp >>= 1;
823 match--; 823 match--;
824 } 824 }
825 return match; 825 return match;
826 } 826 }
827 827
828 static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope) 828 static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
829 { 829 {
830 __le16 saddr = 0; 830 __le16 saddr = 0;
831 struct dn_dev *dn_db = dev->dn_ptr; 831 struct dn_dev *dn_db = dev->dn_ptr;
832 struct dn_ifaddr *ifa; 832 struct dn_ifaddr *ifa;
833 int best_match = 0; 833 int best_match = 0;
834 int ret; 834 int ret;
835 835
836 read_lock(&dev_base_lock); 836 read_lock(&dev_base_lock);
837 for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) { 837 for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
838 if (ifa->ifa_scope > scope) 838 if (ifa->ifa_scope > scope)
839 continue; 839 continue;
840 if (!daddr) { 840 if (!daddr) {
841 saddr = ifa->ifa_local; 841 saddr = ifa->ifa_local;
842 break; 842 break;
843 } 843 }
844 ret = dn_match_addr(daddr, ifa->ifa_local); 844 ret = dn_match_addr(daddr, ifa->ifa_local);
845 if (ret > best_match) 845 if (ret > best_match)
846 saddr = ifa->ifa_local; 846 saddr = ifa->ifa_local;
847 if (best_match == 0) 847 if (best_match == 0)
848 saddr = ifa->ifa_local; 848 saddr = ifa->ifa_local;
849 } 849 }
850 read_unlock(&dev_base_lock); 850 read_unlock(&dev_base_lock);
851 851
852 return saddr; 852 return saddr;
853 } 853 }
854 854
855 static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res) 855 static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
856 { 856 {
857 return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope); 857 return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
858 } 858 }
859 859
860 static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res) 860 static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res)
861 { 861 {
862 __le16 mask = dnet_make_mask(res->prefixlen); 862 __le16 mask = dnet_make_mask(res->prefixlen);
863 return (daddr&~mask)|res->fi->fib_nh->nh_gw; 863 return (daddr&~mask)|res->fi->fib_nh->nh_gw;
864 } 864 }
865 865
866 static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard) 866 static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
867 { 867 {
868 struct flowi fl = { .nl_u = { .dn_u = 868 struct flowi fl = { .nl_u = { .dn_u =
869 { .daddr = oldflp->fld_dst, 869 { .daddr = oldflp->fld_dst,
870 .saddr = oldflp->fld_src, 870 .saddr = oldflp->fld_src,
871 .scope = RT_SCOPE_UNIVERSE, 871 .scope = RT_SCOPE_UNIVERSE,
872 } }, 872 } },
873 .mark = oldflp->mark, 873 .mark = oldflp->mark,
874 .iif = init_net.loopback_dev->ifindex, 874 .iif = init_net.loopback_dev->ifindex,
875 .oif = oldflp->oif }; 875 .oif = oldflp->oif };
876 struct dn_route *rt = NULL; 876 struct dn_route *rt = NULL;
877 struct net_device *dev_out = NULL, *dev; 877 struct net_device *dev_out = NULL, *dev;
878 struct neighbour *neigh = NULL; 878 struct neighbour *neigh = NULL;
879 unsigned hash; 879 unsigned hash;
880 unsigned flags = 0; 880 unsigned flags = 0;
881 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST }; 881 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
882 int err; 882 int err;
883 int free_res = 0; 883 int free_res = 0;
884 __le16 gateway = 0; 884 __le16 gateway = 0;
885 885
886 if (decnet_debug_level & 16) 886 if (decnet_debug_level & 16)
887 printk(KERN_DEBUG 887 printk(KERN_DEBUG
888 "dn_route_output_slow: dst=%04x src=%04x mark=%d" 888 "dn_route_output_slow: dst=%04x src=%04x mark=%d"
889 " iif=%d oif=%d\n", dn_ntohs(oldflp->fld_dst), 889 " iif=%d oif=%d\n", le16_to_cpu(oldflp->fld_dst),
890 dn_ntohs(oldflp->fld_src), 890 le16_to_cpu(oldflp->fld_src),
891 oldflp->mark, init_net.loopback_dev->ifindex, oldflp->oif); 891 oldflp->mark, init_net.loopback_dev->ifindex, oldflp->oif);
892 892
893 /* If we have an output interface, verify its a DECnet device */ 893 /* If we have an output interface, verify its a DECnet device */
894 if (oldflp->oif) { 894 if (oldflp->oif) {
895 dev_out = dev_get_by_index(&init_net, oldflp->oif); 895 dev_out = dev_get_by_index(&init_net, oldflp->oif);
896 err = -ENODEV; 896 err = -ENODEV;
897 if (dev_out && dev_out->dn_ptr == NULL) { 897 if (dev_out && dev_out->dn_ptr == NULL) {
898 dev_put(dev_out); 898 dev_put(dev_out);
899 dev_out = NULL; 899 dev_out = NULL;
900 } 900 }
901 if (dev_out == NULL) 901 if (dev_out == NULL)
902 goto out; 902 goto out;
903 } 903 }
904 904
905 /* If we have a source address, verify that its a local address */ 905 /* If we have a source address, verify that its a local address */
906 if (oldflp->fld_src) { 906 if (oldflp->fld_src) {
907 err = -EADDRNOTAVAIL; 907 err = -EADDRNOTAVAIL;
908 908
909 if (dev_out) { 909 if (dev_out) {
910 if (dn_dev_islocal(dev_out, oldflp->fld_src)) 910 if (dn_dev_islocal(dev_out, oldflp->fld_src))
911 goto source_ok; 911 goto source_ok;
912 dev_put(dev_out); 912 dev_put(dev_out);
913 goto out; 913 goto out;
914 } 914 }
915 read_lock(&dev_base_lock); 915 read_lock(&dev_base_lock);
916 for_each_netdev(&init_net, dev) { 916 for_each_netdev(&init_net, dev) {
917 if (!dev->dn_ptr) 917 if (!dev->dn_ptr)
918 continue; 918 continue;
919 if (!dn_dev_islocal(dev, oldflp->fld_src)) 919 if (!dn_dev_islocal(dev, oldflp->fld_src))
920 continue; 920 continue;
921 if ((dev->flags & IFF_LOOPBACK) && 921 if ((dev->flags & IFF_LOOPBACK) &&
922 oldflp->fld_dst && 922 oldflp->fld_dst &&
923 !dn_dev_islocal(dev, oldflp->fld_dst)) 923 !dn_dev_islocal(dev, oldflp->fld_dst))
924 continue; 924 continue;
925 925
926 dev_out = dev; 926 dev_out = dev;
927 break; 927 break;
928 } 928 }
929 read_unlock(&dev_base_lock); 929 read_unlock(&dev_base_lock);
930 if (dev_out == NULL) 930 if (dev_out == NULL)
931 goto out; 931 goto out;
932 dev_hold(dev_out); 932 dev_hold(dev_out);
933 source_ok: 933 source_ok:
934 ; 934 ;
935 } 935 }
936 936
937 /* No destination? Assume its local */ 937 /* No destination? Assume its local */
938 if (!fl.fld_dst) { 938 if (!fl.fld_dst) {
939 fl.fld_dst = fl.fld_src; 939 fl.fld_dst = fl.fld_src;
940 940
941 err = -EADDRNOTAVAIL; 941 err = -EADDRNOTAVAIL;
942 if (dev_out) 942 if (dev_out)
943 dev_put(dev_out); 943 dev_put(dev_out);
944 dev_out = init_net.loopback_dev; 944 dev_out = init_net.loopback_dev;
945 dev_hold(dev_out); 945 dev_hold(dev_out);
946 if (!fl.fld_dst) { 946 if (!fl.fld_dst) {
947 fl.fld_dst = 947 fl.fld_dst =
948 fl.fld_src = dnet_select_source(dev_out, 0, 948 fl.fld_src = dnet_select_source(dev_out, 0,
949 RT_SCOPE_HOST); 949 RT_SCOPE_HOST);
950 if (!fl.fld_dst) 950 if (!fl.fld_dst)
951 goto out; 951 goto out;
952 } 952 }
953 fl.oif = init_net.loopback_dev->ifindex; 953 fl.oif = init_net.loopback_dev->ifindex;
954 res.type = RTN_LOCAL; 954 res.type = RTN_LOCAL;
955 goto make_route; 955 goto make_route;
956 } 956 }
957 957
958 if (decnet_debug_level & 16) 958 if (decnet_debug_level & 16)
959 printk(KERN_DEBUG 959 printk(KERN_DEBUG
960 "dn_route_output_slow: initial checks complete." 960 "dn_route_output_slow: initial checks complete."
961 " dst=%o4x src=%04x oif=%d try_hard=%d\n", 961 " dst=%o4x src=%04x oif=%d try_hard=%d\n",
962 dn_ntohs(fl.fld_dst), dn_ntohs(fl.fld_src), 962 le16_to_cpu(fl.fld_dst), le16_to_cpu(fl.fld_src),
963 fl.oif, try_hard); 963 fl.oif, try_hard);
964 964
965 /* 965 /*
966 * N.B. If the kernel is compiled without router support then 966 * N.B. If the kernel is compiled without router support then
967 * dn_fib_lookup() will evaluate to non-zero so this if () block 967 * dn_fib_lookup() will evaluate to non-zero so this if () block
968 * will always be executed. 968 * will always be executed.
969 */ 969 */
970 err = -ESRCH; 970 err = -ESRCH;
971 if (try_hard || (err = dn_fib_lookup(&fl, &res)) != 0) { 971 if (try_hard || (err = dn_fib_lookup(&fl, &res)) != 0) {
972 struct dn_dev *dn_db; 972 struct dn_dev *dn_db;
973 if (err != -ESRCH) 973 if (err != -ESRCH)
974 goto out; 974 goto out;
975 /* 975 /*
976 * Here the fallback is basically the standard algorithm for 976 * Here the fallback is basically the standard algorithm for
977 * routing in endnodes which is described in the DECnet routing 977 * routing in endnodes which is described in the DECnet routing
978 * docs 978 * docs
979 * 979 *
980 * If we are not trying hard, look in neighbour cache. 980 * If we are not trying hard, look in neighbour cache.
981 * The result is tested to ensure that if a specific output 981 * The result is tested to ensure that if a specific output
982 * device/source address was requested, then we honour that 982 * device/source address was requested, then we honour that
983 * here 983 * here
984 */ 984 */
985 if (!try_hard) { 985 if (!try_hard) {
986 neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fl.fld_dst); 986 neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fl.fld_dst);
987 if (neigh) { 987 if (neigh) {
988 if ((oldflp->oif && 988 if ((oldflp->oif &&
989 (neigh->dev->ifindex != oldflp->oif)) || 989 (neigh->dev->ifindex != oldflp->oif)) ||
990 (oldflp->fld_src && 990 (oldflp->fld_src &&
991 (!dn_dev_islocal(neigh->dev, 991 (!dn_dev_islocal(neigh->dev,
992 oldflp->fld_src)))) { 992 oldflp->fld_src)))) {
993 neigh_release(neigh); 993 neigh_release(neigh);
994 neigh = NULL; 994 neigh = NULL;
995 } else { 995 } else {
996 if (dev_out) 996 if (dev_out)
997 dev_put(dev_out); 997 dev_put(dev_out);
998 if (dn_dev_islocal(neigh->dev, fl.fld_dst)) { 998 if (dn_dev_islocal(neigh->dev, fl.fld_dst)) {
999 dev_out = init_net.loopback_dev; 999 dev_out = init_net.loopback_dev;
1000 res.type = RTN_LOCAL; 1000 res.type = RTN_LOCAL;
1001 } else { 1001 } else {
1002 dev_out = neigh->dev; 1002 dev_out = neigh->dev;
1003 } 1003 }
1004 dev_hold(dev_out); 1004 dev_hold(dev_out);
1005 goto select_source; 1005 goto select_source;
1006 } 1006 }
1007 } 1007 }
1008 } 1008 }
1009 1009
1010 /* Not there? Perhaps its a local address */ 1010 /* Not there? Perhaps its a local address */
1011 if (dev_out == NULL) 1011 if (dev_out == NULL)
1012 dev_out = dn_dev_get_default(); 1012 dev_out = dn_dev_get_default();
1013 err = -ENODEV; 1013 err = -ENODEV;
1014 if (dev_out == NULL) 1014 if (dev_out == NULL)
1015 goto out; 1015 goto out;
1016 dn_db = dev_out->dn_ptr; 1016 dn_db = dev_out->dn_ptr;
1017 /* Possible improvement - check all devices for local addr */ 1017 /* Possible improvement - check all devices for local addr */
1018 if (dn_dev_islocal(dev_out, fl.fld_dst)) { 1018 if (dn_dev_islocal(dev_out, fl.fld_dst)) {
1019 dev_put(dev_out); 1019 dev_put(dev_out);
1020 dev_out = init_net.loopback_dev; 1020 dev_out = init_net.loopback_dev;
1021 dev_hold(dev_out); 1021 dev_hold(dev_out);
1022 res.type = RTN_LOCAL; 1022 res.type = RTN_LOCAL;
1023 goto select_source; 1023 goto select_source;
1024 } 1024 }
1025 /* Not local either.... try sending it to the default router */ 1025 /* Not local either.... try sending it to the default router */
1026 neigh = neigh_clone(dn_db->router); 1026 neigh = neigh_clone(dn_db->router);
1027 BUG_ON(neigh && neigh->dev != dev_out); 1027 BUG_ON(neigh && neigh->dev != dev_out);
1028 1028
1029 /* Ok then, we assume its directly connected and move on */ 1029 /* Ok then, we assume its directly connected and move on */
1030 select_source: 1030 select_source:
1031 if (neigh) 1031 if (neigh)
1032 gateway = ((struct dn_neigh *)neigh)->addr; 1032 gateway = ((struct dn_neigh *)neigh)->addr;
1033 if (gateway == 0) 1033 if (gateway == 0)
1034 gateway = fl.fld_dst; 1034 gateway = fl.fld_dst;
1035 if (fl.fld_src == 0) { 1035 if (fl.fld_src == 0) {
1036 fl.fld_src = dnet_select_source(dev_out, gateway, 1036 fl.fld_src = dnet_select_source(dev_out, gateway,
1037 res.type == RTN_LOCAL ? 1037 res.type == RTN_LOCAL ?
1038 RT_SCOPE_HOST : 1038 RT_SCOPE_HOST :
1039 RT_SCOPE_LINK); 1039 RT_SCOPE_LINK);
1040 if (fl.fld_src == 0 && res.type != RTN_LOCAL) 1040 if (fl.fld_src == 0 && res.type != RTN_LOCAL)
1041 goto e_addr; 1041 goto e_addr;
1042 } 1042 }
1043 fl.oif = dev_out->ifindex; 1043 fl.oif = dev_out->ifindex;
1044 goto make_route; 1044 goto make_route;
1045 } 1045 }
1046 free_res = 1; 1046 free_res = 1;
1047 1047
1048 if (res.type == RTN_NAT) 1048 if (res.type == RTN_NAT)
1049 goto e_inval; 1049 goto e_inval;
1050 1050
1051 if (res.type == RTN_LOCAL) { 1051 if (res.type == RTN_LOCAL) {
1052 if (!fl.fld_src) 1052 if (!fl.fld_src)
1053 fl.fld_src = fl.fld_dst; 1053 fl.fld_src = fl.fld_dst;
1054 if (dev_out) 1054 if (dev_out)
1055 dev_put(dev_out); 1055 dev_put(dev_out);
1056 dev_out = init_net.loopback_dev; 1056 dev_out = init_net.loopback_dev;
1057 dev_hold(dev_out); 1057 dev_hold(dev_out);
1058 fl.oif = dev_out->ifindex; 1058 fl.oif = dev_out->ifindex;
1059 if (res.fi) 1059 if (res.fi)
1060 dn_fib_info_put(res.fi); 1060 dn_fib_info_put(res.fi);
1061 res.fi = NULL; 1061 res.fi = NULL;
1062 goto make_route; 1062 goto make_route;
1063 } 1063 }
1064 1064
1065 if (res.fi->fib_nhs > 1 && fl.oif == 0) 1065 if (res.fi->fib_nhs > 1 && fl.oif == 0)
1066 dn_fib_select_multipath(&fl, &res); 1066 dn_fib_select_multipath(&fl, &res);
1067 1067
1068 /* 1068 /*
1069 * We could add some logic to deal with default routes here and 1069 * We could add some logic to deal with default routes here and
1070 * get rid of some of the special casing above. 1070 * get rid of some of the special casing above.
1071 */ 1071 */
1072 1072
1073 if (!fl.fld_src) 1073 if (!fl.fld_src)
1074 fl.fld_src = DN_FIB_RES_PREFSRC(res); 1074 fl.fld_src = DN_FIB_RES_PREFSRC(res);
1075 1075
1076 if (dev_out) 1076 if (dev_out)
1077 dev_put(dev_out); 1077 dev_put(dev_out);
1078 dev_out = DN_FIB_RES_DEV(res); 1078 dev_out = DN_FIB_RES_DEV(res);
1079 dev_hold(dev_out); 1079 dev_hold(dev_out);
1080 fl.oif = dev_out->ifindex; 1080 fl.oif = dev_out->ifindex;
1081 gateway = DN_FIB_RES_GW(res); 1081 gateway = DN_FIB_RES_GW(res);
1082 1082
1083 make_route: 1083 make_route:
1084 if (dev_out->flags & IFF_LOOPBACK) 1084 if (dev_out->flags & IFF_LOOPBACK)
1085 flags |= RTCF_LOCAL; 1085 flags |= RTCF_LOCAL;
1086 1086
1087 rt = dst_alloc(&dn_dst_ops); 1087 rt = dst_alloc(&dn_dst_ops);
1088 if (rt == NULL) 1088 if (rt == NULL)
1089 goto e_nobufs; 1089 goto e_nobufs;
1090 1090
1091 atomic_set(&rt->u.dst.__refcnt, 1); 1091 atomic_set(&rt->u.dst.__refcnt, 1);
1092 rt->u.dst.flags = DST_HOST; 1092 rt->u.dst.flags = DST_HOST;
1093 1093
1094 rt->fl.fld_src = oldflp->fld_src; 1094 rt->fl.fld_src = oldflp->fld_src;
1095 rt->fl.fld_dst = oldflp->fld_dst; 1095 rt->fl.fld_dst = oldflp->fld_dst;
1096 rt->fl.oif = oldflp->oif; 1096 rt->fl.oif = oldflp->oif;
1097 rt->fl.iif = 0; 1097 rt->fl.iif = 0;
1098 rt->fl.mark = oldflp->mark; 1098 rt->fl.mark = oldflp->mark;
1099 1099
1100 rt->rt_saddr = fl.fld_src; 1100 rt->rt_saddr = fl.fld_src;
1101 rt->rt_daddr = fl.fld_dst; 1101 rt->rt_daddr = fl.fld_dst;
1102 rt->rt_gateway = gateway ? gateway : fl.fld_dst; 1102 rt->rt_gateway = gateway ? gateway : fl.fld_dst;
1103 rt->rt_local_src = fl.fld_src; 1103 rt->rt_local_src = fl.fld_src;
1104 1104
1105 rt->rt_dst_map = fl.fld_dst; 1105 rt->rt_dst_map = fl.fld_dst;
1106 rt->rt_src_map = fl.fld_src; 1106 rt->rt_src_map = fl.fld_src;
1107 1107
1108 rt->u.dst.dev = dev_out; 1108 rt->u.dst.dev = dev_out;
1109 dev_hold(dev_out); 1109 dev_hold(dev_out);
1110 rt->u.dst.neighbour = neigh; 1110 rt->u.dst.neighbour = neigh;
1111 neigh = NULL; 1111 neigh = NULL;
1112 1112
1113 rt->u.dst.lastuse = jiffies; 1113 rt->u.dst.lastuse = jiffies;
1114 rt->u.dst.output = dn_output; 1114 rt->u.dst.output = dn_output;
1115 rt->u.dst.input = dn_rt_bug; 1115 rt->u.dst.input = dn_rt_bug;
1116 rt->rt_flags = flags; 1116 rt->rt_flags = flags;
1117 if (flags & RTCF_LOCAL) 1117 if (flags & RTCF_LOCAL)
1118 rt->u.dst.input = dn_nsp_rx; 1118 rt->u.dst.input = dn_nsp_rx;
1119 1119
1120 err = dn_rt_set_next_hop(rt, &res); 1120 err = dn_rt_set_next_hop(rt, &res);
1121 if (err) 1121 if (err)
1122 goto e_neighbour; 1122 goto e_neighbour;
1123 1123
1124 hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst); 1124 hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
1125 dn_insert_route(rt, hash, (struct dn_route **)pprt); 1125 dn_insert_route(rt, hash, (struct dn_route **)pprt);
1126 1126
1127 done: 1127 done:
1128 if (neigh) 1128 if (neigh)
1129 neigh_release(neigh); 1129 neigh_release(neigh);
1130 if (free_res) 1130 if (free_res)
1131 dn_fib_res_put(&res); 1131 dn_fib_res_put(&res);
1132 if (dev_out) 1132 if (dev_out)
1133 dev_put(dev_out); 1133 dev_put(dev_out);
1134 out: 1134 out:
1135 return err; 1135 return err;
1136 1136
1137 e_addr: 1137 e_addr:
1138 err = -EADDRNOTAVAIL; 1138 err = -EADDRNOTAVAIL;
1139 goto done; 1139 goto done;
1140 e_inval: 1140 e_inval:
1141 err = -EINVAL; 1141 err = -EINVAL;
1142 goto done; 1142 goto done;
1143 e_nobufs: 1143 e_nobufs:
1144 err = -ENOBUFS; 1144 err = -ENOBUFS;
1145 goto done; 1145 goto done;
1146 e_neighbour: 1146 e_neighbour:
1147 dst_free(&rt->u.dst); 1147 dst_free(&rt->u.dst);
1148 goto e_nobufs; 1148 goto e_nobufs;
1149 } 1149 }
1150 1150
1151 1151
1152 /* 1152 /*
1153 * N.B. The flags may be moved into the flowi at some future stage. 1153 * N.B. The flags may be moved into the flowi at some future stage.
1154 */ 1154 */
1155 static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *flp, int flags) 1155 static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *flp, int flags)
1156 { 1156 {
1157 unsigned hash = dn_hash(flp->fld_src, flp->fld_dst); 1157 unsigned hash = dn_hash(flp->fld_src, flp->fld_dst);
1158 struct dn_route *rt = NULL; 1158 struct dn_route *rt = NULL;
1159 1159
1160 if (!(flags & MSG_TRYHARD)) { 1160 if (!(flags & MSG_TRYHARD)) {
1161 rcu_read_lock_bh(); 1161 rcu_read_lock_bh();
1162 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt; 1162 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
1163 rt = rcu_dereference(rt->u.dst.dn_next)) { 1163 rt = rcu_dereference(rt->u.dst.dn_next)) {
1164 if ((flp->fld_dst == rt->fl.fld_dst) && 1164 if ((flp->fld_dst == rt->fl.fld_dst) &&
1165 (flp->fld_src == rt->fl.fld_src) && 1165 (flp->fld_src == rt->fl.fld_src) &&
1166 (flp->mark == rt->fl.mark) && 1166 (flp->mark == rt->fl.mark) &&
1167 (rt->fl.iif == 0) && 1167 (rt->fl.iif == 0) &&
1168 (rt->fl.oif == flp->oif)) { 1168 (rt->fl.oif == flp->oif)) {
1169 dst_use(&rt->u.dst, jiffies); 1169 dst_use(&rt->u.dst, jiffies);
1170 rcu_read_unlock_bh(); 1170 rcu_read_unlock_bh();
1171 *pprt = &rt->u.dst; 1171 *pprt = &rt->u.dst;
1172 return 0; 1172 return 0;
1173 } 1173 }
1174 } 1174 }
1175 rcu_read_unlock_bh(); 1175 rcu_read_unlock_bh();
1176 } 1176 }
1177 1177
1178 return dn_route_output_slow(pprt, flp, flags); 1178 return dn_route_output_slow(pprt, flp, flags);
1179 } 1179 }
1180 1180
1181 static int dn_route_output_key(struct dst_entry **pprt, struct flowi *flp, int flags) 1181 static int dn_route_output_key(struct dst_entry **pprt, struct flowi *flp, int flags)
1182 { 1182 {
1183 int err; 1183 int err;
1184 1184
1185 err = __dn_route_output_key(pprt, flp, flags); 1185 err = __dn_route_output_key(pprt, flp, flags);
1186 if (err == 0 && flp->proto) { 1186 if (err == 0 && flp->proto) {
1187 err = xfrm_lookup(&init_net, pprt, flp, NULL, 0); 1187 err = xfrm_lookup(&init_net, pprt, flp, NULL, 0);
1188 } 1188 }
1189 return err; 1189 return err;
1190 } 1190 }
1191 1191
1192 int dn_route_output_sock(struct dst_entry **pprt, struct flowi *fl, struct sock *sk, int flags) 1192 int dn_route_output_sock(struct dst_entry **pprt, struct flowi *fl, struct sock *sk, int flags)
1193 { 1193 {
1194 int err; 1194 int err;
1195 1195
1196 err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD); 1196 err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
1197 if (err == 0 && fl->proto) { 1197 if (err == 0 && fl->proto) {
1198 err = xfrm_lookup(&init_net, pprt, fl, sk, 1198 err = xfrm_lookup(&init_net, pprt, fl, sk,
1199 (flags & MSG_DONTWAIT) ? 0 : XFRM_LOOKUP_WAIT); 1199 (flags & MSG_DONTWAIT) ? 0 : XFRM_LOOKUP_WAIT);
1200 } 1200 }
1201 return err; 1201 return err;
1202 } 1202 }
1203 1203
1204 static int dn_route_input_slow(struct sk_buff *skb) 1204 static int dn_route_input_slow(struct sk_buff *skb)
1205 { 1205 {
1206 struct dn_route *rt = NULL; 1206 struct dn_route *rt = NULL;
1207 struct dn_skb_cb *cb = DN_SKB_CB(skb); 1207 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1208 struct net_device *in_dev = skb->dev; 1208 struct net_device *in_dev = skb->dev;
1209 struct net_device *out_dev = NULL; 1209 struct net_device *out_dev = NULL;
1210 struct dn_dev *dn_db; 1210 struct dn_dev *dn_db;
1211 struct neighbour *neigh = NULL; 1211 struct neighbour *neigh = NULL;
1212 unsigned hash; 1212 unsigned hash;
1213 int flags = 0; 1213 int flags = 0;
1214 __le16 gateway = 0; 1214 __le16 gateway = 0;
1215 __le16 local_src = 0; 1215 __le16 local_src = 0;
1216 struct flowi fl = { .nl_u = { .dn_u = 1216 struct flowi fl = { .nl_u = { .dn_u =
1217 { .daddr = cb->dst, 1217 { .daddr = cb->dst,
1218 .saddr = cb->src, 1218 .saddr = cb->src,
1219 .scope = RT_SCOPE_UNIVERSE, 1219 .scope = RT_SCOPE_UNIVERSE,
1220 } }, 1220 } },
1221 .mark = skb->mark, 1221 .mark = skb->mark,
1222 .iif = skb->dev->ifindex }; 1222 .iif = skb->dev->ifindex };
1223 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE }; 1223 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
1224 int err = -EINVAL; 1224 int err = -EINVAL;
1225 int free_res = 0; 1225 int free_res = 0;
1226 1226
1227 dev_hold(in_dev); 1227 dev_hold(in_dev);
1228 1228
1229 if ((dn_db = in_dev->dn_ptr) == NULL) 1229 if ((dn_db = in_dev->dn_ptr) == NULL)
1230 goto out; 1230 goto out;
1231 1231
1232 /* Zero source addresses are not allowed */ 1232 /* Zero source addresses are not allowed */
1233 if (fl.fld_src == 0) 1233 if (fl.fld_src == 0)
1234 goto out; 1234 goto out;
1235 1235
1236 /* 1236 /*
1237 * In this case we've just received a packet from a source 1237 * In this case we've just received a packet from a source
1238 * outside ourselves pretending to come from us. We don't 1238 * outside ourselves pretending to come from us. We don't
1239 * allow it any further to prevent routing loops, spoofing and 1239 * allow it any further to prevent routing loops, spoofing and
1240 * other nasties. Loopback packets already have the dst attached 1240 * other nasties. Loopback packets already have the dst attached
1241 * so this only affects packets which have originated elsewhere. 1241 * so this only affects packets which have originated elsewhere.
1242 */ 1242 */
1243 err = -ENOTUNIQ; 1243 err = -ENOTUNIQ;
1244 if (dn_dev_islocal(in_dev, cb->src)) 1244 if (dn_dev_islocal(in_dev, cb->src))
1245 goto out; 1245 goto out;
1246 1246
1247 err = dn_fib_lookup(&fl, &res); 1247 err = dn_fib_lookup(&fl, &res);
1248 if (err) { 1248 if (err) {
1249 if (err != -ESRCH) 1249 if (err != -ESRCH)
1250 goto out; 1250 goto out;
1251 /* 1251 /*
1252 * Is the destination us ? 1252 * Is the destination us ?
1253 */ 1253 */
1254 if (!dn_dev_islocal(in_dev, cb->dst)) 1254 if (!dn_dev_islocal(in_dev, cb->dst))
1255 goto e_inval; 1255 goto e_inval;
1256 1256
1257 res.type = RTN_LOCAL; 1257 res.type = RTN_LOCAL;
1258 } else { 1258 } else {
1259 __le16 src_map = fl.fld_src; 1259 __le16 src_map = fl.fld_src;
1260 free_res = 1; 1260 free_res = 1;
1261 1261
1262 out_dev = DN_FIB_RES_DEV(res); 1262 out_dev = DN_FIB_RES_DEV(res);
1263 if (out_dev == NULL) { 1263 if (out_dev == NULL) {
1264 if (net_ratelimit()) 1264 if (net_ratelimit())
1265 printk(KERN_CRIT "Bug in dn_route_input_slow() " 1265 printk(KERN_CRIT "Bug in dn_route_input_slow() "
1266 "No output device\n"); 1266 "No output device\n");
1267 goto e_inval; 1267 goto e_inval;
1268 } 1268 }
1269 dev_hold(out_dev); 1269 dev_hold(out_dev);
1270 1270
1271 if (res.r) 1271 if (res.r)
1272 src_map = fl.fld_src; /* no NAT support for now */ 1272 src_map = fl.fld_src; /* no NAT support for now */
1273 1273
1274 gateway = DN_FIB_RES_GW(res); 1274 gateway = DN_FIB_RES_GW(res);
1275 if (res.type == RTN_NAT) { 1275 if (res.type == RTN_NAT) {
1276 fl.fld_dst = dn_fib_rules_map_destination(fl.fld_dst, &res); 1276 fl.fld_dst = dn_fib_rules_map_destination(fl.fld_dst, &res);
1277 dn_fib_res_put(&res); 1277 dn_fib_res_put(&res);
1278 free_res = 0; 1278 free_res = 0;
1279 if (dn_fib_lookup(&fl, &res)) 1279 if (dn_fib_lookup(&fl, &res))
1280 goto e_inval; 1280 goto e_inval;
1281 free_res = 1; 1281 free_res = 1;
1282 if (res.type != RTN_UNICAST) 1282 if (res.type != RTN_UNICAST)
1283 goto e_inval; 1283 goto e_inval;
1284 flags |= RTCF_DNAT; 1284 flags |= RTCF_DNAT;
1285 gateway = fl.fld_dst; 1285 gateway = fl.fld_dst;
1286 } 1286 }
1287 fl.fld_src = src_map; 1287 fl.fld_src = src_map;
1288 } 1288 }
1289 1289
1290 switch(res.type) { 1290 switch(res.type) {
1291 case RTN_UNICAST: 1291 case RTN_UNICAST:
1292 /* 1292 /*
1293 * Forwarding check here, we only check for forwarding 1293 * Forwarding check here, we only check for forwarding
1294 * being turned off, if you want to only forward intra 1294 * being turned off, if you want to only forward intra
1295 * area, its up to you to set the routing tables up 1295 * area, its up to you to set the routing tables up
1296 * correctly. 1296 * correctly.
1297 */ 1297 */
1298 if (dn_db->parms.forwarding == 0) 1298 if (dn_db->parms.forwarding == 0)
1299 goto e_inval; 1299 goto e_inval;
1300 1300
1301 if (res.fi->fib_nhs > 1 && fl.oif == 0) 1301 if (res.fi->fib_nhs > 1 && fl.oif == 0)
1302 dn_fib_select_multipath(&fl, &res); 1302 dn_fib_select_multipath(&fl, &res);
1303 1303
1304 /* 1304 /*
1305 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT 1305 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
1306 * flag as a hint to set the intra-ethernet bit when 1306 * flag as a hint to set the intra-ethernet bit when
1307 * forwarding. If we've got NAT in operation, we don't do 1307 * forwarding. If we've got NAT in operation, we don't do
1308 * this optimisation. 1308 * this optimisation.
1309 */ 1309 */
1310 if (out_dev == in_dev && !(flags & RTCF_NAT)) 1310 if (out_dev == in_dev && !(flags & RTCF_NAT))
1311 flags |= RTCF_DOREDIRECT; 1311 flags |= RTCF_DOREDIRECT;
1312 1312
1313 local_src = DN_FIB_RES_PREFSRC(res); 1313 local_src = DN_FIB_RES_PREFSRC(res);
1314 1314
1315 case RTN_BLACKHOLE: 1315 case RTN_BLACKHOLE:
1316 case RTN_UNREACHABLE: 1316 case RTN_UNREACHABLE:
1317 break; 1317 break;
1318 case RTN_LOCAL: 1318 case RTN_LOCAL:
1319 flags |= RTCF_LOCAL; 1319 flags |= RTCF_LOCAL;
1320 fl.fld_src = cb->dst; 1320 fl.fld_src = cb->dst;
1321 fl.fld_dst = cb->src; 1321 fl.fld_dst = cb->src;
1322 1322
1323 /* Routing tables gave us a gateway */ 1323 /* Routing tables gave us a gateway */
1324 if (gateway) 1324 if (gateway)
1325 goto make_route; 1325 goto make_route;
1326 1326
1327 /* Packet was intra-ethernet, so we know its on-link */ 1327 /* Packet was intra-ethernet, so we know its on-link */
1328 if (cb->rt_flags & DN_RT_F_IE) { 1328 if (cb->rt_flags & DN_RT_F_IE) {
1329 gateway = cb->src; 1329 gateway = cb->src;
1330 flags |= RTCF_DIRECTSRC; 1330 flags |= RTCF_DIRECTSRC;
1331 goto make_route; 1331 goto make_route;
1332 } 1332 }
1333 1333
1334 /* Use the default router if there is one */ 1334 /* Use the default router if there is one */
1335 neigh = neigh_clone(dn_db->router); 1335 neigh = neigh_clone(dn_db->router);
1336 if (neigh) { 1336 if (neigh) {
1337 gateway = ((struct dn_neigh *)neigh)->addr; 1337 gateway = ((struct dn_neigh *)neigh)->addr;
1338 goto make_route; 1338 goto make_route;
1339 } 1339 }
1340 1340
1341 /* Close eyes and pray */ 1341 /* Close eyes and pray */
1342 gateway = cb->src; 1342 gateway = cb->src;
1343 flags |= RTCF_DIRECTSRC; 1343 flags |= RTCF_DIRECTSRC;
1344 goto make_route; 1344 goto make_route;
1345 default: 1345 default:
1346 goto e_inval; 1346 goto e_inval;
1347 } 1347 }
1348 1348
1349 make_route: 1349 make_route:
1350 rt = dst_alloc(&dn_dst_ops); 1350 rt = dst_alloc(&dn_dst_ops);
1351 if (rt == NULL) 1351 if (rt == NULL)
1352 goto e_nobufs; 1352 goto e_nobufs;
1353 1353
1354 rt->rt_saddr = fl.fld_src; 1354 rt->rt_saddr = fl.fld_src;
1355 rt->rt_daddr = fl.fld_dst; 1355 rt->rt_daddr = fl.fld_dst;
1356 rt->rt_gateway = fl.fld_dst; 1356 rt->rt_gateway = fl.fld_dst;
1357 if (gateway) 1357 if (gateway)
1358 rt->rt_gateway = gateway; 1358 rt->rt_gateway = gateway;
1359 rt->rt_local_src = local_src ? local_src : rt->rt_saddr; 1359 rt->rt_local_src = local_src ? local_src : rt->rt_saddr;
1360 1360
1361 rt->rt_dst_map = fl.fld_dst; 1361 rt->rt_dst_map = fl.fld_dst;
1362 rt->rt_src_map = fl.fld_src; 1362 rt->rt_src_map = fl.fld_src;
1363 1363
1364 rt->fl.fld_src = cb->src; 1364 rt->fl.fld_src = cb->src;
1365 rt->fl.fld_dst = cb->dst; 1365 rt->fl.fld_dst = cb->dst;
1366 rt->fl.oif = 0; 1366 rt->fl.oif = 0;
1367 rt->fl.iif = in_dev->ifindex; 1367 rt->fl.iif = in_dev->ifindex;
1368 rt->fl.mark = fl.mark; 1368 rt->fl.mark = fl.mark;
1369 1369
1370 rt->u.dst.flags = DST_HOST; 1370 rt->u.dst.flags = DST_HOST;
1371 rt->u.dst.neighbour = neigh; 1371 rt->u.dst.neighbour = neigh;
1372 rt->u.dst.dev = out_dev; 1372 rt->u.dst.dev = out_dev;
1373 rt->u.dst.lastuse = jiffies; 1373 rt->u.dst.lastuse = jiffies;
1374 rt->u.dst.output = dn_rt_bug; 1374 rt->u.dst.output = dn_rt_bug;
1375 switch(res.type) { 1375 switch(res.type) {
1376 case RTN_UNICAST: 1376 case RTN_UNICAST:
1377 rt->u.dst.input = dn_forward; 1377 rt->u.dst.input = dn_forward;
1378 break; 1378 break;
1379 case RTN_LOCAL: 1379 case RTN_LOCAL:
1380 rt->u.dst.output = dn_output; 1380 rt->u.dst.output = dn_output;
1381 rt->u.dst.input = dn_nsp_rx; 1381 rt->u.dst.input = dn_nsp_rx;
1382 rt->u.dst.dev = in_dev; 1382 rt->u.dst.dev = in_dev;
1383 flags |= RTCF_LOCAL; 1383 flags |= RTCF_LOCAL;
1384 break; 1384 break;
1385 default: 1385 default:
1386 case RTN_UNREACHABLE: 1386 case RTN_UNREACHABLE:
1387 case RTN_BLACKHOLE: 1387 case RTN_BLACKHOLE:
1388 rt->u.dst.input = dst_discard; 1388 rt->u.dst.input = dst_discard;
1389 } 1389 }
1390 rt->rt_flags = flags; 1390 rt->rt_flags = flags;
1391 if (rt->u.dst.dev) 1391 if (rt->u.dst.dev)
1392 dev_hold(rt->u.dst.dev); 1392 dev_hold(rt->u.dst.dev);
1393 1393
1394 err = dn_rt_set_next_hop(rt, &res); 1394 err = dn_rt_set_next_hop(rt, &res);
1395 if (err) 1395 if (err)
1396 goto e_neighbour; 1396 goto e_neighbour;
1397 1397
1398 hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst); 1398 hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
1399 dn_insert_route(rt, hash, (struct dn_route **)&skb->dst); 1399 dn_insert_route(rt, hash, (struct dn_route **)&skb->dst);
1400 1400
1401 done: 1401 done:
1402 if (neigh) 1402 if (neigh)
1403 neigh_release(neigh); 1403 neigh_release(neigh);
1404 if (free_res) 1404 if (free_res)
1405 dn_fib_res_put(&res); 1405 dn_fib_res_put(&res);
1406 dev_put(in_dev); 1406 dev_put(in_dev);
1407 if (out_dev) 1407 if (out_dev)
1408 dev_put(out_dev); 1408 dev_put(out_dev);
1409 out: 1409 out:
1410 return err; 1410 return err;
1411 1411
1412 e_inval: 1412 e_inval:
1413 err = -EINVAL; 1413 err = -EINVAL;
1414 goto done; 1414 goto done;
1415 1415
1416 e_nobufs: 1416 e_nobufs:
1417 err = -ENOBUFS; 1417 err = -ENOBUFS;
1418 goto done; 1418 goto done;
1419 1419
1420 e_neighbour: 1420 e_neighbour:
1421 dst_free(&rt->u.dst); 1421 dst_free(&rt->u.dst);
1422 goto done; 1422 goto done;
1423 } 1423 }
1424 1424
/*
 * dn_route_input - resolve the input route for a received skb
 *
 * Fast path: look the (src, dst, mark, iif) tuple up in the route cache
 * under rcu_read_lock(); on a hit, take a reference via dst_use() and
 * attach the cached route to skb->dst.  On a miss, fall back to the
 * slow path (dn_route_input_slow).
 *
 * Returns 0 on success (skb->dst set) or the slow path's error code.
 */
int dn_route_input(struct sk_buff *skb)
{
	struct dn_route *rt;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned hash = dn_hash(cb->src, cb->dst);

	/* Already routed — nothing to do. */
	if (skb->dst)
		return 0;

	rcu_read_lock();
	for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
	    rt = rcu_dereference(rt->u.dst.dn_next)) {
		/* Input routes are keyed with oif == 0. */
		if ((rt->fl.fld_src == cb->src) &&
		    (rt->fl.fld_dst == cb->dst) &&
		    (rt->fl.oif == 0) &&
		    (rt->fl.mark == skb->mark) &&
		    (rt->fl.iif == cb->iif)) {
			/* Bump refcount/use stats before dropping RCU. */
			dst_use(&rt->u.dst, jiffies);
			rcu_read_unlock();
			skb->dst = (struct dst_entry *)rt;
			return 0;
		}
	}
	rcu_read_unlock();

	/* Cache miss: do the full FIB lookup and insert the result. */
	return dn_route_input_slow(skb);
}
1452 1452
/*
 * dn_rt_fill_info - encode the route attached to skb->dst as an
 * RTM_NEWROUTE netlink message appended to @skb.
 *
 * Returns skb->len on success, or -1 if the message did not fit, in
 * which case the partially built message is trimmed off again via
 * nlmsg_trim() (the NLMSG_NEW/RTA_PUT macros jump to the failure
 * labels on overflow).
 */
static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			   int event, int nowait, unsigned int flags)
{
	struct dn_route *rt = (struct dn_route *)skb->dst;
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);	/* rewind point on failure */
	long expires;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
	r = NLMSG_DATA(nlh);
	r->rtm_family = AF_DECnet;
	r->rtm_dst_len = 16;	/* DECnet addresses are 16 bits */
	r->rtm_src_len = 0;
	r->rtm_tos = 0;
	r->rtm_table = RT_TABLE_MAIN;
	RTA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type = rt->rt_type;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
	if (rt->fl.fld_src) {
		r->rtm_src_len = 16;
		RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
	}
	if (rt->u.dst.dev)
		RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
	/*
	 * Note to self - change this if input routes reverse direction when
	 * they deal only with inputs and not with replies like they do
	 * currently.
	 */
	RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
	if (rt->rt_daddr != rt->rt_gateway)
		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
		goto rtattr_failure;
	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
	if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, expires,
			       rt->u.dst.error) < 0)
		goto rtattr_failure;
	if (rt->fl.iif)
		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	/* Undo the partial message so the skb stays consistent. */
	nlmsg_trim(skb, b);
	return -1;
}
1508 1508
/*
 * This is called by both endnodes and routers now.
 *
 * Handle an RTM_GETROUTE request: build a dummy skb, resolve the route
 * (input route when RTA_IIF is given, otherwise output route), then
 * serialise the result with dn_rt_fill_info() and unicast it back to
 * the requester.  DECnet only exists in the initial network namespace.
 */
static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb;
	int err;
	struct sk_buff *skb;
	struct flowi fl;

	if (net != &init_net)
		return -EINVAL;

	memset(&fl, 0, sizeof(fl));
	fl.proto = DNPROTO_NSP;

	/* The skb doubles as the reply buffer for dn_rt_fill_info(). */
	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		return -ENOBUFS;
	skb_reset_mac_header(skb);
	cb = DN_SKB_CB(skb);

	/* DECnet addresses are 2 bytes. */
	if (rta[RTA_SRC-1])
		memcpy(&fl.fld_src, RTA_DATA(rta[RTA_SRC-1]), 2);
	if (rta[RTA_DST-1])
		memcpy(&fl.fld_dst, RTA_DATA(rta[RTA_DST-1]), 2);
	if (rta[RTA_IIF-1])
		memcpy(&fl.iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));

	if (fl.iif) {
		/* Input-route query: fake a packet received on fl.iif. */
		struct net_device *dev;
		if ((dev = dev_get_by_index(&init_net, fl.iif)) == NULL) {
			kfree_skb(skb);
			return -ENODEV;
		}
		/* Device must be DECnet-enabled. */
		if (!dev->dn_ptr) {
			dev_put(dev);
			kfree_skb(skb);
			return -ENODEV;
		}
		skb->protocol = htons(ETH_P_DNA_RT);
		skb->dev = dev;
		cb->src = fl.fld_src;
		cb->dst = fl.fld_dst;
		local_bh_disable();
		err = dn_route_input(skb);
		local_bh_enable();
		memset(cb, 0, sizeof(struct dn_skb_cb));
		rt = (struct dn_route *)skb->dst;
		/* NOTE(review): the unary minus is a no-op in this boolean
		 * test; this just propagates a pending dst error. */
		if (!err && -rt->u.dst.error)
			err = rt->u.dst.error;
	} else {
		/* Output-route query. */
		int oif = 0;
		if (rta[RTA_OIF - 1])
			memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
		fl.oif = oif;
		err = dn_route_output_key((struct dst_entry **)&rt, &fl, 0);
	}

	/* Drop the device ref taken by dev_get_by_index() above. */
	if (skb->dev)
		dev_put(skb->dev);
	skb->dev = NULL;
	if (err)
		goto out_free;
	skb->dst = &rt->u.dst;
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);

	if (err == 0)
		goto out_free;
	if (err < 0) {
		err = -EMSGSIZE;
		goto out_free;
	}

	return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);

out_free:
	kfree_skb(skb);
	return err;
}
1596 1596
/*
 * For routers, this is called from dn_fib_dump, but for endnodes its
 * called directly from the rtnetlink dispatch table.
 *
 * Dump the route cache to netlink.  Resume state across dump calls is
 * kept in cb->args[0] (hash bucket) and cb->args[1] (index within the
 * bucket).  Only clone (cache) dumps are honoured here.
 */
int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct dn_route *rt;
	int h, s_h;
	int idx, s_idx;

	if (net != &init_net)
		return 0;

	if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg))
		return -EINVAL;
	/* This function only dumps the cache (cloned routes). */
	if (!(((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED))
		return 0;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];
	for(h = 0; h <= dn_rt_hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;	/* new bucket: restart index */
		rcu_read_lock_bh();
		for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
		    rt;
		    rt = rcu_dereference(rt->u.dst.dn_next), idx++) {
			if (idx < s_idx)
				continue;
			/* dn_rt_fill_info() reads the route via skb->dst. */
			skb->dst = dst_clone(&rt->u.dst);
			if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					    1, NLM_F_MULTI) <= 0) {
				/* skb full: record position and stop. */
				dst_release(xchg(&skb->dst, NULL));
				rcu_read_unlock_bh();
				goto done;
			}
			dst_release(xchg(&skb->dst, NULL));
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}
1647 1647
#ifdef CONFIG_PROC_FS
/* Iterator state for the /proc/net/decnet_cache seq_file: the hash
 * bucket currently being walked (walked from high to low). */
struct dn_rt_cache_iter_state {
	int bucket;
};
1652 1652
/*
 * Find the first non-empty hash bucket, starting from the top of the
 * table, and return its chain head.
 *
 * Locking: returns with rcu_read_lock_bh() HELD when a route is found
 * (released later by dn_rt_cache_get_next() on bucket change or by
 * dn_rt_cache_seq_stop()); returns NULL with the lock released when
 * the cache is empty.
 */
static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
{
	struct dn_route *rt = NULL;
	struct dn_rt_cache_iter_state *s = seq->private;

	for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
		rcu_read_lock_bh();
		rt = dn_rt_hash_table[s->bucket].chain;
		if (rt)
			break;	/* keep the lock held for the caller */
		rcu_read_unlock_bh();
	}
	return rcu_dereference(rt);
}
1667 1667
/*
 * Advance to the next route in the cache: follow the chain, and when
 * it ends, drop the BH-RCU lock, move to the next lower bucket and
 * re-take it.  Returns NULL (with the lock released) when the table
 * is exhausted.  Called with rcu_read_lock_bh() held.
 */
static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
{
	struct dn_rt_cache_iter_state *s = seq->private;

	rt = rt->u.dst.dn_next;
	while(!rt) {
		rcu_read_unlock_bh();
		if (--s->bucket < 0)
			break;	/* table exhausted; lock stays released */
		rcu_read_lock_bh();
		rt = dn_rt_hash_table[s->bucket].chain;
	}
	return rcu_dereference(rt);
}
1682 1682
/*
 * seq_file ->start: skip *pos entries from the beginning of the cache.
 * Note that *pos is decremented in place; if the iterator runs out
 * first, *pos stays non-zero and NULL is returned.
 */
static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_first(seq);

	if (rt) {
		while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
			--*pos;
	}
	return *pos ? NULL : rt;
}
1693 1693
1694 static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1694 static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1695 { 1695 {
1696 struct dn_route *rt = dn_rt_cache_get_next(seq, v); 1696 struct dn_route *rt = dn_rt_cache_get_next(seq, v);
1697 ++*pos; 1697 ++*pos;
1698 return rt; 1698 return rt;
1699 } 1699 }
1700 1700
1701 static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v) 1701 static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
1702 { 1702 {
1703 if (v) 1703 if (v)
1704 rcu_read_unlock_bh(); 1704 rcu_read_unlock_bh();
1705 } 1705 }
1706 1706
/*
 * seq_file ->show: print one cached route as a single line — device,
 * destination and source addresses (ASCII DECnet form), refcount, use
 * count and the RTT metric.
 */
static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
{
	struct dn_route *rt = v;
	char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];

	seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
		   rt->u.dst.dev ? rt->u.dst.dev->name : "*",
		   dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
		   dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
		   atomic_read(&rt->u.dst.__refcnt),
		   rt->u.dst.__use,
		   (int) dst_metric(&rt->u.dst, RTAX_RTT));
	return 0;
}
1721 1721
/* seq_file iterator operations for /proc/net/decnet_cache. */
static const struct seq_operations dn_rt_cache_seq_ops = {
	.start = dn_rt_cache_seq_start,
	.next  = dn_rt_cache_seq_next,
	.stop  = dn_rt_cache_seq_stop,
	.show  = dn_rt_cache_seq_show,
};
1728 1728
/*
 * ->open for /proc/net/decnet_cache: attach the seq_ops with a
 * zeroed per-open dn_rt_cache_iter_state as seq->private.
 */
static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &dn_rt_cache_seq_ops,
			sizeof(struct dn_rt_cache_iter_state));
}
1734 1734
/* File operations for /proc/net/decnet_cache. */
static const struct file_operations dn_rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dn_rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
1742 1742
1743 #endif /* CONFIG_PROC_FS */ 1743 #endif /* CONFIG_PROC_FS */
1744 1744
/*
 * dn_route_init - boot-time initialisation of DECnet routing.
 *
 * Creates the dst slab cache, starts the periodic GC timer, sizes and
 * allocates the route cache hash table (power-of-two bucket count,
 * capped well below 2048 buckets), registers the /proc entry and the
 * RTM_GETROUTE rtnetlink handler.  Panics if the hash table cannot be
 * allocated (SLAB_PANIC likewise covers the kmem cache).
 */
void __init dn_route_init(void)
{
	int i, goal, order;

	dn_dst_ops.kmem_cachep =
		kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	setup_timer(&dn_route_timer, dn_dst_check_expire, 0);
	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
	add_timer(&dn_route_timer);

	/* Scale the table with physical memory (one page per 64MB). */
	goal = num_physpages >> (26 - PAGE_SHIFT);

	for(order = 0; (1UL << order) < goal; order++)
		/* NOTHING */;

	/*
	 * Only want 1024 entries max, since the table is very, very unlikely
	 * to be larger than that.
	 */
	while(order && ((((1UL << order) * PAGE_SIZE) /
				sizeof(struct dn_rt_hash_bucket)) >= 2048))
		order--;

	do {
		/* Round the bucket count down to a power of two. */
		dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
			sizeof(struct dn_rt_hash_bucket);
		while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
			dn_rt_hash_mask--;
		dn_rt_hash_table = (struct dn_rt_hash_bucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (dn_rt_hash_table == NULL && --order > 0);

	if (!dn_rt_hash_table)
		panic("Failed to allocate DECnet route cache hash table\n");

	printk(KERN_INFO
		"DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
		dn_rt_hash_mask,
		(long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);

	/* Convert the bucket count into the actual mask. */
	dn_rt_hash_mask--;
	for(i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_init(&dn_rt_hash_table[i].lock);
		dn_rt_hash_table[i].chain = NULL;
	}

	dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);

	proc_net_fops_create(&init_net, "decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);

#ifdef CONFIG_DECNET_ROUTER
	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, dn_fib_dump);
#else
	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
		      dn_cache_dump);
#endif
}
1803 1803
/*
 * dn_route_cleanup - module-exit teardown: stop the GC timer, flush
 * every cached route, and remove the /proc entry.
 */
void __exit dn_route_cleanup(void)
{
	del_timer(&dn_route_timer);
	dn_run_flush(0);

	proc_net_remove(&init_net, "decnet_cache");
}
1811 1811
1812 1812
net/decnet/dn_table.c
1 /* 1 /*
2 * DECnet An implementation of the DECnet protocol suite for the LINUX 2 * DECnet An implementation of the DECnet protocol suite for the LINUX
3 * operating system. DECnet is implemented using the BSD Socket 3 * operating system. DECnet is implemented using the BSD Socket
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * DECnet Routing Forwarding Information Base (Routing Tables) 6 * DECnet Routing Forwarding Information Base (Routing Tables)
7 * 7 *
8 * Author: Steve Whitehouse <SteveW@ACM.org> 8 * Author: Steve Whitehouse <SteveW@ACM.org>
9 * Mostly copied from the IPv4 routing code 9 * Mostly copied from the IPv4 routing code
10 * 10 *
11 * 11 *
12 * Changes: 12 * Changes:
13 * 13 *
14 */ 14 */
15 #include <linux/string.h> 15 #include <linux/string.h>
16 #include <linux/net.h> 16 #include <linux/net.h>
17 #include <linux/socket.h> 17 #include <linux/socket.h>
18 #include <linux/sockios.h> 18 #include <linux/sockios.h>
19 #include <linux/init.h> 19 #include <linux/init.h>
20 #include <linux/skbuff.h> 20 #include <linux/skbuff.h>
21 #include <linux/netlink.h> 21 #include <linux/netlink.h>
22 #include <linux/rtnetlink.h> 22 #include <linux/rtnetlink.h>
23 #include <linux/proc_fs.h> 23 #include <linux/proc_fs.h>
24 #include <linux/netdevice.h> 24 #include <linux/netdevice.h>
25 #include <linux/timer.h> 25 #include <linux/timer.h>
26 #include <linux/spinlock.h> 26 #include <linux/spinlock.h>
27 #include <asm/atomic.h> 27 #include <asm/atomic.h>
28 #include <asm/uaccess.h> 28 #include <asm/uaccess.h>
29 #include <linux/route.h> /* RTF_xxx */ 29 #include <linux/route.h> /* RTF_xxx */
30 #include <net/neighbour.h> 30 #include <net/neighbour.h>
31 #include <net/netlink.h> 31 #include <net/netlink.h>
32 #include <net/dst.h> 32 #include <net/dst.h>
33 #include <net/flow.h> 33 #include <net/flow.h>
34 #include <net/fib_rules.h> 34 #include <net/fib_rules.h>
35 #include <net/dn.h> 35 #include <net/dn.h>
36 #include <net/dn_route.h> 36 #include <net/dn_route.h>
37 #include <net/dn_fib.h> 37 #include <net/dn_fib.h>
38 #include <net/dn_neigh.h> 38 #include <net/dn_neigh.h>
39 #include <net/dn_dev.h> 39 #include <net/dn_dev.h>
40 40
/*
 * One FIB hash "zone" per prefix length (dz_order).  Zones are chained
 * in dh_zone_list and indexed by order in struct dn_hash below.
 */
struct dn_zone
{
	struct dn_zone		*dz_next;	/* next zone in dh_zone_list */
	struct dn_fib_node	**dz_hash;	/* bucket array for this zone */
	int			dz_nent;	/* entries currently in the zone */
	int			dz_divisor;	/* number of buckets */
	u32			dz_hashmask;	/* bucket index mask (divisor - 1) */
#define DZ_HASHMASK(dz)	((dz)->dz_hashmask)
	int			dz_order;	/* prefix length covered by this zone */
	__le16			dz_mask;	/* network mask for dz_order bits */
#define DZ_MASK(dz)	((dz)->dz_mask)
};
53 53
/*
 * Per-table container: zones indexed by prefix length 0..16, plus the
 * list of active zones ordered for lookup.
 */
struct dn_hash
{
	struct dn_zone	*dh_zones[17];		/* one slot per prefix length */
	struct dn_zone	*dh_zone_list;		/* active zones, longest first (presumably) */
};
59 59
60 #define dz_key_0(key) ((key).datum = 0) 60 #define dz_key_0(key) ((key).datum = 0)
61 #define dz_prefix(key,dz) ((key).datum) 61 #define dz_prefix(key,dz) ((key).datum)
62 62
63 #define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\ 63 #define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
64 for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) 64 for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
65 65
66 #define endfor_nexthops(fi) } 66 #define endfor_nexthops(fi) }
67 67
68 #define DN_MAX_DIVISOR 1024 68 #define DN_MAX_DIVISOR 1024
69 #define DN_S_ZOMBIE 1 69 #define DN_S_ZOMBIE 1
70 #define DN_S_ACCESSED 2 70 #define DN_S_ACCESSED 2
71 71
72 #define DN_FIB_SCAN(f, fp) \ 72 #define DN_FIB_SCAN(f, fp) \
73 for( ; ((f) = *(fp)) != NULL; (fp) = &(f)->fn_next) 73 for( ; ((f) = *(fp)) != NULL; (fp) = &(f)->fn_next)
74 74
75 #define DN_FIB_SCAN_KEY(f, fp, key) \ 75 #define DN_FIB_SCAN_KEY(f, fp, key) \
76 for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_next) 76 for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_next)
77 77
78 #define RT_TABLE_MIN 1 78 #define RT_TABLE_MIN 1
79 #define DN_FIB_TABLE_HASHSZ 256 79 #define DN_FIB_TABLE_HASHSZ 256
80 static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ]; 80 static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ];
81 static DEFINE_RWLOCK(dn_fib_tables_lock); 81 static DEFINE_RWLOCK(dn_fib_tables_lock);
82 82
83 static struct kmem_cache *dn_hash_kmem __read_mostly; 83 static struct kmem_cache *dn_hash_kmem __read_mostly;
84 static int dn_fib_hash_zombies; 84 static int dn_fib_hash_zombies;
85 85
/*
 * Hash a zone key into a bucket index: take the top dz_order bits of
 * the CPU-order address, mix, and mask to the zone's bucket range.
 */
static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
{
	u16 h = le16_to_cpu(key.datum)>>(16 - dz->dz_order);
	h ^= (h >> 10);
	h ^= (h >> 6);
	h &= DZ_HASHMASK(dz);
	/* NOTE(review): pointer-cast type-pun u16 -> dn_fib_idx_t; relies
	 * on dn_fib_idx_t being a 16-bit wrapper — confirm in dn_fib.h. */
	return *(dn_fib_idx_t *)&h;
}
94 94
/* Build the zone lookup key: mask the destination to the zone's prefix. */
static inline dn_fib_key_t dz_key(__le16 dst, struct dn_zone *dz)
{
	dn_fib_key_t k;
	k.datum = dst & DZ_MASK(dz);
	return k;
}
101 101
102 static inline struct dn_fib_node **dn_chain_p(dn_fib_key_t key, struct dn_zone *dz) 102 static inline struct dn_fib_node **dn_chain_p(dn_fib_key_t key, struct dn_zone *dz)
103 { 103 {
104 return &dz->dz_hash[dn_hash(key, dz).datum]; 104 return &dz->dz_hash[dn_hash(key, dz).datum];
105 } 105 }
106 106
107 static inline struct dn_fib_node *dz_chain(dn_fib_key_t key, struct dn_zone *dz) 107 static inline struct dn_fib_node *dz_chain(dn_fib_key_t key, struct dn_zone *dz)
108 { 108 {
109 return dz->dz_hash[dn_hash(key, dz).datum]; 109 return dz->dz_hash[dn_hash(key, dz).datum];
110 } 110 }
111 111
112 static inline int dn_key_eq(dn_fib_key_t a, dn_fib_key_t b) 112 static inline int dn_key_eq(dn_fib_key_t a, dn_fib_key_t b)
113 { 113 {
114 return a.datum == b.datum; 114 return a.datum == b.datum;
115 } 115 }
116 116
117 static inline int dn_key_leq(dn_fib_key_t a, dn_fib_key_t b) 117 static inline int dn_key_leq(dn_fib_key_t a, dn_fib_key_t b)
118 { 118 {
119 return a.datum <= b.datum; 119 return a.datum <= b.datum;
120 } 120 }
121 121
122 static inline void dn_rebuild_zone(struct dn_zone *dz, 122 static inline void dn_rebuild_zone(struct dn_zone *dz,
123 struct dn_fib_node **old_ht, 123 struct dn_fib_node **old_ht,
124 int old_divisor) 124 int old_divisor)
125 { 125 {
126 int i; 126 int i;
127 struct dn_fib_node *f, **fp, *next; 127 struct dn_fib_node *f, **fp, *next;
128 128
129 for(i = 0; i < old_divisor; i++) { 129 for(i = 0; i < old_divisor; i++) {
130 for(f = old_ht[i]; f; f = f->fn_next) { 130 for(f = old_ht[i]; f; f = f->fn_next) {
131 next = f->fn_next; 131 next = f->fn_next;
132 for(fp = dn_chain_p(f->fn_key, dz); 132 for(fp = dn_chain_p(f->fn_key, dz);
133 *fp && dn_key_leq((*fp)->fn_key, f->fn_key); 133 *fp && dn_key_leq((*fp)->fn_key, f->fn_key);
134 fp = &(*fp)->fn_next) 134 fp = &(*fp)->fn_next)
135 /* NOTHING */; 135 /* NOTHING */;
136 f->fn_next = *fp; 136 f->fn_next = *fp;
137 *fp = f; 137 *fp = f;
138 } 138 }
139 } 139 }
140 } 140 }
141 141
/*
 * Grow a zone's hash table (16 -> 256 -> 1024 buckets) and rehash all
 * of its entries.  Called when chains get too long; a failed allocation
 * simply keeps the old, smaller table.
 */
static void dn_rehash_zone(struct dn_zone *dz)
{
	struct dn_fib_node **ht, **old_ht;
	int old_divisor, new_divisor;
	u32 new_hashmask;

	old_divisor = dz->dz_divisor;

	switch(old_divisor) {
	case 16:
		new_divisor = 256;
		new_hashmask = 0xFF;
		break;
	default:
		printk(KERN_DEBUG "DECnet: dn_rehash_zone: BUG! %d\n", old_divisor);
		/* fall through - treat an unexpected divisor like 256 */
	case 256:
		new_divisor = 1024;
		new_hashmask = 0x3FF;
		break;
	}

	ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL);
	if (ht == NULL)
		return;

	/* Swap in the new table under the lock, then rehash into it. */
	write_lock_bh(&dn_fib_tables_lock);
	old_ht = dz->dz_hash;
	dz->dz_hash = ht;
	dz->dz_hashmask = new_hashmask;
	dz->dz_divisor = new_divisor;
	dn_rebuild_zone(dz, old_ht, old_divisor);
	write_unlock_bh(&dn_fib_tables_lock);
	kfree(old_ht);
}
176 176
/* Drop the node's reference on its dn_fib_info, then free the node itself. */
static void dn_free_node(struct dn_fib_node *f)
{
	dn_fib_release_info(DN_FIB_INFO(f));
	kmem_cache_free(dn_hash_kmem, f);
}
182 182
183 183
184 static struct dn_zone *dn_new_zone(struct dn_hash *table, int z) 184 static struct dn_zone *dn_new_zone(struct dn_hash *table, int z)
185 { 185 {
186 int i; 186 int i;
187 struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL); 187 struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL);
188 if (!dz) 188 if (!dz)
189 return NULL; 189 return NULL;
190 190
191 if (z) { 191 if (z) {
192 dz->dz_divisor = 16; 192 dz->dz_divisor = 16;
193 dz->dz_hashmask = 0x0F; 193 dz->dz_hashmask = 0x0F;
194 } else { 194 } else {
195 dz->dz_divisor = 1; 195 dz->dz_divisor = 1;
196 dz->dz_hashmask = 0; 196 dz->dz_hashmask = 0;
197 } 197 }
198 198
199 dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL); 199 dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL);
200 if (!dz->dz_hash) { 200 if (!dz->dz_hash) {
201 kfree(dz); 201 kfree(dz);
202 return NULL; 202 return NULL;
203 } 203 }
204 204
205 dz->dz_order = z; 205 dz->dz_order = z;
206 dz->dz_mask = dnet_make_mask(z); 206 dz->dz_mask = dnet_make_mask(z);
207 207
208 for(i = z + 1; i <= 16; i++) 208 for(i = z + 1; i <= 16; i++)
209 if (table->dh_zones[i]) 209 if (table->dh_zones[i])
210 break; 210 break;
211 211
212 write_lock_bh(&dn_fib_tables_lock); 212 write_lock_bh(&dn_fib_tables_lock);
213 if (i>16) { 213 if (i>16) {
214 dz->dz_next = table->dh_zone_list; 214 dz->dz_next = table->dh_zone_list;
215 table->dh_zone_list = dz; 215 table->dh_zone_list = dz;
216 } else { 216 } else {
217 dz->dz_next = table->dh_zones[i]->dz_next; 217 dz->dz_next = table->dh_zones[i]->dz_next;
218 table->dh_zones[i]->dz_next = dz; 218 table->dh_zones[i]->dz_next = dz;
219 } 219 }
220 table->dh_zones[z] = dz; 220 table->dh_zones[z] = dz;
221 write_unlock_bh(&dn_fib_tables_lock); 221 write_unlock_bh(&dn_fib_tables_lock);
222 return dz; 222 return dz;
223 } 223 }
224 224
225 225
/*
 * Check whether the nexthop specification carried in @rta matches @fi.
 * Returns 0 on match, 1 on mismatch, or -EINVAL for a malformed
 * RTA_MULTIPATH attribute.
 */
static int dn_fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct dn_kern_rta *rta, struct dn_fib_info *fi)
{
	struct rtnexthop *nhp;
	int nhlen;

	if (rta->rta_priority && *rta->rta_priority != fi->fib_priority)
		return 1;

	/* Single-nexthop request: compare oif and/or gateway directly. */
	if (rta->rta_oif || rta->rta_gw) {
		if ((!rta->rta_oif || *rta->rta_oif == fi->fib_nh->nh_oif) &&
		    (!rta->rta_gw || memcmp(rta->rta_gw, &fi->fib_nh->nh_gw, 2) == 0))
			return 0;
		return 1;
	}

	/* No nexthop constraints given at all: treat as a match. */
	if (rta->rta_mp == NULL)
		return 0;

	/* Multipath: walk the rtnexthop array in step with fi's nexthops. */
	nhp = RTA_DATA(rta->rta_mp);
	nhlen = RTA_PAYLOAD(rta->rta_mp);

	for_nexthops(fi) {
		int attrlen = nhlen - sizeof(struct rtnexthop);
		__le16 gw;

		/* Bail out if the attribute runs short of the claimed length. */
		if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
			return -EINVAL;
		if (nhp->rtnh_ifindex && nhp->rtnh_ifindex != nh->nh_oif)
			return 1;
		if (attrlen) {
			gw = dn_fib_get_attr16(RTNH_DATA(nhp), attrlen, RTA_GATEWAY);

			if (gw && gw != nh->nh_gw)
				return 1;
		}
		nhp = RTNH_NEXT(nhp);
	} endfor_nexthops(fi);

	return 0;
}
266 266
267 static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi) 267 static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi)
268 { 268 {
269 size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg)) 269 size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
270 + nla_total_size(4) /* RTA_TABLE */ 270 + nla_total_size(4) /* RTA_TABLE */
271 + nla_total_size(2) /* RTA_DST */ 271 + nla_total_size(2) /* RTA_DST */
272 + nla_total_size(4); /* RTA_PRIORITY */ 272 + nla_total_size(4); /* RTA_PRIORITY */
273 273
274 /* space for nested metrics */ 274 /* space for nested metrics */
275 payload += nla_total_size((RTAX_MAX * nla_total_size(4))); 275 payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
276 276
277 if (fi->fib_nhs) { 277 if (fi->fib_nhs) {
278 /* Also handles the special case fib_nhs == 1 */ 278 /* Also handles the special case fib_nhs == 1 */
279 279
280 /* each nexthop is packed in an attribute */ 280 /* each nexthop is packed in an attribute */
281 size_t nhsize = nla_total_size(sizeof(struct rtnexthop)); 281 size_t nhsize = nla_total_size(sizeof(struct rtnexthop));
282 282
283 /* may contain a gateway attribute */ 283 /* may contain a gateway attribute */
284 nhsize += nla_total_size(4); 284 nhsize += nla_total_size(4);
285 285
286 /* all nexthops are packed in a nested attribute */ 286 /* all nexthops are packed in a nested attribute */
287 payload += nla_total_size(fi->fib_nhs * nhsize); 287 payload += nla_total_size(fi->fib_nhs * nhsize);
288 } 288 }
289 289
290 return payload; 290 return payload;
291 } 291 }
292 292
/*
 * Fill one RTM_NEWROUTE/RTM_DELROUTE message describing a route into
 * @skb.  Returns skb->len on success or -EMSGSIZE if the message did
 * not fit.  Note that NLMSG_NEW and RTA_PUT jump to the failure labels
 * at the bottom on overflow - control flow is hidden in the macros.
 */
static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
			u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
			struct dn_fib_info *fi, unsigned int flags)
{
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);	/* for rollback on failure */

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
	rtm = NLMSG_DATA(nlh);
	rtm->rtm_family = AF_DECnet;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = 0;
	rtm->rtm_table = tb_id;
	RTA_PUT_U32(skb, RTA_TABLE, tb_id);
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = scope;
	rtm->rtm_type = type;
	if (rtm->rtm_dst_len)
		RTA_PUT(skb, RTA_DST, 2, dst);	/* DECnet addresses are 2 bytes */
	rtm->rtm_protocol = fi->fib_protocol;
	if (fi->fib_priority)
		RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto rtattr_failure;
	if (fi->fib_nhs == 1) {
		/* Single nexthop: flat RTA_GATEWAY/RTA_OIF attributes. */
		if (fi->fib_nh->nh_gw)
			RTA_PUT(skb, RTA_GATEWAY, 2, &fi->fib_nh->nh_gw);
		if (fi->fib_nh->nh_oif)
			RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
	}
	if (fi->fib_nhs > 1) {
		/* Multipath: nest one rtnexthop per hop under RTA_MULTIPATH. */
		struct rtnexthop *nhp;
		struct rtattr *mp_head;
		if (skb_tailroom(skb) <= RTA_SPACE(0))
			goto rtattr_failure;
		mp_head = (struct rtattr *)skb_put(skb, RTA_SPACE(0));

		for_nexthops(fi) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = nh->nh_flags & 0xFF;
			nhp->rtnh_hops = nh->nh_weight - 1;
			nhp->rtnh_ifindex = nh->nh_oif;
			if (nh->nh_gw)
				RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw);
			/* Length is known only after the optional gateway. */
			nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp;
		} endfor_nexthops(fi);
		mp_head->rta_type = RTA_MULTIPATH;
		mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	}

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;


nlmsg_failure:
rtattr_failure:
	/* Undo any partial message so the skb stays consistent. */
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
356 356
357 357
/*
 * Notify RTNLGRP_DECnet_ROUTE listeners of a route change (@event is
 * RTM_NEWROUTE or RTM_DELROUTE).  Best effort: on failure the error is
 * recorded against the multicast group via rtnl_set_sk_err().
 */
static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
			struct nlmsghdr *nlh, struct netlink_skb_parms *req)
{
	struct sk_buff *skb;
	u32 pid = req ? req->pid : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
			       f->fn_type, f->fn_scope, &f->fn_key, z,
			       DN_FIB_INFO(f), 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in dn_fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, &init_net, pid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, RTNLGRP_DECnet_ROUTE, err);
}
383 383
/*
 * Dump one hash chain into @skb for a netlink dump, resuming at the
 * per-chain index saved in cb->args[4].  Returns skb->len, or -1 when
 * the skb filled up (the resume index is stored for the next call).
 */
static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
				struct netlink_callback *cb,
				struct dn_fib_table *tb,
				struct dn_zone *dz,
				struct dn_fib_node *f)
{
	int i, s_i;

	s_i = cb->args[4];
	for(i = 0; f; i++, f = f->fn_next) {
		if (i < s_i)
			continue;	/* already dumped in a previous pass */
		if (f->fn_state & DN_S_ZOMBIE)
			continue;	/* logically deleted, don't report */
		if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq,
				RTM_NEWROUTE,
				tb->n,
				(f->fn_state & DN_S_ZOMBIE) ? 0 : f->fn_type,
				f->fn_scope, &f->fn_key, dz->dz_order,
				f->fn_info, NLM_F_MULTI) < 0) {
			cb->args[4] = i;
			return -1;
		}
	}
	cb->args[4] = i;
	return skb->len;
}
412 412
/*
 * Dump every bucket of a zone, resuming at the bucket index saved in
 * cb->args[3].  Returns skb->len, or -1 when the skb filled up.
 */
static __inline__ int dn_hash_dump_zone(struct sk_buff *skb,
				struct netlink_callback *cb,
				struct dn_fib_table *tb,
				struct dn_zone *dz)
{
	int h, s_h;

	s_h = cb->args[3];
	for(h = 0; h < dz->dz_divisor; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			/* New bucket: reset the deeper resume state. */
			memset(&cb->args[4], 0, sizeof(cb->args) - 4*sizeof(cb->args[0]));
		if (dz->dz_hash == NULL || dz->dz_hash[h] == NULL)
			continue;
		if (dn_hash_dump_bucket(skb, cb, tb, dz, dz->dz_hash[h]) < 0) {
			cb->args[3] = h;
			return -1;
		}
	}
	cb->args[3] = h;
	return skb->len;
}
436 436
/*
 * Dump every zone of one table, resuming at the zone index saved in
 * cb->args[2].  Returns skb->len, or -1 when the skb filled up.
 */
static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
				struct netlink_callback *cb)
{
	int m, s_m;
	struct dn_zone *dz;
	struct dn_hash *table = (struct dn_hash *)tb->data;

	s_m = cb->args[2];
	read_lock(&dn_fib_tables_lock);
	for(dz = table->dh_zone_list, m = 0; dz; dz = dz->dz_next, m++) {
		if (m < s_m)
			continue;
		if (m > s_m)
			/* New zone: reset the deeper resume state. */
			memset(&cb->args[3], 0, sizeof(cb->args) - 3*sizeof(cb->args[0]));

		if (dn_hash_dump_zone(skb, cb, tb, dz) < 0) {
			cb->args[2] = m;
			read_unlock(&dn_fib_tables_lock);
			return -1;
		}
	}
	read_unlock(&dn_fib_tables_lock);
	cb->args[2] = m;

	return skb->len;
}
463 463
/*
 * Top-level RTM_GETROUTE dump handler: iterate all fib tables in the
 * table-id hash.  Resume state lives in cb->args[0] (hash bucket) and
 * cb->args[1] (entry within bucket); deeper levels use args[2..4].
 */
int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int h, s_h;
	unsigned int e = 0, s_e;
	struct dn_fib_table *tb;
	struct hlist_node *node;
	int dumped = 0;

	/* DECnet is not namespaced: only the initial namespace has routes. */
	if (net != &init_net)
		return 0;

	/* RTM_F_CLONED requests ask for the route cache, not the tables. */
	if (NLMSG_PAYLOAD(cb->nlh, 0) >= sizeof(struct rtmsg) &&
		((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED)
			return dn_cache_dump(skb, cb);

	s_h = cb->args[0];
	s_e = cb->args[1];

	for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) {
		e = 0;
		hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) {
			if (e < s_e)
				goto next;
			if (dumped)
				/* New table: reset per-table resume state. */
				memset(&cb->args[2], 0, sizeof(cb->args) -
						 2 * sizeof(cb->args[0]));
			if (tb->dump(tb, skb, cb) < 0)
				goto out;
			dumped = 1;
next:
			e++;
		}
	}
out:
	cb->args[1] = e;
	cb->args[0] = h;

	return skb->len;
}
504 504
/*
 * RTM_NEWROUTE handler for a hash table: insert (or replace) the route
 * described by @r/@rta into @tb, honouring the NLM_F_* flags in @n.
 * Returns 0 on success or a negative errno.
 */
static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct dn_kern_rta *rta, struct nlmsghdr *n, struct netlink_skb_parms *req)
{
	struct dn_hash *table = (struct dn_hash *)tb->data;
	struct dn_fib_node *new_f, *f, **fp, **del_fp;
	struct dn_zone *dz;
	struct dn_fib_info *fi;
	int z = r->rtm_dst_len;		/* prefix length selects the zone */
	int type = r->rtm_type;
	dn_fib_key_t key;
	int err;

	if (z > 16)
		return -EINVAL;

	/* Create the zone on first use of this prefix length. */
	dz = table->dh_zones[z];
	if (!dz && !(dz = dn_new_zone(table, z)))
		return -ENOBUFS;

	dz_key_0(key);
	if (rta->rta_dst) {
		__le16 dst;
		memcpy(&dst, rta->rta_dst, 2);
		/* Destination must not have bits outside the zone mask. */
		if (dst & ~DZ_MASK(dz))
			return -EINVAL;
		key = dz_key(dst, dz);
	}

	if ((fi = dn_fib_create_info(r, rta, n, &err)) == NULL)
		return err;

	/*
	 * NOTE(review): the "> DN_MAX_DIVISOR" comparison looks inverted
	 * (the ipv4 fib_hash analogue uses "<" so rehashing happens while
	 * still below the cap); as written dn_rehash_zone() cannot
	 * trigger.  Preserved as-is - confirm against upstream history.
	 */
	if (dz->dz_nent > (dz->dz_divisor << 2) &&
			dz->dz_divisor > DN_MAX_DIVISOR &&
			(z==16 || (1<<z) > dz->dz_divisor))
		dn_rehash_zone(dz);

	fp = dn_chain_p(key, dz);

	/* Chains are key-ordered: find the insertion point for @key. */
	DN_FIB_SCAN(f, fp) {
		if (dn_key_leq(key, f->fn_key))
			break;
	}

	del_fp = NULL;

	/* An existing zombie with the same key is always replaced. */
	if (f && (f->fn_state & DN_S_ZOMBIE) &&
			dn_key_eq(f->fn_key, key)) {
		del_fp = fp;
		fp = &f->fn_next;
		f = *fp;
		goto create;
	}

	/* Skip same-key entries with a better (lower) priority. */
	DN_FIB_SCAN_KEY(f, fp, key) {
		if (fi->fib_priority <= DN_FIB_INFO(f)->fib_priority)
			break;
	}

	if (f && dn_key_eq(f->fn_key, key) &&
			fi->fib_priority == DN_FIB_INFO(f)->fib_priority) {
		struct dn_fib_node **ins_fp;

		err = -EEXIST;
		if (n->nlmsg_flags & NLM_F_EXCL)
			goto out;

		if (n->nlmsg_flags & NLM_F_REPLACE) {
			del_fp = fp;
			fp = &f->fn_next;
			f = *fp;
			goto replace;
		}

		ins_fp = fp;
		err = -EEXIST;

		/* Reject an exact duplicate within the same-priority run. */
		DN_FIB_SCAN_KEY(f, fp, key) {
			if (fi->fib_priority != DN_FIB_INFO(f)->fib_priority)
				break;
			if (f->fn_type == type && f->fn_scope == r->rtm_scope
					&& DN_FIB_INFO(f) == fi)
				goto out;
		}

		/* Without NLM_F_APPEND, insert at the head of the run. */
		if (!(n->nlmsg_flags & NLM_F_APPEND)) {
			fp = ins_fp;
			f = *fp;
		}
	}

create:
	err = -ENOENT;
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		goto out;

replace:
	err = -ENOBUFS;
	new_f = kmem_cache_zalloc(dn_hash_kmem, GFP_KERNEL);
	if (new_f == NULL)
		goto out;

	new_f->fn_key = key;
	new_f->fn_type = type;
	new_f->fn_scope = r->rtm_scope;
	DN_FIB_INFO(new_f) = fi;

	/* Link the new node in under the table lock. */
	new_f->fn_next = f;
	write_lock_bh(&dn_fib_tables_lock);
	*fp = new_f;
	write_unlock_bh(&dn_fib_tables_lock);
	dz->dz_nent++;

	if (del_fp) {
		/* Unlink and free the entry we replaced. */
		f = *del_fp;
		write_lock_bh(&dn_fib_tables_lock);
		*del_fp = f->fn_next;
		write_unlock_bh(&dn_fib_tables_lock);

		if (!(f->fn_state & DN_S_ZOMBIE))
			dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
		if (f->fn_state & DN_S_ACCESSED)
			dn_rt_cache_flush(-1);
		dn_free_node(f);
		dz->dz_nent--;
	} else {
		dn_rt_cache_flush(-1);
	}

	dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req);

	return 0;
out:
	dn_fib_release_info(fi);
	return err;
}
639 639
640 640
/*
 * RTM_DELROUTE handler: find the entry matching @r/@rta and remove it.
 * The sole entry for a key is not freed immediately but marked as a
 * zombie (so concurrent lookups stay consistent); zombies are reaped
 * in bulk by dn_fib_flush() once enough accumulate.
 */
static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct dn_kern_rta *rta, struct nlmsghdr *n, struct netlink_skb_parms *req)
{
	struct dn_hash *table = (struct dn_hash*)tb->data;
	struct dn_fib_node **fp, **del_fp, *f;
	int z = r->rtm_dst_len;
	struct dn_zone *dz;
	dn_fib_key_t key;
	int matched;


	if (z > 16)
		return -EINVAL;

	if ((dz = table->dh_zones[z]) == NULL)
		return -ESRCH;

	dz_key_0(key);
	if (rta->rta_dst) {
		__le16 dst;
		memcpy(&dst, rta->rta_dst, 2);
		if (dst & ~DZ_MASK(dz))
			return -EINVAL;
		key = dz_key(dst, dz);
	}

	fp = dn_chain_p(key, dz);

	/* Chains are key-ordered: stop early once we have passed @key. */
	DN_FIB_SCAN(f, fp) {
		if (dn_key_eq(f->fn_key, key))
			break;
		if (dn_key_leq(key, f->fn_key))
			return -ESRCH;
	}

	/* Count entries sharing the key; remember the first full match. */
	matched = 0;
	del_fp = NULL;
	DN_FIB_SCAN_KEY(f, fp, key) {
		struct dn_fib_info *fi = DN_FIB_INFO(f);

		if (f->fn_state & DN_S_ZOMBIE)
			return -ESRCH;	/* already deleted */

		matched++;

		if (del_fp == NULL &&
				(!r->rtm_type || f->fn_type == r->rtm_type) &&
				(r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) &&
				(!r->rtm_protocol ||
					fi->fib_protocol == r->rtm_protocol) &&
				dn_fib_nh_match(r, n, rta, fi) == 0)
			del_fp = fp;
	}

	if (del_fp) {
		f = *del_fp;
		dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);

		if (matched != 1) {
			/* Other entries share the key: unlink and free now. */
			write_lock_bh(&dn_fib_tables_lock);
			*del_fp = f->fn_next;
			write_unlock_bh(&dn_fib_tables_lock);

			if (f->fn_state & DN_S_ACCESSED)
				dn_rt_cache_flush(-1);
			dn_free_node(f);
			dz->dz_nent--;
		} else {
			/* Sole entry for the key: turn it into a zombie. */
			f->fn_state |= DN_S_ZOMBIE;
			if (f->fn_state & DN_S_ACCESSED) {
				f->fn_state &= ~DN_S_ACCESSED;
				dn_rt_cache_flush(-1);
			}
			/* Reap in bulk once too many zombies accumulate. */
			if (++dn_fib_hash_zombies > 128)
				dn_fib_flush();
		}

		return 0;
	}

	return -ESRCH;
}
722 722
723 static inline int dn_flush_list(struct dn_fib_node **fp, int z, struct dn_hash *table) 723 static inline int dn_flush_list(struct dn_fib_node **fp, int z, struct dn_hash *table)
724 { 724 {
725 int found = 0; 725 int found = 0;
726 struct dn_fib_node *f; 726 struct dn_fib_node *f;
727 727
728 while((f = *fp) != NULL) { 728 while((f = *fp) != NULL) {
729 struct dn_fib_info *fi = DN_FIB_INFO(f); 729 struct dn_fib_info *fi = DN_FIB_INFO(f);
730 730
731 if (fi && ((f->fn_state & DN_S_ZOMBIE) || (fi->fib_flags & RTNH_F_DEAD))) { 731 if (fi && ((f->fn_state & DN_S_ZOMBIE) || (fi->fib_flags & RTNH_F_DEAD))) {
732 write_lock_bh(&dn_fib_tables_lock); 732 write_lock_bh(&dn_fib_tables_lock);
733 *fp = f->fn_next; 733 *fp = f->fn_next;
734 write_unlock_bh(&dn_fib_tables_lock); 734 write_unlock_bh(&dn_fib_tables_lock);
735 735
736 dn_free_node(f); 736 dn_free_node(f);
737 found++; 737 found++;
738 continue; 738 continue;
739 } 739 }
740 fp = &f->fn_next; 740 fp = &f->fn_next;
741 } 741 }
742 742
743 return found; 743 return found;
744 } 744 }
745 745
746 static int dn_fib_table_flush(struct dn_fib_table *tb) 746 static int dn_fib_table_flush(struct dn_fib_table *tb)
747 { 747 {
748 struct dn_hash *table = (struct dn_hash *)tb->data; 748 struct dn_hash *table = (struct dn_hash *)tb->data;
749 struct dn_zone *dz; 749 struct dn_zone *dz;
750 int found = 0; 750 int found = 0;
751 751
752 dn_fib_hash_zombies = 0; 752 dn_fib_hash_zombies = 0;
753 for(dz = table->dh_zone_list; dz; dz = dz->dz_next) { 753 for(dz = table->dh_zone_list; dz; dz = dz->dz_next) {
754 int i; 754 int i;
755 int tmp = 0; 755 int tmp = 0;
756 for(i = dz->dz_divisor-1; i >= 0; i--) 756 for(i = dz->dz_divisor-1; i >= 0; i--)
757 tmp += dn_flush_list(&dz->dz_hash[i], dz->dz_order, table); 757 tmp += dn_flush_list(&dz->dz_hash[i], dz->dz_order, table);
758 dz->dz_nent -= tmp; 758 dz->dz_nent -= tmp;
759 found += tmp; 759 found += tmp;
760 } 760 }
761 761
762 return found; 762 return found;
763 } 763 }
764 764
/*
 * Route lookup over the table's zone list.  Zones are walked in list
 * order and the first semantic match wins.  Returns 0 and fills in
 * *res on success, a negative error from dn_fib_semantic_match() on
 * hard failure, or 1 when nothing matched.  Runs under the fib
 * tables read lock.
 */
static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowi *flp, struct dn_fib_res *res)
{
	int err;
	struct dn_zone *dz;
	struct dn_hash *t = (struct dn_hash *)tb->data;

	read_lock(&dn_fib_tables_lock);
	for(dz = t->dh_zone_list; dz; dz = dz->dz_next) {
		struct dn_fib_node *f;
		dn_fib_key_t k = dz_key(flp->fld_dst, dz);

		for(f = dz_chain(k, dz); f; f = f->fn_next) {
			/* Chains are key-ordered: once past k, stop scanning
			 * this chain; otherwise keep looking for an equal key. */
			if (!dn_key_eq(k, f->fn_key)) {
				if (dn_key_leq(k, f->fn_key))
					break;
				else
					continue;
			}

			f->fn_state |= DN_S_ACCESSED;

			/* Zombies are logically deleted entries; skip them. */
			if (f->fn_state&DN_S_ZOMBIE)
				continue;

			/* Entry scope must cover the scope the flow requires. */
			if (f->fn_scope < flp->fld_scope)
				continue;

			err = dn_fib_semantic_match(f->fn_type, DN_FIB_INFO(f), flp, res);

			/* 0 = match found; <0 = hard error; >0 = keep searching. */
			if (err == 0) {
				res->type = f->fn_type;
				res->scope = f->fn_scope;
				res->prefixlen = dz->dz_order;
				goto out;
			}
			if (err < 0)
				goto out;
		}
	}
	err = 1;
out:
	read_unlock(&dn_fib_tables_lock);
	return err;
}
809 809
810 810
811 struct dn_fib_table *dn_fib_get_table(u32 n, int create) 811 struct dn_fib_table *dn_fib_get_table(u32 n, int create)
812 { 812 {
813 struct dn_fib_table *t; 813 struct dn_fib_table *t;
814 struct hlist_node *node; 814 struct hlist_node *node;
815 unsigned int h; 815 unsigned int h;
816 816
817 if (n < RT_TABLE_MIN) 817 if (n < RT_TABLE_MIN)
818 return NULL; 818 return NULL;
819 819
820 if (n > RT_TABLE_MAX) 820 if (n > RT_TABLE_MAX)
821 return NULL; 821 return NULL;
822 822
823 h = n & (DN_FIB_TABLE_HASHSZ - 1); 823 h = n & (DN_FIB_TABLE_HASHSZ - 1);
824 rcu_read_lock(); 824 rcu_read_lock();
825 hlist_for_each_entry_rcu(t, node, &dn_fib_table_hash[h], hlist) { 825 hlist_for_each_entry_rcu(t, node, &dn_fib_table_hash[h], hlist) {
826 if (t->n == n) { 826 if (t->n == n) {
827 rcu_read_unlock(); 827 rcu_read_unlock();
828 return t; 828 return t;
829 } 829 }
830 } 830 }
831 rcu_read_unlock(); 831 rcu_read_unlock();
832 832
833 if (!create) 833 if (!create)
834 return NULL; 834 return NULL;
835 835
836 if (in_interrupt() && net_ratelimit()) { 836 if (in_interrupt() && net_ratelimit()) {
837 printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n"); 837 printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n");
838 return NULL; 838 return NULL;
839 } 839 }
840 840
841 t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash), 841 t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash),
842 GFP_KERNEL); 842 GFP_KERNEL);
843 if (t == NULL) 843 if (t == NULL)
844 return NULL; 844 return NULL;
845 845
846 t->n = n; 846 t->n = n;
847 t->insert = dn_fib_table_insert; 847 t->insert = dn_fib_table_insert;
848 t->delete = dn_fib_table_delete; 848 t->delete = dn_fib_table_delete;
849 t->lookup = dn_fib_table_lookup; 849 t->lookup = dn_fib_table_lookup;
850 t->flush = dn_fib_table_flush; 850 t->flush = dn_fib_table_flush;
851 t->dump = dn_fib_table_dump; 851 t->dump = dn_fib_table_dump;
852 hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]); 852 hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]);
853 853
854 return t; 854 return t;
855 } 855 }
856 856
857 struct dn_fib_table *dn_fib_empty_table(void) 857 struct dn_fib_table *dn_fib_empty_table(void)
858 { 858 {
859 u32 id; 859 u32 id;
860 860
861 for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++) 861 for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++)
862 if (dn_fib_get_table(id, 0) == NULL) 862 if (dn_fib_get_table(id, 0) == NULL)
863 return dn_fib_get_table(id, 1); 863 return dn_fib_get_table(id, 1);
864 return NULL; 864 return NULL;
865 } 865 }
866 866
867 void dn_fib_flush(void) 867 void dn_fib_flush(void)
868 { 868 {
869 int flushed = 0; 869 int flushed = 0;
870 struct dn_fib_table *tb; 870 struct dn_fib_table *tb;
871 struct hlist_node *node; 871 struct hlist_node *node;
872 unsigned int h; 872 unsigned int h;
873 873
874 for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) { 874 for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
875 hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist) 875 hlist_for_each_entry(tb, node, &dn_fib_table_hash[h], hlist)
876 flushed += tb->flush(tb); 876 flushed += tb->flush(tb);
877 } 877 }
878 878
879 if (flushed) 879 if (flushed)
880 dn_rt_cache_flush(-1); 880 dn_rt_cache_flush(-1);
881 } 881 }
882 882
/*
 * Boot-time setup: create the slab cache used by the fib code.
 * NOTE(review): the cache is sized for struct dn_fib_info while the
 * variable is named dn_hash_kmem -- confirm against the cache's
 * users elsewhere in this file.
 */
void __init dn_fib_table_init(void)
{
	dn_hash_kmem = kmem_cache_create("dn_fib_info_cache",
					sizeof(struct dn_fib_info),
					0, SLAB_HWCACHE_ALIGN,
					NULL);
}
890 890
/*
 * Module exit: unhash and free every fib table.  The writer lock
 * excludes concurrent readers of the table hash while entries are
 * being removed; the _safe iterator allows deletion during the walk.
 */
void __exit dn_fib_table_cleanup(void)
{
	struct dn_fib_table *t;
	struct hlist_node *node, *next;
	unsigned int h;

	write_lock(&dn_fib_tables_lock);
	for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
		hlist_for_each_entry_safe(t, node, next, &dn_fib_table_hash[h],
					  hlist) {
			hlist_del(&t->hlist);
			kfree(t);
		}
	}
	write_unlock(&dn_fib_tables_lock);
}
907 907
net/decnet/sysctl_net_decnet.c
1 /* 1 /*
2 * DECnet An implementation of the DECnet protocol suite for the LINUX 2 * DECnet An implementation of the DECnet protocol suite for the LINUX
3 * operating system. DECnet is implemented using the BSD Socket 3 * operating system. DECnet is implemented using the BSD Socket
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * DECnet sysctl support functions 6 * DECnet sysctl support functions
7 * 7 *
8 * Author: Steve Whitehouse <SteveW@ACM.org> 8 * Author: Steve Whitehouse <SteveW@ACM.org>
9 * 9 *
10 * 10 *
11 * Changes: 11 * Changes:
12 * Steve Whitehouse - C99 changes and default device handling 12 * Steve Whitehouse - C99 changes and default device handling
13 * Steve Whitehouse - Memory buffer settings, like the tcp ones 13 * Steve Whitehouse - Memory buffer settings, like the tcp ones
14 * 14 *
15 */ 15 */
16 #include <linux/mm.h> 16 #include <linux/mm.h>
17 #include <linux/sysctl.h> 17 #include <linux/sysctl.h>
18 #include <linux/fs.h> 18 #include <linux/fs.h>
19 #include <linux/netdevice.h> 19 #include <linux/netdevice.h>
20 #include <linux/string.h> 20 #include <linux/string.h>
21 #include <net/neighbour.h> 21 #include <net/neighbour.h>
22 #include <net/dst.h> 22 #include <net/dst.h>
23 #include <net/flow.h> 23 #include <net/flow.h>
24 24
25 #include <asm/uaccess.h> 25 #include <asm/uaccess.h>
26 26
27 #include <net/dn.h> 27 #include <net/dn.h>
28 #include <net/dn_dev.h> 28 #include <net/dn_dev.h>
29 #include <net/dn_route.h> 29 #include <net/dn_route.h>
30 30
31 31
/* DECnet tunables exposed through the sysctl table defined below. */
int decnet_debug_level;
int decnet_time_wait = 30;
int decnet_dn_count = 1;
int decnet_di_count = 3;
int decnet_dr_count = 3;
int decnet_log_martians = 1;
int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW;

/* Reasonable defaults, I hope, based on tcp's defaults */
int sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
44 44
45 #ifdef CONFIG_SYSCTL 45 #ifdef CONFIG_SYSCTL
extern int decnet_dst_gc_interval;
/* Clamp arrays consumed by the proc_dointvec_minmax entries below
 * (via .extra1/.extra2). */
static int min_decnet_time_wait[] = { 5 };
static int max_decnet_time_wait[] = { 600 };
static int min_state_count[] = { 1 };
static int max_state_count[] = { NSP_MAXRXTSHIFT };
static int min_decnet_dst_gc_interval[] = { 1 };
static int max_decnet_dst_gc_interval[] = { 60 };
static int min_decnet_no_fc_max_cwnd[] = { NSP_MIN_WINDOW };
static int max_decnet_no_fc_max_cwnd[] = { NSP_MAX_WINDOW };
static char node_name[7] = "???";

/* Handle returned by register_sysctl_paths(); used to unregister. */
static struct ctl_table_header *dn_table_header = NULL;

/*
 * ctype.h :-)
 * Minimal character-class helpers used by the parsers below.
 */
#define ISNUM(x) (((x) >= '0') && ((x) <= '9'))
#define ISLOWER(x) (((x) >= 'a') && ((x) <= 'z'))
#define ISUPPER(x) (((x) >= 'A') && ((x) <= 'Z'))
#define ISALPHA(x) (ISLOWER(x) || ISUPPER(x))
#define INVALID_END_CHAR(x) (ISNUM(x) || ISALPHA(x))
67 67
/*
 * Truncate @str in place at the first separator character --
 * space, newline, carriage return or ':' (the separator byte is
 * replaced by NUL).  A string containing none of these is left
 * unchanged.
 */
static void strip_it(char *str)
{
	while (*str != 0) {
		if (*str == ' ' || *str == '\n' ||
		    *str == '\r' || *str == ':') {
			*str = 0;
			break;
		}
		str++;
	}
}
83 83
84 /* 84 /*
85 * Simple routine to parse an ascii DECnet address 85 * Simple routine to parse an ascii DECnet address
86 * into a network order address. 86 * into a network order address.
87 */ 87 */
88 static int parse_addr(__le16 *addr, char *str) 88 static int parse_addr(__le16 *addr, char *str)
89 { 89 {
90 __u16 area, node; 90 __u16 area, node;
91 91
92 while(*str && !ISNUM(*str)) str++; 92 while(*str && !ISNUM(*str)) str++;
93 93
94 if (*str == 0) 94 if (*str == 0)
95 return -1; 95 return -1;
96 96
97 area = (*str++ - '0'); 97 area = (*str++ - '0');
98 if (ISNUM(*str)) { 98 if (ISNUM(*str)) {
99 area *= 10; 99 area *= 10;
100 area += (*str++ - '0'); 100 area += (*str++ - '0');
101 } 101 }
102 102
103 if (*str++ != '.') 103 if (*str++ != '.')
104 return -1; 104 return -1;
105 105
106 if (!ISNUM(*str)) 106 if (!ISNUM(*str))
107 return -1; 107 return -1;
108 108
109 node = *str++ - '0'; 109 node = *str++ - '0';
110 if (ISNUM(*str)) { 110 if (ISNUM(*str)) {
111 node *= 10; 111 node *= 10;
112 node += (*str++ - '0'); 112 node += (*str++ - '0');
113 } 113 }
114 if (ISNUM(*str)) { 114 if (ISNUM(*str)) {
115 node *= 10; 115 node *= 10;
116 node += (*str++ - '0'); 116 node += (*str++ - '0');
117 } 117 }
118 if (ISNUM(*str)) { 118 if (ISNUM(*str)) {
119 node *= 10; 119 node *= 10;
120 node += (*str++ - '0'); 120 node += (*str++ - '0');
121 } 121 }
122 122
123 if ((node > 1023) || (area > 63)) 123 if ((node > 1023) || (area > 63))
124 return -1; 124 return -1;
125 125
126 if (INVALID_END_CHAR(*str)) 126 if (INVALID_END_CHAR(*str))
127 return -1; 127 return -1;
128 128
129 *addr = dn_htons((area << 10) | node); 129 *addr = cpu_to_le16((area << 10) | node);
130 130
131 return 0; 131 return 0;
132 } 132 }
133 133
134 134
/*
 * sysctl(2) binary-interface strategy for net.decnet.node_address.
 * Reads copy the current 16-bit decnet_address to userspace; writes
 * install a new one, cycling the DECnet devices off and back on so
 * they pick up the new address.  Returns 0 on success, -EFAULT on a
 * bad user pointer or -EINVAL on a wrong length.
 */
static int dn_node_address_strategy(ctl_table *table,
				void __user *oldval, size_t __user *oldlenp,
				void __user *newval, size_t newlen)
{
	size_t len;
	__le16 addr;

	/* Report the current address if the caller supplied a buffer. */
	if (oldval && oldlenp) {
		if (get_user(len, oldlenp))
			return -EFAULT;
		if (len) {
			if (len != sizeof(unsigned short))
				return -EINVAL;
			if (put_user(decnet_address, (__le16 __user *)oldval))
				return -EFAULT;
		}
	}
	/* Install a new address if one was supplied. */
	if (newval && newlen) {
		if (newlen != sizeof(unsigned short))
			return -EINVAL;
		if (get_user(addr, (__le16 __user *)newval))
			return -EFAULT;

		/* Quiesce devices across the address change. */
		dn_dev_devices_off();

		decnet_address = addr;

		dn_dev_devices_on();
	}
	return 0;
}
166 166
167 static int dn_node_address_handler(ctl_table *table, int write, 167 static int dn_node_address_handler(ctl_table *table, int write,
168 struct file *filp, 168 struct file *filp,
169 void __user *buffer, 169 void __user *buffer,
170 size_t *lenp, loff_t *ppos) 170 size_t *lenp, loff_t *ppos)
171 { 171 {
172 char addr[DN_ASCBUF_LEN]; 172 char addr[DN_ASCBUF_LEN];
173 size_t len; 173 size_t len;
174 __le16 dnaddr; 174 __le16 dnaddr;
175 175
176 if (!*lenp || (*ppos && !write)) { 176 if (!*lenp || (*ppos && !write)) {
177 *lenp = 0; 177 *lenp = 0;
178 return 0; 178 return 0;
179 } 179 }
180 180
181 if (write) { 181 if (write) {
182 int len = (*lenp < DN_ASCBUF_LEN) ? *lenp : (DN_ASCBUF_LEN-1); 182 int len = (*lenp < DN_ASCBUF_LEN) ? *lenp : (DN_ASCBUF_LEN-1);
183 183
184 if (copy_from_user(addr, buffer, len)) 184 if (copy_from_user(addr, buffer, len))
185 return -EFAULT; 185 return -EFAULT;
186 186
187 addr[len] = 0; 187 addr[len] = 0;
188 strip_it(addr); 188 strip_it(addr);
189 189
190 if (parse_addr(&dnaddr, addr)) 190 if (parse_addr(&dnaddr, addr))
191 return -EINVAL; 191 return -EINVAL;
192 192
193 dn_dev_devices_off(); 193 dn_dev_devices_off();
194 194
195 decnet_address = dnaddr; 195 decnet_address = dnaddr;
196 196
197 dn_dev_devices_on(); 197 dn_dev_devices_on();
198 198
199 *ppos += len; 199 *ppos += len;
200 200
201 return 0; 201 return 0;
202 } 202 }
203 203
204 dn_addr2asc(dn_ntohs(decnet_address), addr); 204 dn_addr2asc(le16_to_cpu(decnet_address), addr);
205 len = strlen(addr); 205 len = strlen(addr);
206 addr[len++] = '\n'; 206 addr[len++] = '\n';
207 207
208 if (len > *lenp) len = *lenp; 208 if (len > *lenp) len = *lenp;
209 209
210 if (copy_to_user(buffer, addr, len)) 210 if (copy_to_user(buffer, addr, len))
211 return -EFAULT; 211 return -EFAULT;
212 212
213 *lenp = len; 213 *lenp = len;
214 *ppos += len; 214 *ppos += len;
215 215
216 return 0; 216 return 0;
217 } 217 }
218 218
219 219
220 static int dn_def_dev_strategy(ctl_table *table, 220 static int dn_def_dev_strategy(ctl_table *table,
221 void __user *oldval, size_t __user *oldlenp, 221 void __user *oldval, size_t __user *oldlenp,
222 void __user *newval, size_t newlen) 222 void __user *newval, size_t newlen)
223 { 223 {
224 size_t len; 224 size_t len;
225 struct net_device *dev; 225 struct net_device *dev;
226 char devname[17]; 226 char devname[17];
227 size_t namel; 227 size_t namel;
228 int rv = 0; 228 int rv = 0;
229 229
230 devname[0] = 0; 230 devname[0] = 0;
231 231
232 if (oldval && oldlenp) { 232 if (oldval && oldlenp) {
233 if (get_user(len, oldlenp)) 233 if (get_user(len, oldlenp))
234 return -EFAULT; 234 return -EFAULT;
235 if (len) { 235 if (len) {
236 dev = dn_dev_get_default(); 236 dev = dn_dev_get_default();
237 if (dev) { 237 if (dev) {
238 strcpy(devname, dev->name); 238 strcpy(devname, dev->name);
239 dev_put(dev); 239 dev_put(dev);
240 } 240 }
241 241
242 namel = strlen(devname) + 1; 242 namel = strlen(devname) + 1;
243 if (len > namel) len = namel; 243 if (len > namel) len = namel;
244 244
245 if (copy_to_user(oldval, devname, len)) 245 if (copy_to_user(oldval, devname, len))
246 return -EFAULT; 246 return -EFAULT;
247 247
248 if (put_user(len, oldlenp)) 248 if (put_user(len, oldlenp))
249 return -EFAULT; 249 return -EFAULT;
250 } 250 }
251 } 251 }
252 252
253 if (newval && newlen) { 253 if (newval && newlen) {
254 if (newlen > 16) 254 if (newlen > 16)
255 return -E2BIG; 255 return -E2BIG;
256 256
257 if (copy_from_user(devname, newval, newlen)) 257 if (copy_from_user(devname, newval, newlen))
258 return -EFAULT; 258 return -EFAULT;
259 259
260 devname[newlen] = 0; 260 devname[newlen] = 0;
261 261
262 dev = dev_get_by_name(&init_net, devname); 262 dev = dev_get_by_name(&init_net, devname);
263 if (dev == NULL) 263 if (dev == NULL)
264 return -ENODEV; 264 return -ENODEV;
265 265
266 rv = -ENODEV; 266 rv = -ENODEV;
267 if (dev->dn_ptr != NULL) { 267 if (dev->dn_ptr != NULL) {
268 rv = dn_dev_set_default(dev, 1); 268 rv = dn_dev_set_default(dev, 1);
269 if (rv) 269 if (rv)
270 dev_put(dev); 270 dev_put(dev);
271 } 271 }
272 } 272 }
273 273
274 return rv; 274 return rv;
275 } 275 }
276 276
277 277
/*
 * proc handler for /proc/sys/net/decnet/default_device.
 * Writes look up the named device and make it the DECnet default;
 * reads copy out the current default device name plus a newline.
 * Returns 0 on success, -E2BIG/-EFAULT on bad user data, -ENODEV
 * when the device is missing, has no DECnet state, or cannot be
 * made the default.
 */
static int dn_def_dev_handler(ctl_table *table, int write,
			      struct file * filp,
			      void __user *buffer,
			      size_t *lenp, loff_t *ppos)
{
	size_t len;
	struct net_device *dev;
	char devname[17];

	/* Nothing to do for a zero-length request or a re-read. */
	if (!*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		if (*lenp > 16)
			return -E2BIG;

		if (copy_from_user(devname, buffer, *lenp))
			return -EFAULT;

		devname[*lenp] = 0;
		strip_it(devname);

		dev = dev_get_by_name(&init_net, devname);
		if (dev == NULL)
			return -ENODEV;

		/* A device without DECnet state cannot be the default. */
		if (dev->dn_ptr == NULL) {
			dev_put(dev);
			return -ENODEV;
		}

		/* On success dn_dev_set_default() keeps our reference. */
		if (dn_dev_set_default(dev, 1)) {
			dev_put(dev);
			return -ENODEV;
		}
		*ppos += *lenp;

		return 0;
	}

	/* Read path: report the current default device name. */
	dev = dn_dev_get_default();
	if (dev == NULL) {
		*lenp = 0;
		return 0;
	}

	strcpy(devname, dev->name);
	dev_put(dev);
	len = strlen(devname);
	devname[len++] = '\n';

	if (len > *lenp) len = *lenp;

	if (copy_to_user(buffer, devname, len))
		return -EFAULT;

	*lenp = len;
	*ppos += len;

	return 0;
}
341 341
/*
 * DECnet sysctl table, registered under net/decnet/.  The integer
 * entries are clamped through proc_dointvec_minmax using the bounds
 * arrays defined above; node_address and default_device use the
 * custom handlers in this file.  Terminated by the {0} sentinel.
 */
static ctl_table dn_table[] = {
	{
		.ctl_name = NET_DECNET_NODE_ADDRESS,
		.procname = "node_address",
		.maxlen = 7,
		.mode = 0644,
		.proc_handler = dn_node_address_handler,
		.strategy = dn_node_address_strategy,
	},
	{
		.ctl_name = NET_DECNET_NODE_NAME,
		.procname = "node_name",
		.data = node_name,
		.maxlen = 7,
		.mode = 0644,
		.proc_handler = proc_dostring,
		.strategy = sysctl_string,
	},
	{
		.ctl_name = NET_DECNET_DEFAULT_DEVICE,
		.procname = "default_device",
		.maxlen = 16,
		.mode = 0644,
		.proc_handler = dn_def_dev_handler,
		.strategy = dn_def_dev_strategy,
	},
	{
		.ctl_name = NET_DECNET_TIME_WAIT,
		.procname = "time_wait",
		.data = &decnet_time_wait,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_decnet_time_wait,
		.extra2 = &max_decnet_time_wait
	},
	{
		.ctl_name = NET_DECNET_DN_COUNT,
		.procname = "dn_count",
		.data = &decnet_dn_count,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_state_count,
		.extra2 = &max_state_count
	},
	{
		.ctl_name = NET_DECNET_DI_COUNT,
		.procname = "di_count",
		.data = &decnet_di_count,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_state_count,
		.extra2 = &max_state_count
	},
	{
		.ctl_name = NET_DECNET_DR_COUNT,
		.procname = "dr_count",
		.data = &decnet_dr_count,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_state_count,
		.extra2 = &max_state_count
	},
	{
		.ctl_name = NET_DECNET_DST_GC_INTERVAL,
		.procname = "dst_gc_interval",
		.data = &decnet_dst_gc_interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_decnet_dst_gc_interval,
		.extra2 = &max_decnet_dst_gc_interval
	},
	{
		.ctl_name = NET_DECNET_NO_FC_MAX_CWND,
		.procname = "no_fc_max_cwnd",
		.data = &decnet_no_fc_max_cwnd,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_decnet_no_fc_max_cwnd,
		.extra2 = &max_decnet_no_fc_max_cwnd
	},
	{
		.ctl_name = NET_DECNET_MEM,
		.procname = "decnet_mem",
		.data = &sysctl_decnet_mem,
		.maxlen = sizeof(sysctl_decnet_mem),
		.mode = 0644,
		.proc_handler = proc_dointvec,
		.strategy = sysctl_intvec,
	},
	{
		.ctl_name = NET_DECNET_RMEM,
		.procname = "decnet_rmem",
		.data = &sysctl_decnet_rmem,
		.maxlen = sizeof(sysctl_decnet_rmem),
		.mode = 0644,
		.proc_handler = proc_dointvec,
		.strategy = sysctl_intvec,
	},
	{
		.ctl_name = NET_DECNET_WMEM,
		.procname = "decnet_wmem",
		.data = &sysctl_decnet_wmem,
		.maxlen = sizeof(sysctl_decnet_wmem),
		.mode = 0644,
		.proc_handler = proc_dointvec,
		.strategy = sysctl_intvec,
	},
	{
		.ctl_name = NET_DECNET_DEBUG_LEVEL,
		.procname = "debug",
		.data = &decnet_debug_level,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
		.strategy = sysctl_intvec,
	},
	{0}
};
472 472
/* sysctl directory path: net/decnet/ (empty entry terminates). */
static struct ctl_path dn_path[] = {
	{ .procname = "net", .ctl_name = CTL_NET, },
	{ .procname = "decnet", .ctl_name = NET_DECNET, },
	{ }
};
478 478
/* Register the DECnet sysctl table; the handle is kept for removal. */
void dn_register_sysctl(void)
{
	dn_table_header = register_sysctl_paths(dn_path, dn_table);
}
483 483
/* Remove the DECnet sysctl table registered by dn_register_sysctl(). */
void dn_unregister_sysctl(void)
{
	unregister_sysctl_table(dn_table_header);
}
488 488
489 #else /* CONFIG_SYSCTL */ 489 #else /* CONFIG_SYSCTL */
/* No-op stubs when the kernel is built without CONFIG_SYSCTL. */
void dn_unregister_sysctl(void)
{
}
void dn_register_sysctl(void)
{
}
496 496
497 #endif 497 #endif
498 498